ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
e323db2b-0e80-4d90-9be3-3874744cbaa2 | cpp | google/arolla | delegating_slot_listener | arolla/io/delegating_slot_listener.h | arolla/io/delegating_slot_listener_test.cc | #ifndef AROLLA_IO_DELEGATING_SLOT_LISTENER_H_
#define AROLLA_IO_DELEGATING_SLOT_LISTENER_H_
#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace delegating_output_listener_impl {
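// SlotListener<Output> that forwards to a SlotListener<DelegateOutput>
// reached from Output via `accessor`; listened names must carry
// `name_prefix`, which is stripped before delegation.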
template <class Output, class DelegateOutput>
class DelegatingSlotListener final : public SlotListener<Output> {
struct PrivateConstructorTag {};
public:
template <class Accessor>
static absl::StatusOr<std::unique_ptr<SlotListener<Output>>> Build(
std::unique_ptr<SlotListener<DelegateOutput>> delegate_listener,
const Accessor& accessor, std::string name_prefix) {
static_assert(std::is_same_v<decltype(accessor(std::declval<Output*>())),
DelegateOutput*>,
"Accessor must have `DelegateOutput*` result type.");
return absl::make_unique<DelegatingSlotListener>(
PrivateConstructorTag{}, std::move(delegate_listener), accessor,
std::move(name_prefix));
}
absl::Nullable<const QType*> GetQTypeOf(
absl::string_view name,
absl::Nullable<const QType*> desired_qtype) const final {
if (!absl::ConsumePrefix(&name, name_prefix_)) {
return nullptr;
}
return delegate_listener_->GetQTypeOf(name, desired_qtype);
}
std::vector<std::string> SuggestAvailableNames() const final {
std::vector<std::string> names =
delegate_listener_->SuggestAvailableNames();
for (auto& name : names) {
name = absl::StrCat(name_prefix_, name);
}
return names;
}
DelegatingSlotListener(
PrivateConstructorTag,
std::unique_ptr<SlotListener<DelegateOutput>> delegate_listener,
std::function<DelegateOutput*(Output*)> accessor, std::string name_prefix)
: delegate_listener_(std::move(delegate_listener)),
accessor_(accessor),
name_prefix_(name_prefix) {}
private:
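// Forwards the slots whose names start with name_prefix_ (prefix stripped)
// to the delegate listener and wraps its bound listener with the accessor.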
absl::StatusOr<BoundSlotListener<Output>> BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& input_slots)
const final {
absl::flat_hash_map<std::string, TypedSlot> delegate_input_slots;
for (const auto& [name, slot] : input_slots) {
absl::string_view name_view = name;
if (absl::ConsumePrefix(&name_view, name_prefix_)) {
delegate_input_slots.emplace(std::string(name_view), slot);
}
}
ASSIGN_OR_RETURN(BoundSlotListener<DelegateOutput> bound_delegate_listener,
delegate_listener_->Bind(delegate_input_slots));
return BoundSlotListener<Output>(
[bound_delegate_listener(std::move(bound_delegate_listener)),
accessor(accessor_)](ConstFramePtr frame,
Output* output) -> absl::Status {
return bound_delegate_listener(frame, accessor(output));
});
}
std::unique_ptr<SlotListener<DelegateOutput>> delegate_listener_;
std::function<DelegateOutput*(Output*)> accessor_;
absl::flat_hash_map<std::string, QTypePtr> types_;
std::string name_prefix_;
};
}
template <class Output, class DelegateOutput, class Accessor>
absl::StatusOr<std::unique_ptr<SlotListener<Output>>>
CreateDelegatingSlotListener(
std::unique_ptr<SlotListener<DelegateOutput>> delegate_listener,
const Accessor& accessor, std::string name_prefix = "") {
return delegating_output_listener_impl::DelegatingSlotListener<
Output, DelegateOutput>::Build(std::move(delegate_listener), accessor,
std::move(name_prefix));
}
}
#endif | #include "arolla/io/delegating_slot_listener.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
struct TestStruct {
int a;
double b;
};
struct OuterTestStruct {
TestStruct* a;
int b;
};
TEST(SlotListenerTest, DelegateSlotListener) {
ASSERT_OK_AND_ASSIGN(auto listener_struct,
CreateAccessorsSlotListener<TestStruct>(
"a", [](int a, TestStruct* s) { s->a = a; },
"b", [](double b, TestStruct* s) { s->b = b; }));
auto accessor = [](OuterTestStruct* s) -> TestStruct* { return s->a; };
ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SlotListener<OuterTestStruct>> delegate_slot_listener,
CreateDelegatingSlotListener<OuterTestStruct>(
MakeNotOwningSlotListener(listener_struct.get()), accessor));
ASSERT_OK_AND_ASSIGN(
std::unique_ptr<SlotListener<OuterTestStruct>>
renamed_delegate_slot_listener,
CreateDelegatingSlotListener<OuterTestStruct>(
MakeNotOwningSlotListener(listener_struct.get()), accessor,
"p_"));
for (const auto& [prefix, slot_listener] : std::vector<
std::pair<std::string, const SlotListener<OuterTestStruct>*>>{
{"", delegate_slot_listener.get()},
{"p_", renamed_delegate_slot_listener.get()}}) {
EXPECT_THAT(slot_listener->GetQTypeOf(prefix + "a"),
Eq(GetQType<int32_t>()));
EXPECT_THAT(slot_listener->GetQTypeOf(prefix + "b"),
Eq(GetQType<double>()));
EXPECT_THAT(slot_listener->SuggestAvailableNames(),
ElementsAre(prefix + "a", prefix + "b"));
FrameLayout::Builder layout_builder;
auto a_slot = layout_builder.AddSlot<int>();
auto b_slot = layout_builder.AddSlot<double>();
ASSERT_OK_AND_ASSIGN(BoundSlotListener<OuterTestStruct> bound_slot_listener,
slot_listener->Bind({
{prefix + "a", TypedSlot::FromSlot(a_slot)},
{prefix + "b", TypedSlot::FromSlot(b_slot)},
}));
FrameLayout memory_layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&memory_layout);
alloc.frame().Set(a_slot, 5);
alloc.frame().Set(b_slot, 3.5);
TestStruct ts;
OuterTestStruct ots{&ts, -1};
ASSERT_OK(bound_slot_listener(alloc.frame(), &ots));
EXPECT_EQ(ts.a, 5);
EXPECT_EQ(ts.b, 3.5);
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/delegating_slot_listener.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/delegating_slot_listener_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
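A minimal usage sketch of the delegating-listener API in the row above, condensed from its unit test (illustrative only; `Inner`, `Outer`, and `MakeOuterListener` are invented names):

```cpp
#include <memory>
#include <utility>

#include "absl/status/statusor.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/delegating_slot_listener.h"
#include "arolla/io/slot_listener.h"
#include "arolla/util/status_macros_backport.h"

struct Inner { int a; };
struct Outer { Inner* inner; };

// Listens to the name "inner_a" and writes the value into Outer::inner->a:
// the "inner_" prefix is stripped before delegating to the Inner listener.
absl::StatusOr<std::unique_ptr<arolla::SlotListener<Outer>>>
MakeOuterListener() {
  ASSIGN_OR_RETURN(auto inner_listener,
                   arolla::CreateAccessorsSlotListener<Inner>(
                       "a", [](int a, Inner* s) { s->a = a; }));
  return arolla::CreateDelegatingSlotListener<Outer>(
      std::move(inner_listener),
      [](Outer* o) -> Inner* { return o->inner; },
      /*name_prefix=*/"inner_");
}
```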
baefae19-7d40-4274-aa5f-dd3fcbdc247e | cpp | google/arolla | prepare_expression | arolla/expr/eval/prepare_expression.cc | arolla/expr/eval/prepare_expression_test.cc | #include "arolla/expr/eval/prepare_expression.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/eval/casting.h"
#include "arolla/expr/eval/compile_where_operator.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/extensions.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_stack_trace.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/string.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
using Stage = DynamicEvaluationEngineOptions::PreparationStage;
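// Operator used as the internal root of a prepared expression: arg0 is the
// main output and the variadic args are side outputs (see
// LookupNamedOutputTypes below). Its attributes are those of arg0.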
class InternalRootOperatorImpl final : public BuiltinExprOperatorTag,
public ExprOperatorWithFixedSignature {
public:
InternalRootOperatorImpl()
: ExprOperatorWithFixedSignature(
"_internal_root_operator_",
ExprOperatorSignature{{.name = "arg0"},
{.name = "args",
.kind = ExprOperatorSignature::Parameter::
Kind::kVariadicPositional}},
"",
FingerprintHasher("::arolla::expr::InternalRootOperator")
.Finish()) {}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes> inputs) const final {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
return inputs[0];
}
};
bool AllDepsAreLiterals(const ExprNodePtr& node) {
for (const auto& d : node->node_deps()) {
if (!d->qvalue()) {
return false;
}
}
return true;
}
absl::Status MissingInputTypesError(
const absl::flat_hash_map<std::string, QTypePtr>& input_types,
const ExprNodePtr& root) {
std::set<std::string> missing_types;
for (const auto& node : VisitorOrder(root)) {
if (!node->is_op() || IsQTypeAnnotation(node)) {
continue;
}
for (const auto& d : node->node_deps()) {
if (d->is_leaf() && !input_types.contains(d->leaf_key())) {
missing_types.insert(d->leaf_key());
}
}
}
if (root->is_leaf() && !input_types.contains(root->leaf_key())) {
missing_types.insert(root->leaf_key());
}
return absl::InvalidArgumentError(
absl::StrFormat("missing QType information for inputs {%s}",
Truncate(absl::StrJoin(missing_types, ", "), 200)));
}
absl::StatusOr<ExprNodePtr> AnnotateLeafWithQType(
ExprNodePtr leaf,
const absl::flat_hash_map<std::string, QTypePtr>& input_types,
const ExprNodePtr& root) {
auto it = input_types.find(leaf->leaf_key());
if (it == input_types.end()) {
return MissingInputTypesError(input_types, root);
}
return CallOp(QTypeAnnotation::Make(),
{std::move(leaf), Literal(it->second)});
}
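// Returns a transformation that attaches annotation.qtype to leaf
// dependencies using `input_types`, validating any qtype annotations already
// present on leaves and dropping redundant ones elsewhere.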
NodeTransformationFn PopulateQTypesTransformation(
const absl::flat_hash_map<std::string, QTypePtr>& input_types,
const ExprNodePtr& root) {
return
[&input_types, &root](const DynamicEvaluationEngineOptions&,
ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
if (!node->is_op()) {
return node;
}
if (const QType* annotated_qtype = ReadQTypeAnnotation(node);
annotated_qtype != nullptr) {
if (node->node_deps()[0]->is_leaf()) {
auto it = input_types.find(node->node_deps()[0]->leaf_key());
if (it != input_types.end() && it->second != annotated_qtype) {
return absl::FailedPreconditionError(absl::StrFormat(
"inconsistent qtype annotation and input qtype: %s",
JoinTypeNames({annotated_qtype, it->second})));
}
return node;
} else if (node->node_deps()[0]->qtype() != nullptr) {
return node->node_deps()[0];
}
}
bool has_leaf_dep = false;
for (const auto& d : node->node_deps()) {
if (d->is_leaf()) {
has_leaf_dep = true;
}
}
if (!has_leaf_dep) {
return node;
}
std::vector<ExprNodePtr> new_deps = node->node_deps();
for (auto& d : new_deps) {
if (d->is_leaf()) {
ASSIGN_OR_RETURN(
d, AnnotateLeafWithQType(std::move(d), input_types, root));
}
}
return WithNewDependencies(node, std::move(new_deps));
};
}
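// Replaces operator nodes whose dependencies are all literals with the
// literal result of evaluating them.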
absl::StatusOr<ExprNodePtr> LiteralFoldingTransformation(
const DynamicEvaluationEngineOptions& options, ExprNodePtr node) {
if (!node->is_op() || !AllDepsAreLiterals(node) ||
node->op() == InternalRootOperator()) {
return node;
}
if (node->qvalue()) {
return Literal(*node->qvalue());
}
DynamicEvaluationEngineOptions invoke_options = options;
invoke_options.enabled_preparation_stages &=
~(Stage::kLiteralFolding | Stage::kPopulateQTypes | Stage::kOptimization |
Stage::kWhereOperatorsTransformation);
ASSIGN_OR_RETURN(auto result, Invoke(node, {}, invoke_options),
_ << "while doing literal folding");
return Literal(result);
}
absl::StatusOr<ExprNodePtr> ToLowerTransformation(
const DynamicEvaluationEngineOptions&, ExprNodePtr expr) {
return ToLowerNode(expr);
}
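// Strips annotations other than annotation.qtype, which must be kept until
// qtypes are extracted for compilation.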
absl::StatusOr<ExprNodePtr> StripAnnotationsTransformation(
const DynamicEvaluationEngineOptions&, const ExprNodePtr& node) {
ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(node));
if (is_annotation && node->node_deps().empty()) {
return absl::FailedPreconditionError(absl::StrFormat(
"invalid annotation %s: expected at least 1 argument, got 0",
GetDebugSnippet(node)));
}
return (is_annotation && !IsQTypeAnnotation(node)) ? node->node_deps()[0]
                                                   : node;
}
absl::Status CheckForTypeMismatchAndSetType(
absl::flat_hash_map<Fingerprint, QTypePtr>* resulting_types,
const ExprNodePtr& expr, QTypePtr qtype) {
auto it = resulting_types->find(expr->fingerprint());
if (it != resulting_types->end() && it->second != nullptr) {
if (it->second != qtype) {
return absl::FailedPreconditionError(absl::StrFormat(
"different QTypes found for the same Expr %s: %s vs %s",
GetDebugSnippet(expr), it->second->name(), qtype->name()));
}
} else {
(*resulting_types)[expr->fingerprint()] = qtype;
}
return absl::OkStatus();
}
absl::StatusOr<ExprNodePtr> ApplyNodeTransformations(
const DynamicEvaluationEngineOptions& options, ExprNodePtr expr,
absl::Span<const std::pair<TransformationType, NodeTransformationFn>>
transformations,
std::shared_ptr<ExprStackTrace> stack_trace) {
return DeepTransform(
expr,
[&options, &transformations,
&stack_trace](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
for (const auto& t : transformations) {
ASSIGN_OR_RETURN(auto result, t.second(options, node));
if (result->fingerprint() == node->fingerprint()) {
continue;
}
if (!node->attr().IsSubsetOf(result->attr())) {
return absl::FailedPreconditionError(absl::StrFormat(
"expression %s attributes changed from %s to %s during "
"compilation",
GetDebugSnippet(node), absl::FormatStreamed(node->attr()),
absl::FormatStreamed(result->attr())));
}
if (stack_trace != nullptr) {
stack_trace->AddTrace(result, node, t.first);
}
return result;
}
return node;
},
[&stack_trace](ExprNodePtr node, ExprNodePtr prev_node,
DeepTransformStage stage) {
if (stack_trace != nullptr) {
if (stage == DeepTransformStage::kWithNewDeps) {
stack_trace->AddTrace(node, prev_node,
TransformationType::kChildTransform);
} else if (stage ==
DeepTransformStage::kNewChildAfterTransformation) {
stack_trace->AddTrace(
node, prev_node,
TransformationType::kCausedByAncestorTransform);
}
}
});
}
absl::StatusOr<ExprNodePtr> PrepareSingleLeafExpression(
const ExprNodePtr& expr,
const absl::flat_hash_map<std::string, QTypePtr>& input_types,
const DynamicEvaluationEngineOptions& options) {
if (options.enabled_preparation_stages & Stage::kPopulateQTypes) {
return AnnotateLeafWithQType(expr, input_types, expr);
} else {
return expr;
}
}
}
absl::StatusOr<ExprNodePtr> PrepareExpression(
const ExprNodePtr& expr,
const absl::flat_hash_map<std::string, QTypePtr>& input_types,
const DynamicEvaluationEngineOptions& options,
std::shared_ptr<ExprStackTrace> stack_trace) {
if (expr->is_leaf()) {
return PrepareSingleLeafExpression(expr, input_types, options);
}
ExprNodePtr current_expr = expr;
std::vector<std::pair<TransformationType, NodeTransformationFn>>
transformations;
if (options.enabled_preparation_stages & Stage::kPopulateQTypes) {
transformations.push_back(
{TransformationType::kUntraced,
PopulateQTypesTransformation(input_types, expr)});
}
if (options.enabled_preparation_stages & Stage::kLiteralFolding) {
transformations.push_back(
{TransformationType::kUntraced, LiteralFoldingTransformation});
}
if (options.enabled_preparation_stages & Stage::kToLower) {
transformations.push_back(
{TransformationType::kLowering, ToLowerTransformation});
}
if (options.enabled_preparation_stages & Stage::kStripAnnotations) {
transformations.push_back(
{TransformationType::kUntraced, StripAnnotationsTransformation});
}
if (options.enabled_preparation_stages &
Stage::kBackendCompatibilityCasting) {
transformations.push_back(
{TransformationType::kUntraced, CastingTransformation});
}
if (options.enabled_preparation_stages & Stage::kOptimization &&
options.optimizer.has_value()) {
transformations.push_back(
{TransformationType::kOptimization,
[](const DynamicEvaluationEngineOptions& options, ExprNodePtr expr) {
return (*options.optimizer)(std::move(expr));
}});
}
if (options.enabled_preparation_stages & Stage::kExtensions) {
transformations.push_back(
{TransformationType::kUntraced, CompilerExtensionRegistry::GetInstance()
.GetCompilerExtensionSet()
.node_transformation_fn});
}
ASSIGN_OR_RETURN(current_expr,
ApplyNodeTransformations(options, current_expr,
transformations, stack_trace));
if (options.enabled_preparation_stages &
Stage::kWhereOperatorsTransformation) {
ASSIGN_OR_RETURN(current_expr,
WhereOperatorGlobalTransformation(options, current_expr));
}
return current_expr;
}
ExprOperatorPtr InternalRootOperator() {
static absl::NoDestructor<ExprOperatorPtr> first_op(
std::make_shared<InternalRootOperatorImpl>());
return (*first_op);
}
absl::StatusOr<absl::flat_hash_map<std::string, QTypePtr>>
LookupNamedOutputTypes(
const ExprNodePtr& prepared_expr,
const std::vector<std::string>& side_output_names,
const absl::flat_hash_map<Fingerprint, QTypePtr>& node_types) {
absl::flat_hash_map<std::string, QTypePtr> named_output_types;
if (!side_output_names.empty()) {
const auto& root_deps = prepared_expr->node_deps();
if (root_deps.size() != side_output_names.size() + 1) {
return absl::InternalError("inconsistent side_output_names size");
}
named_output_types.reserve(side_output_names.size());
for (size_t i = 0; i != side_output_names.size(); ++i) {
const auto& name = side_output_names[i];
if (auto it = node_types.find(root_deps[i + 1]->fingerprint());
it != node_types.end()) {
named_output_types.emplace(name, it->second);
} else {
return absl::FailedPreconditionError(
absl::StrFormat("unable to deduce named output type for %s in "
"the expression %s.",
name, GetDebugSnippet(prepared_expr)));
}
}
}
return named_output_types;
}
absl::StatusOr<ExprNodePtr> ExtractQTypesForCompilation(
const ExprNodePtr& expr,
absl::flat_hash_map<Fingerprint, QTypePtr>* resulting_types,
std::shared_ptr<ExprStackTrace> stack_trace) {
return PostOrderTraverse(
expr,
[&resulting_types, &stack_trace](
const ExprNodePtr& node, absl::Span<const ExprNodePtr* const> visits)
-> absl::StatusOr<ExprNodePtr> {
if (IsQTypeAnnotation(node) && !visits.empty()) {
QTypePtr qtype = node->qtype();
ExprNodePtr wrapped_node = *(visits[0]);
RETURN_IF_ERROR(CheckForTypeMismatchAndSetType(resulting_types,
wrapped_node, qtype));
ASSIGN_OR_RETURN(bool is_annotation, IsAnnotation(wrapped_node));
while (is_annotation && !wrapped_node->node_deps().empty()) {
wrapped_node = wrapped_node->node_deps()[0];
RETURN_IF_ERROR(CheckForTypeMismatchAndSetType(
resulting_types, wrapped_node, qtype));
ASSIGN_OR_RETURN(is_annotation, IsAnnotation(wrapped_node));
}
if (stack_trace != nullptr) {
stack_trace->AddTrace(*(visits[0]), node,
TransformationType::kUntraced);
}
return *(visits[0]);
}
std::vector<expr::ExprNodePtr> node_deps =
DereferenceVisitPointers(visits);
ASSIGN_OR_RETURN(auto new_node,
WithNewDependencies(node, std::move(node_deps)));
RETURN_IF_ERROR(CheckForTypeMismatchAndSetType(
resulting_types, new_node, node->qtype()));
if (stack_trace != nullptr) {
stack_trace->AddTrace(new_node, node, TransformationType::kUntraced);
}
return new_node;
});
}
absl::StatusOr<QTypePtr> LookupQType(
const ExprNodePtr node,
const absl::flat_hash_map<Fingerprint, QTypePtr>& types) {
if (auto it = types.find(node->fingerprint()); it != types.end()) {
return it->second;
}
return absl::InternalError(
absl::StrFormat("unknown QType for node %s", GetDebugSnippet(node)));
}
absl::StatusOr<absl::flat_hash_map<std::string, QTypePtr>> LookupLeafQTypes(
const ExprNodePtr& expr,
const absl::flat_hash_map<Fingerprint, QTypePtr>& types) {
absl::flat_hash_map<std::string, QTypePtr> result;
for (const auto& node : VisitorOrder(expr)) {
if (node->is_leaf()) {
ASSIGN_OR_RETURN(result[node->leaf_key()], LookupQType(node, types));
}
}
return result;
}
} | #include "arolla/expr/eval/prepare_expression.h"
#include <cstdint>
#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_stack_trace.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/optimization/optimizer.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::testing::DummyOp;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::Eq;
using ::testing::HasSubstr;
class IdentityAnnotation final : public AnnotationExprOperatorTag,
public ExprOperatorWithFixedSignature {
public:
IdentityAnnotation()
: ExprOperatorWithFixedSignature(
"id", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher("arolla::expr::IdentityAnnotation").Finish()) {}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes> inputs) const final {
return inputs[0];
}
};
class OperatorWithBadGetOutputQType : public ExprOperatorWithFixedSignature {
public:
OperatorWithBadGetOutputQType()
: ExprOperatorWithFixedSignature(
"bad_op", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher("arolla::expr::OperatorWithBadGetOutputQType")
.Finish()) {}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes> inputs) const final {
return ExprAttributes(GetQType<int64_t>());
}
absl::StatusOr<ExprNodePtr> ToLowerLevel(
const ExprNodePtr& node) const final {
return node->node_deps()[0];
}
};
class OperatorWithNoInferAttributes final
: public ExprOperatorWithFixedSignature {
public:
OperatorWithNoInferAttributes()
: ExprOperatorWithFixedSignature(
"no_infer_attr", ExprOperatorSignature::MakeArgsN(1), "",
FingerprintHasher("arolla::expr::OperatorWithNoInferAttributes")
.Finish()) {}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes> inputs) const final {
return ExprAttributes{};
}
};
TEST(PrepareExpressionTest, ExtractQTypeForCompilation) {
const auto id_annotation = std::make_shared<IdentityAnnotation>();
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto id_expr, CallOp(id_annotation, {x}));
ASSERT_OK_AND_ASSIGN(auto expr,
WithQTypeAnnotation(id_expr, GetQType<float>()));
absl::flat_hash_map<Fingerprint, QTypePtr> types;
ASSERT_OK_AND_ASSIGN(auto stripped_expr,
ExtractQTypesForCompilation(expr, &types));
EXPECT_THAT(stripped_expr, EqualsExpr(id_expr));
EXPECT_EQ(types[x->fingerprint()], GetQType<float>());
EXPECT_EQ(types[id_expr->fingerprint()], GetQType<float>());
}
TEST(PrepareExpressionTest, Optimizations) {
auto pattern_op = std::make_shared<DummyOp>(
"pattern_op", ExprOperatorSignature::MakeArgsN(2));
auto pattern_x = Literal(2);
Optimizer literals_optimizer = [pattern_op, pattern_x](ExprNodePtr node) {
if (node->op() == pattern_op &&
node->node_deps()[0]->fingerprint() == pattern_x->fingerprint()) {
return Literal(57);
}
return node;
};
const absl::flat_hash_map<std::string, QTypePtr> input_qtypes = {
{"u", GetQType<Text>()}, {"v", GetQType<Bytes>()}};
DynamicEvaluationEngineOptions options{.optimizer = literals_optimizer};
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add", {CallOp(pattern_op, {pattern_x, Leaf("u")}),
CallOp(pattern_op, {pattern_x, Leaf("v")})}));
EXPECT_THAT(PrepareExpression(expr, input_qtypes, options),
IsOkAndHolds(EqualsExpr(Literal(114))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(pattern_op,
{CallOp("math.add", {Literal(1), Literal(1)}), Leaf("u")}));
EXPECT_THAT(PrepareExpression(expr, input_qtypes, options),
IsOkAndHolds(EqualsExpr(Literal(57))));
}
ASSERT_OK_AND_ASSIGN(
auto add_1_lambda,
MakeLambdaOperator(ExprOperatorSignature{{"x"}},
CallOp("math.add", {Placeholder("x"), Literal(1)})));
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(add_1_lambda, {CallOp(pattern_op, {pattern_x, Leaf("u")})}));
EXPECT_THAT(PrepareExpression(expr, input_qtypes, options),
IsOkAndHolds(EqualsExpr(Literal(58))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(pattern_op, {CallOp(add_1_lambda, {Literal(1)}), Leaf("u")}));
EXPECT_THAT(PrepareExpression(expr, input_qtypes, options),
IsOkAndHolds(EqualsExpr(Literal(57))));
}
}
TEST(PrepareExpressionTest, DetailedStackTraceBuilding) {
ASSERT_OK_AND_ASSIGN(
auto add_2_lambda,
MakeLambdaOperator("add_2_lambda", ExprOperatorSignature{{"x"}},
CallOp("math.add", {Placeholder("x"), Literal(2)})));
auto pattern_op = std::make_shared<DummyOp>(
"pattern_op", ExprOperatorSignature::MakeArgsN(2));
Optimizer dummy_optimizer =
[pattern_op,
add_2_lambda](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
if (node->op() == pattern_op &&
node->node_deps()[0]->fingerprint() == Literal(2)->fingerprint()) {
return CallOp(add_2_lambda, {node->node_deps()[1]});
}
return node;
};
DynamicEvaluationEngineOptions options{.optimizer = dummy_optimizer};
auto stack_trace = std::make_shared<DetailedExprStackTrace>();
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(pattern_op,
{CallOp("math.add", {Literal(1), Literal(1)}), Leaf("u")}));
ASSERT_OK_AND_ASSIGN(
auto prepared_expr,
PrepareExpression(expr, {{"u", GetQType<int>()}}, options, stack_trace));
EXPECT_EQ(stack_trace->FullTrace(prepared_expr->fingerprint()),
"ORIGINAL NODE: pattern_op(M.math.add(..., ...):INT32, L.u)\n"
"COMPILED NODE: M.math.add(annotation.qtype(..., ...), 2):INT32\n"
"DETAILED STACK TRACE:\n"
"pattern_op(M.math.add(..., ...):INT32, L.u)\n"
" had transformations applied to its children\n"
"pattern_op(2, L.u)\n"
" was optimized to\n"
"add_2_lambda(annotation.qtype(..., ...)):INT32\n"
" was lowered to\n"
"M.math.add(annotation.qtype(..., ...), 2):INT32");
}
TEST(PrepareExpressionTest, LightweightStackTraceBuilding) {
ASSERT_OK_AND_ASSIGN(
auto add_2_lambda,
MakeLambdaOperator("add_2_lambda", ExprOperatorSignature{{"x"}},
CallOp("math.add", {Placeholder("x"), Literal(2)})));
auto pattern_op = std::make_shared<DummyOp>(
"pattern_op", ExprOperatorSignature::MakeArgsN(2));
Optimizer dummy_optimizer =
[pattern_op,
add_2_lambda](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
if (node->op() == pattern_op &&
node->node_deps()[0]->fingerprint() == Literal(2)->fingerprint()) {
return CallOp(add_2_lambda, {node->node_deps()[1]});
}
return node;
};
DynamicEvaluationEngineOptions options{.optimizer = dummy_optimizer};
auto stack_trace = std::make_shared<LightweightExprStackTrace>();
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp(pattern_op,
{CallOp("math.add", {Literal(1), Literal(1)}), Leaf("u")}));
ASSERT_OK_AND_ASSIGN(
auto prepared_expr,
PrepareExpression(expr, {{"u", GetQType<int>()}}, options, stack_trace));
stack_trace->AddRepresentations(prepared_expr, expr);
EXPECT_EQ(stack_trace->FullTrace(prepared_expr->fingerprint()),
"ORIGINAL NODE: pattern_op(M.math.add(..., ...):INT32, L.u)\n"
"COMPILED NODE: M.math.add(annotation.qtype(..., ...), 2):INT32");
}
TEST(PrepareExpressionTest, StackTraceWithErrorNestedUnderLambda) {
ASSERT_OK_AND_ASSIGN(
auto lambda_with_nested_error,
MakeLambdaOperator(
"lambda_with_nested_error", ExprOperatorSignature{{"x"}, {"y"}},
CallOp("math.add",
{Literal(2.0), CallOp("math.divide", {Placeholder("x"),
Placeholder("y")})})));
auto stack_trace = std::make_shared<DetailedExprStackTrace>();
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp(lambda_with_nested_error, {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(
auto prepared_expr,
PrepareExpression(expr,
{{"x", GetQType<float>()}, {"y", GetQType<float>()}},
DynamicEvaluationEngineOptions{}, stack_trace));
ASSERT_OK_AND_ASSIGN(auto faulty_node,
CallOp("math.divide", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(
faulty_node,
PrepareExpression(faulty_node,
{{"x", GetQType<float>()}, {"y", GetQType<float>()}},
DynamicEvaluationEngineOptions{}));
EXPECT_THAT(
stack_trace->FullTrace(faulty_node->fingerprint()),
Eq("ORIGINAL NODE: lambda_with_nested_error(L.x, L.y)\n"
"COMPILED NODE: M.math.divide(annotation.qtype(..., ...), "
"annotation.qtype(..., ...)):FLOAT32\n"
"DETAILED STACK TRACE:\n"
"lambda_with_nested_error(L.x, L.y)\n"
" was lowered to\n"
"M.math.add(float64{2}, M.math.divide(..., ...):FLOAT32):FLOAT64\n"
" which contains\n"
"M.math.divide(annotation.qtype(..., ...),"
" annotation.qtype(..., ...)):FLOAT32"));
}
TEST(PrepareExpressionTest, StackTraceBuildingNoTransformations) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("edge.from_sizes",
{CallOp("annotation.qtype",
{Leaf("x"), Literal(GetDenseArrayQType<int64_t>())})}));
auto stack_trace = std::make_shared<DetailedExprStackTrace>();
ASSERT_OK_AND_ASSIGN(
auto prepared_expr,
PrepareExpression(expr, {{"x", GetDenseArrayQType<int64_t>()}},
DynamicEvaluationEngineOptions{}, stack_trace));
BoundExprStackTraceBuilder stack_trace_builder(stack_trace);
stack_trace_builder.RegisterIp(0, prepared_expr);
auto bound_stack_trace = stack_trace_builder.Build(10);
EXPECT_EQ(bound_stack_trace[0], "");
}
TEST(PrepareExpressionTest, StackTraceAnnotationCycle) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("edge.from_sizes", {Leaf("x")}));
auto stack_trace = std::make_shared<DetailedExprStackTrace>();
ASSERT_OK_AND_ASSIGN(
auto prepared_expr,
PrepareExpression(expr, {{"x", GetDenseArrayQType<int64_t>()}},
DynamicEvaluationEngineOptions{}, stack_trace));
absl::flat_hash_map<Fingerprint, QTypePtr> node_types;
ASSERT_OK_AND_ASSIGN(prepared_expr,
eval_internal::ExtractQTypesForCompilation(
prepared_expr, &node_types, stack_trace));
BoundExprStackTraceBuilder stack_trace_builder(stack_trace);
stack_trace_builder.RegisterIp(0, prepared_expr);
auto bound_stack_trace = stack_trace_builder.Build(10);
EXPECT_EQ(bound_stack_trace[0], "");
}
TEST(PrepareExpressionTest, OperatorWithBadGetOutputQType) {
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(std::make_shared<OperatorWithBadGetOutputQType>(),
{Literal(2.0)}));
EXPECT_THAT(
PrepareExpression(expr, {}, DynamicEvaluationEngineOptions{}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"expression bad_op(float64{2}):INT64 attributes changed in "
"ToLower from Attr(qtype=INT64) to Attr(qvalue=float64{2}); "
"this indicates incorrect InferAttributes() or GetOutputType() "
"of the operator bad_op; while transforming "
"bad_op(float64{2}):INT64; while doing literal folding; while "
"transforming bad_op(float64{2}):INT64"));
}
TEST(PrepareExpressionTest, StripAnnotations) {
const auto id_annotation = std::make_shared<IdentityAnnotation>();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp(id_annotation,
{CallOp("math.neg", {CallOp(id_annotation, {x})})}));
EXPECT_THAT(PrepareExpression(expr, {}, DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(CallOp("math.neg", {x}))));
}
TEST(PrepareExpressionTest, SingleLeafExpression) {
auto expr = Leaf("x");
EXPECT_THAT(
PrepareExpression(expr, {{"x", GetQType<float>()}},
DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(CallOp(
QTypeAnnotation::Make(), {Leaf("x"), Literal(GetQType<float>())}))));
EXPECT_THAT(PrepareExpression(expr, {}, DynamicEvaluationEngineOptions{}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("missing QType information for inputs {x}")));
}
TEST(PrepareExpressionTest, QTypeAnnotations) {
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.neg", {WithQTypeAnnotation(Leaf("x"), GetQType<float>())}));
EXPECT_THAT(PrepareExpression(expr, {}, DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(expr)));
EXPECT_THAT(PrepareExpression(expr, {{"x", GetQType<float>()}},
DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(expr)));
EXPECT_THAT(PrepareExpression(expr, {{"x", GetQType<double>()}},
DynamicEvaluationEngineOptions{}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("inconsistent qtype annotation and input "
"qtype: FLOAT32,FLOAT64")));
EXPECT_THAT(PrepareExpression(expr, {{"x", nullptr}},
DynamicEvaluationEngineOptions{}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("inconsistent qtype annotation and input "
"qtype: FLOAT32,NULL")));
}
TEST(PrepareExpressionTest, QTypeAnnotations_WithPartiallyAnnotatedLeaves) {
auto x = Leaf("x");
ASSERT_OK_AND_ASSIGN(auto typed_x, CallOp(QTypeAnnotation::Make(),
{x, Literal(GetQType<float>())}));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.make_tuple",
{typed_x,
x}));
EXPECT_THAT(PrepareExpression(expr, {}, DynamicEvaluationEngineOptions{}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("missing QType information for inputs {x}")));
EXPECT_THAT(
PrepareExpression(expr, {{"x", GetQType<float>()}},
DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(CallOp("core.make_tuple", {typed_x, typed_x}))));
}
TEST(PrepareExpressionTest, StripExtraQTypeAnnotations) {
ASSERT_OK_AND_ASSIGN(auto typed_x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto typed_typed_x,
WithQTypeAnnotation(typed_x, GetQType<float>()));
EXPECT_THAT(
PrepareExpression(typed_typed_x, {}, DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(typed_x)));
ASSERT_OK_AND_ASSIGN(
auto expr_with_non_deducible_type_annotation,
WithQTypeAnnotation(
CallOp(std::make_shared<OperatorWithNoInferAttributes>(), {typed_x}),
GetQType<float>()));
EXPECT_THAT(
PrepareExpression(expr_with_non_deducible_type_annotation, {},
DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(expr_with_non_deducible_type_annotation)));
ASSERT_OK_AND_ASSIGN(
auto expr_with_double_type_annotation,
WithQTypeAnnotation(expr_with_non_deducible_type_annotation,
GetQType<float>()));
EXPECT_THAT(
PrepareExpression(expr_with_double_type_annotation, {},
DynamicEvaluationEngineOptions{}),
IsOkAndHolds(EqualsExpr(expr_with_non_deducible_type_annotation)));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/prepare_expression.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/prepare_expression_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
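A condensed sketch of the preparation entry point above, following the row's tests (illustrative only; `PrepareNegExpr` is an invented name):

```cpp
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/expr.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/status_macros_backport.h"

// Prepares M.math.neg(L.x) for dynamic evaluation: the kPopulateQTypes stage
// wraps L.x into annotation.qtype(L.x, FLOAT32), then literal folding,
// lowering, and casting run in order.
absl::StatusOr<arolla::expr::ExprNodePtr> PrepareNegExpr() {
  using arolla::expr::CallOp;
  using arolla::expr::Leaf;
  ASSIGN_OR_RETURN(auto expr, CallOp("math.neg", {Leaf("x")}));
  return arolla::expr::eval_internal::PrepareExpression(
      expr, {{"x", arolla::GetQType<float>()}},
      arolla::expr::DynamicEvaluationEngineOptions{});
}
```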
1fa7adaf-94c9-45aa-bdf9-54656547fa6f | cpp | google/cel-cpp | comparison_functions | eval/public/comparison_functions.cc | eval/public/comparison_functions_test.cc | #include "eval/public/comparison_functions.h"
#include "absl/status/status.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_options.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
#include "runtime/standard/comparison_functions.h"
namespace google::api::expr::runtime {
absl::Status RegisterComparisonFunctions(CelFunctionRegistry* registry,
const InterpreterOptions& options) {
cel::RuntimeOptions modern_options = ConvertToRuntimeOptions(options);
cel::FunctionRegistry& modern_registry = registry->InternalGetRegistry();
return cel::RegisterComparisonFunctions(modern_registry, modern_options);
}
} | #include "eval/public/comparison_functions.h"
#include <memory>
#include <tuple>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/rpc/context/attribute_context.pb.h"
#include "google/protobuf/arena.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "eval/public/activation.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/testing/matchers.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace google::api::expr::runtime {
namespace {
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::rpc::context::AttributeContext;
using ::testing::Combine;
using ::testing::ValuesIn;
MATCHER_P2(DefinesHomogenousOverload, name, argument_type,
absl::StrCat(name, " for ", CelValue::TypeName(argument_type))) {
const CelFunctionRegistry& registry = arg;
return !registry
.FindOverloads(name, false,
{argument_type, argument_type})
.empty();
}
struct ComparisonTestCase {
absl::string_view expr;
bool result;
CelValue lhs = CelValue::CreateNull();
CelValue rhs = CelValue::CreateNull();
};
class ComparisonFunctionTest
: public testing::TestWithParam<std::tuple<ComparisonTestCase, bool>> {
public:
ComparisonFunctionTest() {
options_.enable_heterogeneous_equality = std::get<1>(GetParam());
options_.enable_empty_wrapper_null_unboxing = true;
builder_ = CreateCelExpressionBuilder(options_);
}
CelFunctionRegistry& registry() { return *builder_->GetRegistry(); }
absl::StatusOr<CelValue> Evaluate(absl::string_view expr, const CelValue& lhs,
const CelValue& rhs) {
CEL_ASSIGN_OR_RETURN(ParsedExpr parsed_expr, parser::Parse(expr));
Activation activation;
activation.InsertValue("lhs", lhs);
activation.InsertValue("rhs", rhs);
CEL_ASSIGN_OR_RETURN(auto expression,
builder_->CreateExpression(
&parsed_expr.expr(), &parsed_expr.source_info()));
return expression->Evaluate(activation, &arena_);
}
protected:
std::unique_ptr<CelExpressionBuilder> builder_;
InterpreterOptions options_;
google::protobuf::Arena arena_;
};
TEST_P(ComparisonFunctionTest, SmokeTest) {
ComparisonTestCase test_case = std::get<0>(GetParam());
google::protobuf::LinkMessageReflection<AttributeContext>();
ASSERT_OK(RegisterComparisonFunctions(®istry(), options_));
ASSERT_OK_AND_ASSIGN(auto result,
Evaluate(test_case.expr, test_case.lhs, test_case.rhs));
EXPECT_THAT(result, test::IsCelBool(test_case.result));
}
INSTANTIATE_TEST_SUITE_P(
LessThan, ComparisonFunctionTest,
Combine(ValuesIn<ComparisonTestCase>(
{
{"false < true", true},
{"1 < 2", true},
{"-2 < -1", true},
{"1.1 < 1.2", true},
{"'a' < 'b'", true},
{"lhs < rhs", true, CelValue::CreateBytesView("a"),
CelValue::CreateBytesView("b")},
{"lhs < rhs", true, CelValue::CreateDuration(absl::Seconds(1)),
CelValue::CreateDuration(absl::Seconds(2))},
{"lhs < rhs", true,
CelValue::CreateTimestamp(absl::FromUnixSeconds(20)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(30))}}),
testing::Bool()));
INSTANTIATE_TEST_SUITE_P(
GreaterThan, ComparisonFunctionTest,
testing::Combine(
testing::ValuesIn<ComparisonTestCase>(
{{"false > true", false},
{"1 > 2", false},
{"-2 > -1", false},
{"1.1 > 1.2", false},
{"'a' > 'b'", false},
{"lhs > rhs", false, CelValue::CreateBytesView("a"),
CelValue::CreateBytesView("b")},
{"lhs > rhs", false, CelValue::CreateDuration(absl::Seconds(1)),
CelValue::CreateDuration(absl::Seconds(2))},
{"lhs > rhs", false,
CelValue::CreateTimestamp(absl::FromUnixSeconds(20)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(30))}}),
testing::Bool()));
INSTANTIATE_TEST_SUITE_P(
GreaterOrEqual, ComparisonFunctionTest,
Combine(ValuesIn<ComparisonTestCase>(
{{"false >= true", false},
{"1 >= 2", false},
{"-2 >= -1", false},
{"1.1 >= 1.2", false},
{"'a' >= 'b'", false},
{"lhs >= rhs", false, CelValue::CreateBytesView("a"),
CelValue::CreateBytesView("b")},
{"lhs >= rhs", false,
CelValue::CreateDuration(absl::Seconds(1)),
CelValue::CreateDuration(absl::Seconds(2))},
{"lhs >= rhs", false,
CelValue::CreateTimestamp(absl::FromUnixSeconds(20)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(30))}}),
testing::Bool()));
INSTANTIATE_TEST_SUITE_P(
LessOrEqual, ComparisonFunctionTest,
Combine(testing::ValuesIn<ComparisonTestCase>(
{{"false <= true", true},
{"1 <= 2", true},
{"-2 <= -1", true},
{"1.1 <= 1.2", true},
{"'a' <= 'b'", true},
{"lhs <= rhs", true, CelValue::CreateBytesView("a"),
CelValue::CreateBytesView("b")},
{"lhs <= rhs", true,
CelValue::CreateDuration(absl::Seconds(1)),
CelValue::CreateDuration(absl::Seconds(2))},
{"lhs <= rhs", true,
CelValue::CreateTimestamp(absl::FromUnixSeconds(20)),
CelValue::CreateTimestamp(absl::FromUnixSeconds(30))}}),
testing::Bool()));
INSTANTIATE_TEST_SUITE_P(HeterogeneousNumericComparisons,
ComparisonFunctionTest,
Combine(testing::ValuesIn<ComparisonTestCase>(
{
{"1 < 2u", true},
{"2 < 1u", false},
{"1 < 2.1", true},
{"3 < 2.1", false},
{"1u < 2", true},
{"2u < 1", false},
{"1u < -1.1", false},
{"1u < 2.1", true},
{"1.1 < 2", true},
{"1.1 < 1", false},
{"1.0 < 1u", false},
{"1.0 < 3u", true},
{"1 <= 2u", true},
{"2 <= 1u", false},
{"1 <= 2.1", true},
{"3 <= 2.1", false},
{"1u <= 2", true},
{"1u <= 0", false},
{"1u <= -1.1", false},
{"2u <= 1.0", false},
{"1.1 <= 2", true},
{"2.1 <= 2", false},
{"1.0 <= 1u", true},
{"1.1 <= 1u", false},
{"3 > 2u", true},
{"3 > 4u", false},
{"3 > 2.1", true},
{"3 > 4.1", false},
{"3u > 2", true},
{"3u > 4", false},
{"3u > -1.1", true},
{"3u > 4.1", false},
{"3.1 > 2", true},
{"3.1 > 4", false},
{"3.0 > 1u", true},
{"3.0 > 4u", false},
{"3 >= 2u", true},
{"3 >= 4u", false},
{"3 >= 2.1", true},
{"3 >= 4.1", false},
{"3u >= 2", true},
{"3u >= 4", false},
{"3u >= -1.1", true},
{"3u >= 4.1", false},
{"3.1 >= 2", true},
{"3.1 >= 4", false},
{"3.0 >= 1u", true},
{"3.0 >= 4u", false},
{"1u >= -1", true},
{"1 >= 4u", false},
{"-1 < 1u", true},
{"1 < 9223372036854775808u", true}}),
testing::Values<bool>(true)));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/comparison_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/comparison_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
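A sketch of the registration path exercised by the row's tests: build an expression builder, register the comparison overloads, and evaluate a heterogeneous comparison (illustrative only; `EvaluateLessThan` is an invented name):

```cpp
#include <memory>

#include "google/protobuf/arena.h"
#include "absl/status/statusor.h"
#include "eval/public/activation.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/comparison_functions.h"
#include "internal/status_macros.h"
#include "parser/parser.h"

namespace rt = google::api::expr::runtime;

// With heterogeneous equality enabled, "1 < 2u" compares an int against a
// uint and evaluates to CelBool(true).
absl::StatusOr<rt::CelValue> EvaluateLessThan(google::protobuf::Arena* arena) {
  rt::InterpreterOptions options;
  options.enable_heterogeneous_equality = true;
  auto builder = rt::CreateCelExpressionBuilder(options);
  CEL_RETURN_IF_ERROR(
      rt::RegisterComparisonFunctions(builder->GetRegistry(), options));
  CEL_ASSIGN_OR_RETURN(auto parsed,
                       google::api::expr::parser::Parse("1 < 2u"));
  CEL_ASSIGN_OR_RETURN(
      auto expression,
      builder->CreateExpression(&parsed.expr(), &parsed.source_info()));
  rt::Activation activation;
  return expression->Evaluate(activation, arena);
}
```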
31510ab2-ecd4-4755-b6bd-8f4bd89138d6 | cpp | google/tensorstore | raw_bytes_hex | tensorstore/internal/json_binding/raw_bytes_hex.cc | tensorstore/internal/json_binding/raw_bytes_hex_test.cc | #include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include <cassert>
#include <cstring>
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
namespace tensorstore {
namespace internal_json_binding {
namespace {
bool IsHexString(std::string_view s) {
for (char c : s) {
if (!(c >= '0' && c <= '9') && !(c >= 'a' && c <= 'f') &&
!(c >= 'A' && c <= 'F')) {
return false;
}
}
return true;
}
}
namespace raw_bytes_hex_binder {
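// Loading: expects a JSON string of exactly 2 * num_bytes hex digits and
// copies the decoded bytes into `obj`.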
absl::Status RawBytesHexImpl::operator()(std::true_type is_loading, NoOptions,
void* obj, ::nlohmann::json* j) const {
auto* s = j->get_ptr<const std::string*>();
if (!s || s->size() != 2 * num_bytes ||
!internal_json_binding::IsHexString(*s)) {
return absl::InvalidArgumentError(
absl::StrFormat("Expected string with %d hex digits, but received: %s",
num_bytes * 2, j->dump()));
}
std::string temp = absl::HexStringToBytes(*s);
assert(temp.size() == num_bytes);
std::memcpy(obj, temp.data(), num_bytes);
return absl::OkStatus();
}
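// Saving: encodes the num_bytes bytes at `obj` as a lowercase hex string.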
absl::Status RawBytesHexImpl::operator()(std::false_type is_loading, NoOptions,
const void* obj,
::nlohmann::json* j) const {
*j = absl::BytesToHexString(
absl::string_view(reinterpret_cast<const char*>(obj), num_bytes));
return absl::OkStatus();
}
}
}
} | #include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include <string>
#include <tuple>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(RawBytesHexTest, RoundTrip) {
tensorstore::TestJsonBinderRoundTrip<std::array<unsigned char, 3>>(
{
{{{1, 2, 0xab}}, "0102ab"},
},
jb::RawBytesHex);
tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<
std::array<unsigned char, 3>>(
{
{"0102AB", "0102ab"},
},
jb::RawBytesHex);
}
TEST(RawBytesHexTest, Invalid) {
tensorstore::TestJsonBinderFromJson<std::array<unsigned char, 3>>(
{
{1,
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string with 6 hex digits, but received: 1")},
{"0102zb", MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string with 6 hex "
"digits, but received: \"0102zb\"")},
},
jb::RawBytesHex);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/raw_bytes_hex.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/raw_bytes_hex_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
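A sketch of the hex binder above driven through the generic load/save helpers; this assumes the `jb::FromJson` / `jb::ToJson` convenience functions from json_binding.h and is illustrative only:

```cpp
#include <array>

#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/raw_bytes_hex.h"
#include "tensorstore/util/result.h"

namespace jb = tensorstore::internal_json_binding;

// Loading accepts either hex case ("0102AB"); saving always emits lowercase,
// so the round trip normalizes to "0102ab".
tensorstore::Result<::nlohmann::json> RoundTripThreeBytes() {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto bytes, (jb::FromJson<std::array<unsigned char, 3>>(
                      "0102AB", jb::RawBytesHex)));
  return jb::ToJson(bytes, jb::RawBytesHex);
}
```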
61064559-7093-4c01-8255-fe9997c3c16e | cpp | google/tensorstore | index_interval | tensorstore/index_interval.cc | tensorstore/index_interval_test.cc | #include "tensorstore/index_interval.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <ostream>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
Result<IndexInterval> IndexInterval::Closed(Index inclusive_min,
Index inclusive_max) {
if (!ValidClosed(inclusive_min, inclusive_max)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("(", inclusive_min, ", ", inclusive_max,
") do not specify a valid closed index interval"));
}
return UncheckedClosed(inclusive_min, inclusive_max);
}
Result<IndexInterval> IndexInterval::HalfOpen(Index inclusive_min,
Index exclusive_max) {
if (!ValidHalfOpen(inclusive_min, exclusive_max)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"(", inclusive_min, ", ", exclusive_max,
") do not specify a valid half-open index interval"));
}
return UncheckedHalfOpen(inclusive_min, exclusive_max);
}
Result<IndexInterval> IndexInterval::Sized(Index inclusive_min, Index size) {
if (!ValidSized(inclusive_min, size)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("(", inclusive_min, ", ", size,
") do not specify a valid sized index interval"));
}
return UncheckedSized(inclusive_min, size);
}
std::ostream& operator<<(std::ostream& os,
const OptionallyImplicitIndexInterval& x) {
if (x.inclusive_min() == -kInfIndex) {
os << "(-inf";
} else {
os << '[' << x.inclusive_min();
}
if (x.implicit_lower()) os << '*';
os << ", ";
if (x.inclusive_max() == +kInfIndex) {
os << "+inf";
} else {
os << x.exclusive_max();
}
if (x.implicit_upper()) os << '*';
return os << ")";
}
std::ostream& operator<<(std::ostream& os, IndexInterval x) {
return os << OptionallyImplicitIndexInterval(x, false, false);
}
namespace {
template <ContainerKind CKindA, ContainerKind CKindB>
bool EqualImpl(const IndexDomainDimension<CKindA>& a,
const IndexDomainDimension<CKindB>& b) {
return (a.optionally_implicit_interval() ==
b.optionally_implicit_interval() &&
a.label() == b.label());
}
}
bool operator==(const IndexDomainDimension<container>& a,
const IndexDomainDimension<container>& b) {
return EqualImpl(a, b);
}
bool operator==(const IndexDomainDimension<view>& a,
const IndexDomainDimension<view>& b) {
return EqualImpl(a, b);
}
bool operator==(const IndexDomainDimension<view>& a,
const IndexDomainDimension<container>& b) {
return EqualImpl(a, b);
}
bool operator==(const IndexDomainDimension<container>& a,
const IndexDomainDimension<view>& b) {
return EqualImpl(a, b);
}
std::ostream& operator<<(std::ostream& os,
const IndexDomainDimension<view>& x) {
if (!x.label().empty()) {
os << QuoteString(x.label()) << ": ";
}
return os << x.optionally_implicit_interval();
}
std::ostream& operator<<(std::ostream& os,
const IndexDomainDimension<container>& x) {
return os << IndexDomainDimension<view>(x);
}
bool AreCompatibleOrUnbounded(IndexInterval a, IndexInterval b) {
Index a_lower = a.inclusive_min();
Index a_upper = a.inclusive_max();
Index b_lower = b.inclusive_min();
Index b_upper = b.inclusive_max();
return (a_lower == b_lower || a_lower == -kInfIndex ||
b_lower == -kInfIndex) &&
(a_upper == b_upper || a_upper == kInfIndex || b_upper == kInfIndex);
}
IndexInterval Hull(IndexInterval a, IndexInterval b) {
if (a.empty()) return b;
if (b.empty()) return a;
const Index lower = std::min(a.inclusive_min(), b.inclusive_min());
const Index upper = std::max(a.inclusive_max(), b.inclusive_max());
return IndexInterval::UncheckedClosed(lower, upper);
}
IndexInterval Intersect(IndexInterval a, IndexInterval b) {
const Index lower = std::max(a.inclusive_min(), b.inclusive_min());
const Index upper = std::min(a.inclusive_max(), b.inclusive_max());
const Index size = upper < lower ? 0 : upper - lower + 1;
return IndexInterval::UncheckedSized(lower, size);
}
OptionallyImplicitIndexInterval Hull(OptionallyImplicitIndexInterval a,
OptionallyImplicitIndexInterval b) {
IndexInterval interval = Hull(a.interval(), b.interval());
bool implicit_lower = (a.inclusive_min() == b.inclusive_min())
? (a.implicit_lower() && b.implicit_lower())
: (interval.inclusive_min() == a.inclusive_min()
? a.implicit_lower()
: b.implicit_lower());
bool implicit_upper = (a.inclusive_max() == b.inclusive_max())
? (a.implicit_upper() && b.implicit_upper())
: (a.inclusive_max() == interval.inclusive_max()
? a.implicit_upper()
: b.implicit_upper());
return OptionallyImplicitIndexInterval{interval, implicit_lower,
implicit_upper};
}
OptionallyImplicitIndexInterval Intersect(OptionallyImplicitIndexInterval a,
OptionallyImplicitIndexInterval b) {
IndexInterval interval = Intersect(a.interval(), b.interval());
bool implicit_lower = (a.inclusive_min() == b.inclusive_min())
? (a.implicit_lower() && b.implicit_lower())
: (interval.inclusive_min() == a.inclusive_min()
? a.implicit_lower()
: b.implicit_lower());
bool implicit_upper = (a.inclusive_max() == b.inclusive_max())
? (a.implicit_upper() && b.implicit_upper())
: (a.inclusive_max() == interval.inclusive_max()
? a.implicit_upper()
: b.implicit_upper());
return OptionallyImplicitIndexInterval{interval, implicit_lower,
implicit_upper};
}
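// Like Intersect above, but when exactly one of the two corresponding bounds
// is implicit, the explicit bound takes precedence.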
OptionallyImplicitIndexInterval IntersectPreferringExplicit(
OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b) {
const Index inclusive_min =
a.implicit_lower() == b.implicit_lower()
? std::max(a.inclusive_min(), b.inclusive_min())
: std::max(a.effective_interval().inclusive_min(),
b.effective_interval().inclusive_min());
const Index inclusive_max =
a.implicit_upper() == b.implicit_upper()
? std::min(a.inclusive_max(), b.inclusive_max())
: std::min(a.effective_interval().inclusive_max(),
b.effective_interval().inclusive_max());
return OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(
inclusive_min, std::max(inclusive_min - 1, inclusive_max)),
a.implicit_lower() && b.implicit_lower(),
a.implicit_upper() && b.implicit_upper()};
}
bool ContainsOrUnbounded(IndexInterval outer, IndexInterval inner) {
return (inner.inclusive_min() == -kInfIndex ||
inner.inclusive_min() >= outer.inclusive_min()) &&
(inner.inclusive_max() == kInfIndex ||
inner.inclusive_max() <= outer.inclusive_max());
}
Result<IndexInterval> ShiftInterval(IndexInterval interval, Index min_offset,
Index max_offset) {
Index inclusive_min;
if (interval.inclusive_min() == -kInfIndex) {
inclusive_min = -kInfIndex;
} else if (internal::AddOverflow(interval.inclusive_min(), min_offset,
&inclusive_min) ||
!IsFiniteIndex(inclusive_min)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
interval.inclusive_min(), " + ", min_offset, " is outside valid range ",
IndexInterval::FiniteRange()));
}
Index inclusive_max;
if (interval.inclusive_max() == kInfIndex) {
inclusive_max = kInfIndex;
} else if (internal::AddOverflow(interval.inclusive_max(), max_offset,
&inclusive_max) ||
!IsFiniteIndex(inclusive_max)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
interval.inclusive_max(), " + ", max_offset, " is outside valid range ",
IndexInterval::FiniteRange()));
}
return IndexInterval::UncheckedClosed(inclusive_min, inclusive_max);
}
Result<IndexInterval> ShiftIntervalBackward(IndexInterval interval,
Index min_offset,
Index max_offset) {
return ShiftInterval(
interval, internal::wrap_on_overflow::Multiply(min_offset, Index(-1)),
internal::wrap_on_overflow::Multiply(max_offset, Index(-1)));
}
Result<IndexInterval> ShiftInterval(IndexInterval interval, Index offset) {
return ShiftInterval(interval, offset, offset);
}
Result<IndexInterval> ShiftIntervalBackward(IndexInterval interval,
Index offset) {
return ShiftIntervalBackward(interval, offset, offset);
}
Result<IndexInterval> ShiftIntervalTo(IndexInterval interval, Index origin) {
if (!IsFiniteIndex(origin)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Origin ", origin, " is outside valid range ",
IndexInterval::FiniteRange()));
}
if (interval.inclusive_min() == -kInfIndex) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Interval ", interval, " is not bounded below"));
}
Index offset;
[[maybe_unused]] const bool overflow =
internal::SubOverflow(origin, interval.inclusive_min(), &offset);
assert(!overflow);
return ShiftInterval(interval, offset);
}
absl::Status CheckContains(IndexInterval interval, Index index) {
if (Contains(interval, index)) return absl::OkStatus();
return absl::OutOfRangeError(tensorstore::StrCat(
"Index ", index, " is outside valid range ", interval));
}
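// Computes the domain and adjusted start index obtained by slicing `orig`
// with (start, stop_or_size, stride); kImplicit bounds default to the
// corresponding bound of `orig`. The returned interval is in the new
// (divided-by-stride) coordinate space.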
Result<std::pair<OptionallyImplicitIndexInterval, Index>> ExtractStridedSlice(
OptionallyImplicitIndexInterval orig, IntervalForm interval_form,
Index start, Index stop_or_size, Index stride) {
const IndexInterval constraint = IndexInterval::UncheckedClosed(
orig.implicit_lower() ? -kInfIndex : orig.inclusive_min(),
orig.implicit_upper() ? kInfIndex : orig.inclusive_max());
if (stride == 0 || stride == std::numeric_limits<Index>::min()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid stride ", stride));
}
if (start == kImplicit) {
start = stride > 0 ? orig.inclusive_min() : orig.inclusive_max();
} else {
if (!IsValidIndex(start)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid start index ", start));
}
orig.implicit_lower() = false;
}
Index inclusive_stop;
if (interval_form == IntervalForm::sized) {
Index size = stop_or_size;
if (size == kImplicit) {
inclusive_stop = stride > 0 ? orig.inclusive_max() : orig.inclusive_min();
} else {
if (size < 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Negative size ", size, " specified for sized interval"));
}
orig.implicit_upper() = false;
if (size == 0) {
inclusive_stop = start + (stride > 0 ? -1 : 1);
} else {
if (internal::MulOverflow(stride, size - 1, &inclusive_stop) ||
internal::AddOverflow(start, inclusive_stop, &inclusive_stop)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Integer overflow computing slice result"));
}
}
}
} else {
if (stop_or_size == kImplicit) {
inclusive_stop = stride > 0 ? orig.inclusive_max() : orig.inclusive_min();
} else {
orig.implicit_upper() = false;
if (interval_form == IntervalForm::closed ||
!IsFiniteIndex(stop_or_size)) {
inclusive_stop = stop_or_size;
} else {
assert(interval_form == IntervalForm::half_open);
inclusive_stop = stop_or_size + (stride > 0 ? -1 : 1);
}
}
}
if (std::abs(stride) != 1 && !IsFiniteIndex(start)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Slicing with non-unit stride of ", stride,
" requires a finite start index"));
}
Index adjusted_inclusive_min, adjusted_inclusive_max;
if (stride > 0) {
adjusted_inclusive_min = start;
adjusted_inclusive_max = inclusive_stop;
} else {
adjusted_inclusive_min = inclusive_stop;
adjusted_inclusive_max = start;
std::swap(orig.implicit_lower(), orig.implicit_upper());
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto adjusted_interval,
IndexInterval::Closed(adjusted_inclusive_min, adjusted_inclusive_max));
if (!Contains(constraint, adjusted_interval)) {
return absl::OutOfRangeError(
tensorstore::StrCat("Slice interval ", adjusted_interval,
" is not contained within domain ", constraint));
}
Index new_start = start / stride;
Index new_size =
std::abs(inclusive_stop) == kInfIndex
? kInfIndex + 1 - new_start
: CeilOfRatio(adjusted_interval.size(), std::abs(stride));
orig.interval() = IndexInterval::UncheckedSized(new_start, new_size);
return {std::in_place, orig, start};
}
Result<std::pair<OptionallyImplicitIndexInterval, Index>>
ExtractHalfOpenStridedSlice(OptionallyImplicitIndexInterval orig, Index start,
Index stop, Index stride) {
return ExtractStridedSlice(orig, IntervalForm::half_open, start, stop,
stride);
}
Result<std::pair<OptionallyImplicitIndexInterval, Index>>
ExtractClosedStridedSlice(OptionallyImplicitIndexInterval orig, Index start,
Index stop, Index stride) {
return ExtractStridedSlice(orig, IntervalForm::closed, start, stop, stride);
}
Result<std::pair<OptionallyImplicitIndexInterval, Index>>
ExtractSizedStridedSlice(OptionallyImplicitIndexInterval orig, Index start,
Index size, Index stride) {
return ExtractStridedSlice(orig, IntervalForm::sized, start, size, stride);
}
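// Combines ExtractStridedSlice with an optional origin translation
// (`translate_origin_to`, ignored when equal to kImplicit).  On success,
// `*new_domain` is the sliced domain and `*output_offset` satisfies
// `original_index = new_index * stride + *output_offset`.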
absl::Status ComputeStridedSliceMap(OptionallyImplicitIndexInterval orig,
IntervalForm interval_form,
Index translate_origin_to, Index start,
Index stop_or_size, Index stride,
OptionallyImplicitIndexInterval* new_domain,
Index* output_offset) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval_and_adjusted_start,
ExtractStridedSlice(orig, interval_form, start, stop_or_size, stride));
OptionallyImplicitIndexInterval& new_interval =
new_interval_and_adjusted_start.first;
Index adjusted_start = new_interval_and_adjusted_start.second;
if (translate_origin_to != kImplicit) {
TENSORSTORE_ASSIGN_OR_RETURN(
new_interval.interval(),
ShiftIntervalTo(new_interval.interval(), translate_origin_to));
}
*new_domain = new_interval;
*output_offset = adjusted_start - new_interval.inclusive_min() * stride;
return absl::OkStatus();
}
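// Computes the preimage of `interval` under the affine map
// `x -> x * divisor + offset`, i.e. the interval of all `x` for which
// `x * divisor + offset` lies in `interval`.  Fails with InvalidArgument on
// integer overflow.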
Result<IndexInterval> GetAffineTransformDomain(IndexInterval interval,
Index offset, Index divisor) {
assert(divisor != 0);
if (interval == IndexInterval()) {
return interval;
}
do {
Index result_lower, result_size;
Index lower, upper;
if (divisor < 0) {
if (divisor == std::numeric_limits<Index>::min() ||
offset == std::numeric_limits<Index>::min()) {
break;
}
divisor = -divisor;
offset = -offset;
lower = -interval.inclusive_max();
upper = -interval.inclusive_min();
if (interval.empty()) {
--lower;
--upper;
}
} else {
lower = interval.inclusive_min();
upper = interval.inclusive_max();
}
if (lower == -kInfIndex) {
result_lower = -kInfIndex;
} else {
if (internal::SubOverflow(lower, offset, &result_lower)) break;
result_lower = CeilOfRatio(result_lower, divisor);
if (!IsFiniteIndex(result_lower)) break;
}
if (interval.empty()) {
result_size = 0;
} else if (upper == kInfIndex) {
result_size = kInfIndex - result_lower + 1;
} else {
Index result_upper;
if (internal::SubOverflow(upper, offset, &result_upper)) break;
result_upper = FloorOfRatio(result_upper, divisor);
if (!IsFiniteIndex(result_upper)) break;
result_size = result_upper - result_lower + 1;
}
return IndexInterval::UncheckedSized(result_lower, result_size);
} while (false);
return absl::InvalidArgumentError(
tensorstore::StrCat("Integer overflow propagating range ", interval,
" through inverse affine transform with offset ",
offset, " and multiplier ", divisor));
}
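// Overload that additionally propagates the implicit-bound flags, swapping
// them when `divisor` is negative (since negation reverses the bounds).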
Result<OptionallyImplicitIndexInterval> GetAffineTransformDomain(
OptionallyImplicitIndexInterval interval, Index offset, Index divisor) {
TENSORSTORE_ASSIGN_OR_RETURN(
interval.interval(),
GetAffineTransformDomain(interval.interval(), offset, divisor));
if (divisor < 0) {
std::swap(interval.implicit_lower(), interval.implicit_upper());
}
return interval;
}
namespace {
absl::Status GetAffineTransformError(IndexInterval interval, Index offset,
Index multiplier) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Integer overflow computing affine transform of domain ", interval,
" with offset ", offset, " and multiplier ", multiplier));
}
}
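// Computes the image of `interval` under the affine map
// `x -> x * multiplier + offset`.  Infinite bounds are preserved (with their
// signs flipped for a negative multiplier), an empty interval maps to an
// empty interval, and a zero multiplier maps any non-empty interval to the
// singleton {offset}.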
Result<IndexInterval> GetAffineTransformRange(IndexInterval interval,
Index offset, Index multiplier) {
const auto transform_bound_overflow = [&](Index* bound) {
if (*bound == -kInfIndex || *bound == kInfIndex) {
if (multiplier < 0) *bound *= -1;
return false;
}
return (internal::MulOverflow(*bound, multiplier, bound) ||
internal::AddOverflow(*bound, offset, bound) ||
!IsFiniteIndex(*bound));
};
Index lower = interval.inclusive_min(), upper = interval.inclusive_max();
if (transform_bound_overflow(&lower) || transform_bound_overflow(&upper)) {
return GetAffineTransformError(interval, offset, multiplier);
}
if (interval.empty()) {
return IndexInterval::UncheckedSized(lower, 0);
}
if (multiplier == 0) {
return IndexInterval::UncheckedSized(lower, 1);
}
if (multiplier < 0) std::swap(lower, upper);
return IndexInterval::UncheckedClosed(lower, upper);
}
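// Like GetAffineTransformRange, but widens the result by |divisor| - 1 at
// the end corresponding to the sign of `divisor`, so that it contains every
// index that GetAffineTransformDomain maps back into `interval`.  For
// example, [2, 4] with offset=1 and divisor=3 has range [7, 13] but inverse
// domain [7, 15].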
Result<IndexInterval> GetAffineTransformInverseDomain(IndexInterval interval,
Index offset,
Index divisor) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_interval, GetAffineTransformRange(interval, offset, divisor));
if (new_interval.empty()) return new_interval;
if (divisor > 0 && new_interval.inclusive_max() != kInfIndex) {
Index new_inclusive_max;
if (internal::AddOverflow(new_interval.inclusive_max(), divisor - 1,
&new_inclusive_max) ||
!IsFiniteIndex(new_inclusive_max)) {
return GetAffineTransformError(interval, offset, divisor);
}
return IndexInterval::UncheckedClosed(new_interval.inclusive_min(),
new_inclusive_max);
}
if (divisor < 0 && new_interval.inclusive_min() != -kInfIndex) {
Index new_inclusive_min;
if (internal::AddOverflow(new_interval.inclusive_min(), divisor + 1,
&new_inclusive_min) ||
!IsFiniteIndex(new_inclusive_min)) {
return GetAffineTransformError(interval, offset, divisor);
}
return IndexInterval::UncheckedClosed(new_inclusive_min,
new_interval.inclusive_max());
}
return new_interval;
}
Result<OptionallyImplicitIndexInterval> GetAffineTransformRange(
OptionallyImplicitIndexInterval interval, Index offset, Index multiplier) {
TENSORSTORE_ASSIGN_OR_RETURN(
interval.interval(),
GetAffineTransformRange(interval.interval(), offset, multiplier));
if (multiplier < 0) {
std::swap(interval.implicit_lower(), interval.implicit_upper());
}
return interval;
}
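// Merges two dimension labels, treating an empty label as unspecified;
// fails if both labels are non-empty and differ.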
Result<std::string_view> MergeDimensionLabels(std::string_view a,
std::string_view b) {
if (a.empty()) return b;
if (b.empty()) return a;
if (a == b) return a;
return absl::InvalidArgumentError("Dimension labels do not match");
}
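// Merges the bounds of `a` and `b`: an implicit infinite bound defers to
// the other interval's bound; otherwise the bounds must match exactly, and
// the merged bound is implicit only if both inputs are implicit.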
Result<OptionallyImplicitIndexInterval> MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b) {
if (a == b) return a;
Index inclusive_min, inclusive_max;
bool implicit_lower, implicit_upper;
  if (a.inclusive_min() == -kInfIndex && a.implicit_lower()) {
inclusive_min = b.inclusive_min();
implicit_lower = b.implicit_lower();
  } else if (b.inclusive_min() == -kInfIndex && b.implicit_lower()) {
inclusive_min = a.inclusive_min();
implicit_lower = a.implicit_lower();
} else if (a.inclusive_min() != b.inclusive_min()) {
return absl::InvalidArgumentError("Lower bounds do not match");
} else {
inclusive_min = a.inclusive_min();
implicit_lower = a.implicit_lower() && b.implicit_lower();
}
  if (a.inclusive_max() == kInfIndex && a.implicit_upper()) {
inclusive_max = b.inclusive_max();
implicit_upper = b.implicit_upper();
  } else if (b.inclusive_max() == kInfIndex && b.implicit_upper()) {
inclusive_max = a.inclusive_max();
implicit_upper = a.implicit_upper();
} else if (a.inclusive_max() != b.inclusive_max()) {
return absl::InvalidArgumentError("Upper bounds do not match");
} else {
inclusive_max = a.inclusive_max();
implicit_upper = a.implicit_upper() && b.implicit_upper();
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto interval, IndexInterval::Closed(inclusive_min, inclusive_max));
return OptionallyImplicitIndexInterval{interval, implicit_lower,
implicit_upper};
}
namespace serialization {
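// An IndexInterval is serialized as the pair (inclusive_min, size); decoding
// re-validates the pair via IndexInterval::Sized.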
bool Serializer<IndexInterval>::Encode(EncodeSink& sink,
const IndexInterval& value) {
return serialization::EncodeTuple(sink, value.inclusive_min(), value.size());
}
bool Serializer<IndexInterval>::Decode(DecodeSource& source,
IndexInterval& value) {
Index inclusive_min, size;
if (!serialization::DecodeTuple(source, inclusive_min, size)) {
return false;
}
TENSORSTORE_ASSIGN_OR_RETURN(value, IndexInterval::Sized(inclusive_min, size),
(source.Fail(_), false));
return true;
}
}
}
#include "tensorstore/index_interval.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::AreCompatibleOrUnbounded;
using ::tensorstore::ComputeStridedSliceMap;
using ::tensorstore::container;
using ::tensorstore::DividePositiveRoundOut;
using ::tensorstore::ExplicitIndexOr;
using ::tensorstore::ExtractClosedStridedSlice;
using ::tensorstore::ExtractHalfOpenStridedSlice;
using ::tensorstore::ExtractSizedStridedSlice;
using ::tensorstore::GetAffineTransformInverseDomain;
using ::tensorstore::ImplicitOrEqual;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainDimension;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexIntervalRef;
using ::tensorstore::Intersect;
using ::tensorstore::IntervalForm;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeDimensionLabels;
using ::tensorstore::MergeOptionallyImplicitIndexIntervals;
using ::tensorstore::OptionallyImplicitIndexInterval;
using ::tensorstore::ShiftInterval;
using ::tensorstore::ShiftIntervalBackward;
using ::tensorstore::ShiftIntervalTo;
using ::tensorstore::StrCat;
using ::tensorstore::view;
using ::tensorstore::serialization::TestSerializationRoundTrip;
using ::testing::Optional;
using ::testing::Pair;
TEST(IndexIntervalTest, DefaultConstruct) {
IndexInterval x;
EXPECT_EQ(-kInfIndex, x.inclusive_min());
EXPECT_EQ(-kInfIndex - 1, x.exclusive_min());
EXPECT_EQ(kInfIndex, x.inclusive_max());
EXPECT_EQ(kInfIndex + 1, x.exclusive_max());
EXPECT_EQ(kInfSize, x.size());
EXPECT_FALSE(x.empty());
}
TEST(IndexIntervalTest, Empty) {
EXPECT_TRUE(IndexInterval::UncheckedSized(1, 0).empty());
}
TEST(IndexIntervalTest, ValidSized) {
EXPECT_TRUE(IndexInterval::ValidSized(0, 0));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, kInfSize));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, 100));
EXPECT_TRUE(IndexInterval::ValidSized(kInfIndex - 5, 6));
EXPECT_TRUE(IndexInterval::ValidSized(-kInfIndex, 2));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidSized(5, -1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex - 5, 7));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex, 0));
EXPECT_FALSE(IndexInterval::ValidSized(-kInfIndex, 1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex, 1));
EXPECT_FALSE(IndexInterval::ValidSized(kInfIndex, 0));
}
TEST(IndexIntervalTest, ValidClosed) {
EXPECT_TRUE(IndexInterval::ValidClosed(0, 0));
EXPECT_TRUE(IndexInterval::ValidClosed(0, -1));
EXPECT_TRUE(IndexInterval::ValidClosed(-kInfIndex, kInfIndex));
EXPECT_TRUE(IndexInterval::ValidClosed(-5, kInfIndex));
EXPECT_TRUE(IndexInterval::ValidClosed(-kInfIndex, -kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidClosed(0, -2));
EXPECT_FALSE(IndexInterval::ValidClosed(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidClosed(0, kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidClosed(-kInfIndex, -kInfIndex));
EXPECT_FALSE(IndexInterval::ValidClosed(+kInfIndex, +kInfIndex));
}
TEST(IndexIntervalTest, ValidHalfOpen) {
EXPECT_TRUE(IndexInterval::ValidHalfOpen(0, 0));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(0, -1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-kInfIndex, kInfIndex + 1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-5, kInfIndex + 1));
EXPECT_TRUE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex + 2));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex - 1, 0));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(0, kInfIndex + 2));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(-kInfIndex, -kInfIndex + 1));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(kInfIndex, kInfIndex));
EXPECT_FALSE(IndexInterval::ValidHalfOpen(kInfIndex, kInfIndex + 1));
}
TEST(IndexIntervalTest, Sized) {
EXPECT_EQ(IndexInterval::UncheckedSized(0, 5), IndexInterval::Sized(0, 5));
EXPECT_THAT(IndexInterval::Sized(0, -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, UncheckedSized) {
auto x = IndexInterval::UncheckedSized(1, 5);
EXPECT_EQ(1, x.inclusive_min());
EXPECT_EQ(0, x.exclusive_min());
EXPECT_EQ(5, x.size());
EXPECT_EQ(5, x.inclusive_max());
EXPECT_EQ(6, x.exclusive_max());
}
TEST(IndexIntervalTest, Equality) {
EXPECT_TRUE(IndexInterval::UncheckedSized(1, 2) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(1, 2) !=
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(1, 3) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_FALSE(IndexInterval::UncheckedSized(2, 2) ==
IndexInterval::UncheckedSized(1, 2));
EXPECT_TRUE(IndexInterval::UncheckedSized(2, 3) ==
IndexInterval::UncheckedClosed(2, 4));
EXPECT_TRUE(IndexInterval::UncheckedSized(2, 3) ==
IndexInterval::UncheckedHalfOpen(2, 5));
}
TEST(IndexIntervalTest, UncheckedClosed) {
EXPECT_EQ(IndexInterval::UncheckedSized(2, 3),
IndexInterval::UncheckedClosed(2, 4));
}
TEST(IndexIntervalTest, Closed) {
EXPECT_EQ(IndexInterval::UncheckedClosed(2, 4), IndexInterval::Closed(2, 4));
EXPECT_THAT(IndexInterval::Closed(2, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, UncheckedHalfOpen) {
EXPECT_EQ(IndexInterval::UncheckedSized(2, 2),
IndexInterval::UncheckedHalfOpen(2, 4));
}
TEST(IndexIntervalTest, HalfOpen) {
EXPECT_EQ(IndexInterval::UncheckedHalfOpen(2, 4),
IndexInterval::HalfOpen(2, 4));
EXPECT_THAT(IndexInterval::HalfOpen(2, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalTest, ContainsIndex) {
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 5));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 3));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15), 15));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15), 2));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15), 16));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15),
kMinFiniteIndex));
EXPECT_FALSE(
Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15), -kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, 15), 16));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, kInfIndex), 16));
EXPECT_TRUE(
Contains(IndexInterval::UncheckedClosed(3, kInfIndex), kMaxFiniteIndex));
EXPECT_FALSE(
Contains(IndexInterval::UncheckedClosed(3, kInfIndex), kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
-kInfIndex));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
kInfIndex));
EXPECT_TRUE(
Contains(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), 3));
}
TEST(IndexIntervalTest, ContainsInterval) {
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 15)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(4, 15)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 14)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(6, 8)));
EXPECT_TRUE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedSized(20, 0)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(2, 10)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(3, 16)));
EXPECT_FALSE(Contains(IndexInterval::UncheckedClosed(3, 15),
IndexInterval::UncheckedClosed(5, 16)));
}
TEST(IndexIntervalTest, IsFinite) {
EXPECT_TRUE(IsFinite(IndexInterval::UncheckedClosed(3, 15)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(-kInfIndex, 15)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_FALSE(IsFinite(IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex)));
}
TEST(IndexIntervalTest, Intersect) {
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 5),
Intersect(IndexInterval::UncheckedClosed(-3, 5),
IndexInterval::UncheckedClosed(3, 10)));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 5),
Intersect(IndexInterval::UncheckedClosed(3, 10),
IndexInterval::UncheckedClosed(-3, 5)));
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 10),
Intersect(IndexInterval::UncheckedClosed(3, 10),
IndexInterval::UncheckedClosed(-3, 11)));
EXPECT_EQ(IndexInterval::UncheckedSized(3, 0),
Intersect(IndexInterval::UncheckedClosed(-3, 0),
IndexInterval::UncheckedClosed(3, 5)));
}
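// Exhaustively checks the implicit-bound flags produced by Intersect: equal
// intervals AND their flags together, while otherwise each bound (and its
// flag) comes from whichever interval supplies it.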
TEST(IndexIntervalTest, IntersectOptionallyImplicit) {
using OIII = OptionallyImplicitIndexInterval;
EXPECT_THAT(
Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), false, false},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, false}));
EXPECT_THAT(
Intersect(OIII{IndexInterval::UncheckedClosed(2, 5), false, true},
OIII{IndexInterval::UncheckedClosed(1, 6), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, true}));
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), c, d}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), c, b}))
<< x;
}
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(0, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false}));
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kMaxFiniteIndex),
false, true}));
EXPECT_THAT(
Intersect(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false},
OIII{IndexInterval::UncheckedClosed(0, 10), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), true, true}));
}
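// IntersectPreferringExplicit resolves each bound in favor of the explicit
// side when exactly one of the two bounds is implicit; when both sides have
// the same implicit flag it behaves like Intersect.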
TEST(IndexIntervalTest, IntersectPreferringExplicit) {
using OIII = OptionallyImplicitIndexInterval;
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(Intersect(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), a, b}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), a, b}))
<< x;
}
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(1, 5), false, false},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(2, 5), false, true},
OIII{IndexInterval::UncheckedClosed(1, 6), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(2, 5), false, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false},
OIII{IndexInterval::UncheckedClosed(3, 10), true, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), true, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), false, false},
OIII{IndexInterval::UncheckedClosed(3, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), false, true},
OIII{IndexInterval::UncheckedClosed(3, 10), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), false, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, true},
OIII{IndexInterval::UncheckedClosed(3, 10), true, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(3, 5), true, true}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false},
OIII{IndexInterval::UncheckedClosed(-5, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-5, 5), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-5, 10), false, false},
OIII{IndexInterval::UncheckedClosed(-3, 5), true, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-5, 5), false, false}));
EXPECT_THAT(IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-3, 12), true, false},
OIII{IndexInterval::UncheckedClosed(-5, 10), false, true}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-5, 12), false, false}));
EXPECT_THAT(IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-5, 10), false, true},
OIII{IndexInterval::UncheckedClosed(-3, 12), true, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-5, 12), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(0, 10), false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(0, 10), false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}));
EXPECT_THAT(
IntersectPreferringExplicit(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false},
OIII{IndexInterval::UncheckedClosed(0, 10), true, true}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
false, false}));
}
TEST(IndexIntervalTest, Hull) {
EXPECT_EQ(IndexInterval::UncheckedClosed(3, 15),
Hull(IndexInterval::UncheckedClosed(3, 5),
IndexInterval::UncheckedClosed(10, 15)));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 15),
Hull(IndexInterval::UncheckedClosed(0, -1),
IndexInterval::UncheckedClosed(5, 15)));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 15),
Hull(IndexInterval::UncheckedClosed(5, 15),
IndexInterval::UncheckedClosed(0, -1)));
EXPECT_EQ(IndexInterval::UncheckedClosed(0, -1),
Hull(IndexInterval::UncheckedClosed(5, 4),
IndexInterval::UncheckedClosed(0, -1)));
}
TEST(IndexIntervalTest, HullOptionallyImplicit) {
using OIII = OptionallyImplicitIndexInterval;
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(1, 5), false, true},
OIII{IndexInterval::UncheckedClosed(2, 6), false, true}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(1, 6), false, true}));
for (int x = 0; x < 16; x++) {
const bool a = ((x & 1) != 0);
const bool b = ((x & 2) != 0);
const bool c = ((x & 4) != 0);
const bool d = ((x & 8) != 0);
EXPECT_THAT(Hull(OIII{IndexInterval::UncheckedClosed(1, 5), a, b},
OIII{IndexInterval::UncheckedClosed(1, 5), c, d}),
::testing::Eq(
OIII{IndexInterval::UncheckedClosed(1, 5), a && c, b && d}))
<< x;
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(-3, 5), a, b},
OIII{IndexInterval::UncheckedClosed(3, 10), c, d}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-3, 10), a, d}))
<< x;
}
EXPECT_THAT(
Hull(OIII{IndexInterval::UncheckedClosed(-kInfIndex, kMaxFiniteIndex),
true, true},
OIII{IndexInterval::UncheckedClosed(kMinFiniteIndex, kInfIndex),
false, false}),
::testing::Eq(OIII{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex),
true, false}));
}
TEST(IndexIntervalTest, ContainsOrUnbounded) {
EXPECT_TRUE(
ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_TRUE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(6, 9)));
EXPECT_FALSE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(4, 10)));
EXPECT_TRUE(
ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(5, kInfIndex)));
EXPECT_FALSE(ContainsOrUnbounded(IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(5, 11)));
EXPECT_TRUE(ContainsOrUnbounded(
IndexInterval::UncheckedClosed(5, 10),
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, AreCompatibleOrUnbounded) {
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval(), IndexInterval()));
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval(),
IndexInterval::UncheckedSized(1, 4)));
EXPECT_TRUE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval()));
EXPECT_FALSE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval::UncheckedSized(1, 5)));
EXPECT_FALSE(AreCompatibleOrUnbounded(IndexInterval::UncheckedSized(1, 4),
IndexInterval::UncheckedSized(2, 3)));
EXPECT_TRUE(
AreCompatibleOrUnbounded(IndexInterval::UncheckedClosed(1, 4),
IndexInterval::UncheckedClosed(-kInfIndex, 4)));
EXPECT_TRUE(
AreCompatibleOrUnbounded(IndexInterval::UncheckedClosed(1, 4),
IndexInterval::UncheckedClosed(1, kInfIndex)));
}
TEST(IndexIntervalTest, Ostream) {
EXPECT_EQ("[1, 3)", StrCat(IndexInterval::UncheckedClosed(1, 2)));
EXPECT_EQ("(-inf, 3)", StrCat(IndexInterval::UncheckedClosed(-kInfIndex, 2)));
EXPECT_EQ("[7, +inf)", StrCat(IndexInterval::UncheckedClosed(7, kInfIndex)));
}
TEST(IndexIntervalTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
IndexInterval(),
IndexInterval::UncheckedSized(0, 1),
IndexInterval::UncheckedSized(0, 0),
IndexInterval::UncheckedSized(0, 2),
IndexInterval::UncheckedSized(1, 2),
}));
}
TEST(IndexIntervalTest, ShiftInterval) {
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, kInfIndex), 2),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(ShiftInterval(
IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101), -1),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
Optional(IndexInterval::UncheckedClosed(-kInfIndex + 5,
-kInfIndex + 10)));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(5, 10), kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-1, 10), kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, -5),
kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-5 \\+ -[0-9]+ is outside valid range .*"));
}
TEST(IndexIntervalTest, ShiftIntervalBackward) {
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval(), std::numeric_limits<Index>::min()),
Optional(IndexInterval()));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kInfIndex, 8), -2),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, kInfIndex), -2),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101), 1),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
Optional(
IndexInterval::UncheckedClosed(-kInfIndex + 5, -kInfIndex + 10)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(5, 10),
kMinFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"5 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(-1, 10),
kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kInfIndex, -5),
kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-5 \\+ -[0-9]+ is outside valid range .*"));
}
TEST(IndexIntervalTest, ShiftIntervalSeparateOffsets) {
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 0, 5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 1, 5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex + 1, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), -1, 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-[0-9]+ \\+ -1 is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(-1, 8),
std::numeric_limits<Index>::min(), 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), -1, 0),
Optional(IndexInterval::UncheckedClosed(1, kMaxFiniteIndex)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), -1, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"[0-9]+ \\+ 1 is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(2, 1), -1,
std::numeric_limits<Index>::max()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1 \\+ [0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(0, 8),
std::numeric_limits<Index>::min(), 5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"0 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftInterval(IndexInterval::UncheckedClosed(1, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2, 5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 13)));
EXPECT_THAT(
ShiftInterval(IndexInterval::UncheckedClosed(1, +kInfIndex), 2, 5),
Optional(IndexInterval::UncheckedClosed(3, +kInfIndex)));
EXPECT_THAT(ShiftInterval(
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex), 2, 5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, ShiftIntervalBackwardSeparateOffsets) {
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 0, -5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 13)));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8),
-1, -5),
Optional(IndexInterval::UncheckedClosed(-kMaxFiniteIndex + 1, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kMaxFiniteIndex, 8), 1, -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-[0-9]+ \\+ -1 is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(-1, 8),
std::numeric_limits<Index>::max(), -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"-1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), 1, 0),
Optional(IndexInterval::UncheckedClosed(1, kMaxFiniteIndex)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(2, kMaxFiniteIndex), 1, -1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"[0-9]+ \\+ 1 is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(2, 1), 1,
std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"1 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalBackward(IndexInterval::UncheckedClosed(0, 8),
std::numeric_limits<Index>::max(), -5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"0 \\+ -[0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalBackward(IndexInterval::UncheckedClosed(1, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kInfIndex, 8), -2, -5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, 13)));
EXPECT_THAT(ShiftIntervalBackward(
IndexInterval::UncheckedClosed(1, +kInfIndex), -2, -5),
Optional(IndexInterval::UncheckedClosed(3, +kInfIndex)));
EXPECT_THAT(
ShiftIntervalBackward(
IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex), -2, -5),
Optional(IndexInterval::UncheckedClosed(-kInfIndex, +kInfIndex)));
}
TEST(IndexIntervalTest, ShiftIntervalTo) {
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(1, 8), 3),
Optional(IndexInterval::UncheckedClosed(3, 10)));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(-kInfIndex, 8), 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Interval .* is not bounded below"));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(1, kInfIndex), 3),
Optional(IndexInterval::UncheckedClosed(3, kInfIndex)));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(kMinFiniteIndex + 1, 101),
kMinFiniteIndex),
Optional(IndexInterval::Closed(kMinFiniteIndex, 100)));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), -kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Origin -[0-9]+ is outside valid range .*"));
EXPECT_THAT(ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Origin [0-9]+ is outside valid range .*"));
EXPECT_THAT(
ShiftIntervalTo(IndexInterval::UncheckedClosed(5, 10), kMaxFiniteIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"10 \\+ [0-9]+ is outside valid range .*"));
}
TEST(ExtractStridedSliceTest, Closed) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 6, 9, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(6, 4), false, false}, 6));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, true}, 3, 15, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(3, 15), false, false}, 3));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), false, true}, 10));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), true, false}, 10));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, 6, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-4, 2), false, false}, 9));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
kImplicit, 9, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(5, 5), false, false}, 5));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
-kInfIndex, 9, 1),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Slice interval \\(-inf, 10\\) is not contained "
"within domain \\[5, 11\\)"));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, kImplicit, 6,
-2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-5, 3), false, false}, 10));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9,
-kInfIndex, -2),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Slice interval \\(-inf, 10\\) is not contained "
"within domain \\[5, 11\\)"));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, kImplicit,
-2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-4, 3), false, false}, 9));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7,
kImplicit, 2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(3, 2), false, false}, 7));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), false, false},
kImplicit, 10, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-kInfIndex, 10), false, false},
-kInfIndex));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), false, false},
5, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(5, kInfIndex), false, false},
5));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, kInfIndex), false, false},
kImplicit, 5, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-kInfIndex, -5), false, false},
kInfIndex));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, kImplicit, 6,
0),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid stride 0"));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
kImplicit, 6, std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid stride -[0-9]+"));
EXPECT_THAT(
ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 4, 6, 1),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Slice interval \\[4, 7\\) is not contained within domain "
"\\[5, 11\\)"));
EXPECT_THAT(ExtractClosedStridedSlice(
{IndexInterval::UncheckedClosed(3, 10), false, false},
-kInfIndex - 1, 10, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid start index -[0-9]+"));
}
TEST(ExtractStridedSliceTest, Sized) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, 3, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-4, -2), false, false}, 9));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, 7, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(7, 10), false, true}, 7));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, true},
kImplicit, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(5, 10), true, true}, 5));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), false, true}, 10));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, kImplicit,
kImplicit, -1)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-10, -5), true, false}, 10));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, kImplicit,
-2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-4, -2), false, false}, 9));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7, kImplicit,
2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(3, 4), false, false}, 7));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7, 0, 2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(3, 0), false, false}, 7));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 7, 0, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedSized(-3, 0), false, false}, 7));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, -1, -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Negative size -1 specified for sized interval"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
std::numeric_limits<Index>::min() + 1, 0, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid start index -[0-9]+"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false},
std::numeric_limits<Index>::max(), 0, -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid start index [0-9]+"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 5, 100,
kInfIndex),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Integer overflow computing slice result"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 5,
kInfIndex, 2),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Integer overflow computing slice result"));
EXPECT_THAT(
ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(-kInfIndex, 10), false, false},
kImplicit, kImplicit, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Slicing with non-unit stride of 2 requires a "
"finite start index"));
EXPECT_THAT(ExtractSizedStridedSlice(
{IndexInterval::UncheckedClosed(3, kInfIndex), false, false},
kImplicit, kImplicit, -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Slicing with non-unit stride of -2 requires a "
"finite start index"));
}
TEST(ExtractStridedSliceTest, HalfOpen) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9, 7, -2)
.value(),
Pair(OIII{IndexInterval::UncheckedClosed(-4, -4), false, false}, 9));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, kImplicit, 8, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(5, 8), true, false}, 5));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, 6, kImplicit, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(6, 11), false, true}, 6));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), true, false}, 3, 8, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(3, 8), false, false}, 3));
EXPECT_THAT(
ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, true}, 6, 15, 1)
.value(),
Pair(OIII{IndexInterval::UncheckedHalfOpen(6, 15), false, false}, 6));
EXPECT_THAT(ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9,
std::numeric_limits<Index>::min() + 1, 2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid closed index interval"));
EXPECT_THAT(ExtractHalfOpenStridedSlice(
{IndexInterval::UncheckedClosed(5, 10), false, false}, 9,
std::numeric_limits<Index>::max(), -2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".* do not specify a valid closed index interval"));
}
TEST(ComputeStridedSliceMapTest, NoTranslationStride1) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kImplicit,
2,
8,
1, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(2, 8), false, false}),
new_domain);
EXPECT_EQ(0, output_offset);
}
TEST(ComputeStridedSliceMapTest, NoTranslationStride2) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kImplicit,
2,
8,
2, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 4), false, false}),
new_domain);
EXPECT_EQ(0, output_offset);
}
TEST(ComputeStridedSliceMapTest, NoTranslationStrideNegative2) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kImplicit,
9,
2,
-2, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(-4, 0), false, false}),
new_domain);
EXPECT_EQ(1, output_offset);
}
TEST(ComputeStridedSliceMapTest, TranslationStride1) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_EQ(absl::OkStatus(),
ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
7,
2,
8,
1, &new_domain, &output_offset));
EXPECT_EQ((OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(7, 13), false, false}),
new_domain);
EXPECT_EQ(-5, output_offset);
}
TEST(ComputeStridedSliceMapTest, TranslationError) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_THAT(ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(1, 10), false, false},
IntervalForm::half_open,
kMaxFiniteIndex,
2,
8,
1, &new_domain, &output_offset),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(ComputeStridedSliceMapTest, SliceError) {
OptionallyImplicitIndexInterval new_domain;
Index output_offset;
EXPECT_THAT(ComputeStridedSliceMap(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedHalfOpen(3, 10), false, false},
IntervalForm::half_open,
kMaxFiniteIndex,
2,
8,
1, &new_domain, &output_offset),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(GetAffineTransformDomainTest, Divisor1) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-9, -1),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
10, 1)
.value());
}
TEST(GetAffineTransformDomainTest, Divisor2) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-2, 1),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
6, 2)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorNegative1) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-3, 5),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
6, -1)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorNegative2) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-1, 2),
GetAffineTransformDomain(IndexInterval::UncheckedClosed(1, 9),
6, -2)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorNegative2LargeMagnitude) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-(kInfIndex - 10) / 2, 5),
GetAffineTransformDomain(
IndexInterval::UncheckedClosed(-10, kInfIndex - 10),
0, -2)
.value());
}
TEST(GetAffineTransformDomainTest, EmptyInterval) {
EXPECT_EQ(IndexInterval::UncheckedSized(-2, 0),
GetAffineTransformDomain(IndexInterval::UncheckedSized(10, 0),
5, -2)
.value());
}
TEST(GetAffineTransformDomainTest, DivisorInvalid) {
EXPECT_THAT(GetAffineTransformDomain(
IndexInterval::UncheckedClosed(1, 10),
0, std::numeric_limits<Index>::min()),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GetAffineTransformDomainTest, OffsetInvalid) {
EXPECT_THAT(GetAffineTransformDomain(
IndexInterval::UncheckedClosed(1, 10),
std::numeric_limits<Index>::min(), -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
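// Checks that GetAffineTransformRange and GetAffineTransformDomain invert
// each other for the given parameters, and that the widened interval from
// GetAffineTransformInverseDomain also maps back to the original domain.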
void TestGetAffineTransformRangeRoundTrip(IndexInterval domain, Index offset,
Index multiplier,
IndexInterval range) {
EXPECT_THAT(GetAffineTransformRange(domain, offset, multiplier),
::testing::Optional(range))
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
EXPECT_THAT(GetAffineTransformDomain(range, offset, multiplier),
::testing::Optional(domain))
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto inv_domain,
GetAffineTransformInverseDomain(domain, offset, multiplier));
EXPECT_THAT(GetAffineTransformDomain(inv_domain, offset, multiplier),
::testing::Optional(domain))
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range
<< ", inv_domain=" << inv_domain;
}
TEST(GetAffineTransformRangeTest, SerializationRoundTrip) {
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedClosed(1, 10), 3,
1,
IndexInterval::UncheckedClosed(4, 13));
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedClosed(1, 10), 3,
2,
IndexInterval::UncheckedClosed(2 + 3, 10 * 2 + 3));
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedSized(4, 0), 3,
2, IndexInterval::UncheckedSized(2 * 4 + 3, 0));
TestGetAffineTransformRangeRoundTrip(
IndexInterval::UncheckedSized(4, 0), 3,
-2,
IndexInterval::UncheckedSized(-2 * 4 + 3, 0));
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::min(),
1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::max(),
1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), 0,
1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::min(),
-1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), std::numeric_limits<Index>::max(),
-1,
IndexInterval());
TestGetAffineTransformRangeRoundTrip(
IndexInterval(), 0,
-1,
IndexInterval());
}
TEST(GetAffineTransformRangeTest, ZeroMultiplier) {
EXPECT_EQ(IndexInterval::UncheckedSized(3, 1),
GetAffineTransformRange(IndexInterval::UncheckedClosed(4, 10), 3, 0)
.value());
}
TEST(GetAffineTransformRangeTest, ErrorCases) {
EXPECT_THAT(GetAffineTransformRange(IndexInterval::UncheckedClosed(3, 10),
kInfIndex, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(GetAffineTransformRange(IndexInterval::UncheckedClosed(3, 10), 5,
kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(GetAffineTransformRange(
IndexInterval::UncheckedClosed(-1, 1),
std::numeric_limits<Index>::max() - kInfIndex + 1, kInfIndex),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GetAffineTransformInverseDomainTest, Examples) {
EXPECT_THAT(
GetAffineTransformRange(IndexInterval::UncheckedClosed(2, 4), 1, 3),
::testing::Optional(IndexInterval::UncheckedClosed(7, 13)));
EXPECT_THAT(GetAffineTransformInverseDomain(
IndexInterval::UncheckedClosed(2, 4), 1, 3),
::testing::Optional(IndexInterval::UncheckedClosed(7, 15)));
EXPECT_THAT(
GetAffineTransformRange(IndexInterval::UncheckedClosed(2, 4), 1, -3),
::testing::Optional(IndexInterval::UncheckedClosed(-11, -5)));
EXPECT_THAT(GetAffineTransformInverseDomain(
IndexInterval::UncheckedClosed(2, 4), 1, -3),
::testing::Optional(IndexInterval::UncheckedClosed(-13, -5)));
}
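// Round-trip check for the OptionallyImplicitIndexInterval overloads; note
// that a negative multiplier swaps the implicit-bound flags.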
void TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
OptionallyImplicitIndexInterval domain, Index offset, Index multiplier,
OptionallyImplicitIndexInterval range) {
EXPECT_EQ(GetAffineTransformRange(domain, offset, multiplier).value(), range)
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
EXPECT_EQ(GetAffineTransformDomain(range, offset, multiplier).value(), domain)
<< "domain=" << domain << ", offset=" << offset
<< ", multiplier=" << multiplier << ", range=" << range;
}
TEST(GetAffineTransformRangeTest, OptionallyImplicitRoundTrip) {
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), true, false},
3,
1, {IndexInterval::UncheckedClosed(4, 13), true, false});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), true, true},
3,
1, {IndexInterval::UncheckedClosed(4, 13), true, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, false},
3,
1, {IndexInterval::UncheckedClosed(4, 13), false, false});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, true},
3,
1, {IndexInterval::UncheckedClosed(4, 13), false, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, true},
-3,
1, {IndexInterval::UncheckedClosed(-2, 7), false, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), false, true},
3,
-1, {IndexInterval::UncheckedClosed(-7, 2), true, false});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedClosed(1, 10), true, false},
3,
-1, {IndexInterval::UncheckedClosed(-7, 2), false, true});
TestGetAffineTransformRangeOptionallyImplicitRoundTrip(
{IndexInterval::UncheckedSized(4, 0), true, false},
3,
-2,
{IndexInterval::UncheckedSized(-2 * 4 + 3, 0), false, true});
}
TEST(GetAffineTransformRangeTest, OptionallyImplicitErrorCases) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(GetAffineTransformRange(
OIII{IndexInterval::UncheckedClosed(3, 10), true, false},
kInfIndex, 1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(GetAffineTransformDomainTest, OptionallyImplicitErrorCases) {
using OIII = tensorstore::OptionallyImplicitIndexInterval;
EXPECT_THAT(GetAffineTransformDomain(
OIII{IndexInterval::UncheckedClosed(1, 10), true, false},
std::numeric_limits<Index>::min(), -1),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(IndexIntervalRefTest, Basic) {
Index inclusive_min = 5, size = 10;
IndexIntervalRef ref = IndexIntervalRef::UncheckedSized(inclusive_min, size);
EXPECT_EQ(5, ref.inclusive_min());
EXPECT_EQ(4, ref.exclusive_min());
EXPECT_EQ(10, ref.size());
EXPECT_EQ(15, ref.exclusive_max());
EXPECT_EQ(14, ref.inclusive_max());
EXPECT_EQ(IndexInterval::UncheckedSized(5, 10),
static_cast<IndexInterval>(ref));
ref = IndexInterval::UncheckedSized(6, 9);
EXPECT_EQ(6, inclusive_min);
EXPECT_EQ(9, size);
EXPECT_FALSE(ref.empty());
size = 0;
EXPECT_TRUE(ref.empty());
}
TEST(IndexIntervalRefTest, ConstructFromIndexInterval) {
IndexInterval interval = IndexInterval::UncheckedSized(5, 10);
IndexIntervalRef ref(interval);
ref = IndexInterval::UncheckedSized(3, 6);
EXPECT_EQ(interval, IndexInterval::UncheckedSized(3, 6));
}
TEST(IndexIntervalRefTest, ImplicitConversion) {
IndexInterval interval = IndexInterval::UncheckedSized(5, 10);
IndexIntervalRef ref(interval);
IndexInterval interval2 = ref;
EXPECT_EQ(interval, interval2);
EXPECT_TRUE(IsFinite(ref));
EXPECT_TRUE(Contains(ref, ref.inclusive_min()));
EXPECT_TRUE(Contains(ref, ref));
EXPECT_EQ(ref, Intersect(ref, ref));
EXPECT_EQ(ref, Hull(ref, ref));
}
TEST(IndexIntervalRefTest, Assignment) {
IndexInterval interval = IndexInterval::UncheckedSized(5, 10);
IndexIntervalRef ref(interval);
IndexInterval interval2 = ref;
IndexIntervalRef ref2(interval2);
ref2 = ref;
EXPECT_EQ(IndexInterval::UncheckedSized(5, 10), interval2);
EXPECT_EQ(IndexInterval::UncheckedSized(5, 10), interval);
}
TEST(OptionallyImplicitIndexIntervalTest, EffectiveInterval) {
EXPECT_EQ(IndexInterval::UncheckedClosed(-kInfIndex, 2),
OptionallyImplicitIndexInterval(
IndexInterval::UncheckedClosed(1, 2), true, false)
.effective_interval());
EXPECT_EQ(IndexInterval::UncheckedClosed(1, +kInfIndex),
OptionallyImplicitIndexInterval(
IndexInterval::UncheckedClosed(1, 2), false, true)
.effective_interval());
EXPECT_EQ(IndexInterval(),
OptionallyImplicitIndexInterval(
IndexInterval::UncheckedClosed(1, 2), true, true)
.effective_interval());
}
TEST(OptionallyImplicitIndexIntervalTest, Ostream) {
EXPECT_EQ("[1*, 3)", StrCat(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 2), true, false}));
EXPECT_EQ("(-inf, 3*)",
StrCat(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 2), false, true}));
EXPECT_EQ("[7*, +inf*)",
StrCat(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(7, kInfIndex), true, true}));
}
TEST(OptionallyImplicitIndexIntervalTest, Comparison) {
OptionallyImplicitIndexInterval a{};
OptionallyImplicitIndexInterval b{IndexInterval::UncheckedSized(0, 1), false,
false};
OptionallyImplicitIndexInterval c{IndexInterval::UncheckedSized(0, 1), false,
true};
OptionallyImplicitIndexInterval d{IndexInterval::UncheckedSized(0, 1), true,
false};
OptionallyImplicitIndexInterval e{IndexInterval::UncheckedSized(0, 1), true,
true};
OptionallyImplicitIndexInterval f{IndexInterval::UncheckedSized(0, 0), false,
false};
OptionallyImplicitIndexInterval g{IndexInterval::UncheckedSized(0, 2), false,
false};
OptionallyImplicitIndexInterval h{IndexInterval::UncheckedSized(1, 2), false,
false};
OptionallyImplicitIndexInterval i{IndexInterval::UncheckedSized(1, 2), false,
true};
OptionallyImplicitIndexInterval j{IndexInterval::UncheckedSized(1, 2), true,
false};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_EQ(e, e);
EXPECT_EQ(f, f);
EXPECT_EQ(g, g);
EXPECT_EQ(h, h);
EXPECT_EQ(i, i);
EXPECT_EQ(j, j);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
EXPECT_NE(a, f);
EXPECT_NE(a, g);
EXPECT_NE(a, h);
EXPECT_NE(a, i);
EXPECT_NE(a, j);
EXPECT_NE(g, h);
EXPECT_NE(g, i);
EXPECT_NE(g, j);
}
TEST(OptionallyImplicitIndexIntervalTest, Hash) {
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
OptionallyImplicitIndexInterval{},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1),
false, true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1), true,
false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 1), true,
true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 0),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(0, 2),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(1, 2),
false, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(1, 2),
false, true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedSized(1, 2), true,
false},
}));
}
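// IndexDomainDimension must be freely convertible and assignable between its
// container and view variants, in both directions.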
static_assert(std::is_convertible_v<IndexDomainDimension<container>,
IndexDomainDimension<view>>);
static_assert(std::is_convertible_v<IndexDomainDimension<view>,
IndexDomainDimension<container>>);
static_assert(std::is_assignable_v<IndexDomainDimension<container>,
IndexDomainDimension<view>>);
static_assert(std::is_assignable_v<IndexDomainDimension<view>,
IndexDomainDimension<container>>);
TEST(IndexDomainDimensionTest, DefaultConstruct) {
IndexDomainDimension<> d;
EXPECT_EQ(OptionallyImplicitIndexInterval(),
d.optionally_implicit_interval());
EXPECT_EQ("", d.label());
}
TEST(IndexDomainDimensionTest, ConstructFromOptionallyImplicitIndexInterval) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = interval;
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("", d.label());
}
TEST(IndexDomainDimensionTest, ConstructLabel) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = {interval, "label"};
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("label", d.label());
}
TEST(IndexDomainDimensionTest, ConstructContainerFromView) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<view> d_view = {interval, "label"};
IndexDomainDimension<> d(d_view);
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("label", d.label());
}
TEST(IndexDomainDimensionTest, ConstructViewFromContainer) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = {interval, "label"};
IndexDomainDimension<view> d_view = d;
EXPECT_EQ(interval, d_view.optionally_implicit_interval());
EXPECT_EQ("label", d_view.label());
}
TEST(IndexDomainDimensionTest, AssignContainerFromView) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<view> d_view = {interval, "label"};
IndexDomainDimension<> d;
d = d_view;
EXPECT_EQ(interval, d.optionally_implicit_interval());
EXPECT_EQ("label", d.label());
}
TEST(IndexDomainDimensionTest, AssignViewFromContainer) {
OptionallyImplicitIndexInterval interval{IndexInterval::UncheckedSized(0, 10),
false, true};
IndexDomainDimension<> d = {interval, "label"};
IndexDomainDimension<view> d_view;
d_view = d;
EXPECT_EQ(interval, d_view.optionally_implicit_interval());
EXPECT_EQ("label", d_view.label());
}
TEST(IndexDomainDimensionTest, PrintToOstream) {
EXPECT_EQ("[0, 10*)",
StrCat(IndexDomainDimension<>{
{IndexInterval::UncheckedSized(0, 10), false, true}, ""}));
EXPECT_EQ("[0, 10*)",
StrCat(IndexDomainDimension<view>{
{IndexInterval::UncheckedSized(0, 10), false, true}, ""}));
EXPECT_EQ("\"label\": [0, 10*)",
StrCat(IndexDomainDimension<>{
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"}));
}
TEST(IndexDomainDimensionTest, Compare) {
IndexDomainDimension<> d1 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<view> d1_view = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<> d2 = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<view> d2_view = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<> d3 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label2"};
EXPECT_EQ(d1, d1);
EXPECT_EQ(d1, d1_view);
EXPECT_EQ(d1_view, d1);
EXPECT_EQ(d1_view, d1_view);
EXPECT_EQ(d2, d2);
EXPECT_EQ(d3, d3);
EXPECT_NE(d1, d2);
EXPECT_NE(d1, d2_view);
EXPECT_NE(d1_view, d2);
EXPECT_NE(d1_view, d2_view);
EXPECT_NE(d1, d3);
}
TEST(IndexDomainDimensionTest, Hash) {
IndexDomainDimension<> d1 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<view> d1_view = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label"};
IndexDomainDimension<> d2 = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<view> d2_view = {
{IndexInterval::UncheckedSized(3, 10), false, true}, "label"};
IndexDomainDimension<> d3 = {
{IndexInterval::UncheckedSized(0, 10), false, true}, "label2"};
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({d1, d2, d3}));
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({d1_view, d2_view}));
}
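// ExplicitIndexOr yields its first argument unless that argument is kImplicit,
// in which case it falls back to the default; ImplicitOrEqual treats kImplicit
// as matching any index. DividePositiveRoundOut divides both bounds by a
// positive divisor and rounds outward (floor on the lower bound, ceiling on
// the upper bound), as the cases below show.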
static_assert(ExplicitIndexOr(10, 11) == 10);
static_assert(ExplicitIndexOr(kImplicit, 11) == 11);
static_assert(ImplicitOrEqual(10, 10));
static_assert(ImplicitOrEqual(kImplicit, 10));
static_assert(!ImplicitOrEqual(10, 11));
static_assert(DividePositiveRoundOut(IndexInterval::UncheckedHalfOpen(3, 10),
2) ==
IndexInterval::UncheckedHalfOpen(1, 5));
static_assert(DividePositiveRoundOut(IndexInterval::UncheckedHalfOpen(3, 11),
2) ==
IndexInterval::UncheckedHalfOpen(1, 6));
static_assert(DividePositiveRoundOut(IndexInterval::UncheckedHalfOpen(-3, 10),
2) ==
IndexInterval::UncheckedHalfOpen(-2, 5));
TEST(IndexIntervalTest, Negate) {
EXPECT_EQ(IndexInterval::UncheckedSized(0, 0),
-IndexInterval::UncheckedSized(0, 0));
EXPECT_EQ(IndexInterval::UncheckedSized(5, 0),
-IndexInterval::UncheckedSized(-5, 0));
EXPECT_EQ(
IndexInterval::UncheckedClosed(kMaxFiniteIndex, kMaxFiniteIndex),
-IndexInterval::UncheckedClosed(-kMaxFiniteIndex, -kMaxFiniteIndex));
EXPECT_EQ(IndexInterval(), -IndexInterval());
EXPECT_EQ(IndexInterval::UncheckedClosed(-5, 6),
-IndexInterval::UncheckedClosed(-6, 5));
EXPECT_EQ(IndexInterval::UncheckedClosed(5, 30),
-IndexInterval::UncheckedClosed(-30, -5));
}
TEST(MergeDimensionLabelsTest, Basic) {
EXPECT_THAT(MergeDimensionLabels("a", ""),
::testing::Optional(std::string("a")));
EXPECT_THAT(MergeDimensionLabels("a", "a"),
::testing::Optional(std::string("a")));
EXPECT_THAT(MergeDimensionLabels("", "a"),
::testing::Optional(std::string("a")));
EXPECT_THAT(MergeDimensionLabels("", ""),
::testing::Optional(std::string("")));
EXPECT_THAT(MergeDimensionLabels("a", "b"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Dimension labels do not match"));
}
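// Merging two optionally-implicit intervals: an implicit infinite bound acts
// as "unspecified" and defers to the other interval's bound; otherwise the
// bounds must match exactly, and the merged bound is explicit if either input
// bound is explicit.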
TEST(MergeOptionallyImplicitIndexIntervalsTest, EqualExplicit) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, EqualImplicit) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false}));
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, UpperUnspecified) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, kInfIndex), false, true},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, kInfIndex), false, true}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, LowerUnspecified) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 5), true, false},
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 5), true, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchLower) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(2, 5), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Lower bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchLowerInfinite) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, 5), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Lower bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, LowerImplicitMerge) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), true, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, UpperImplicitMerge) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, true},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}),
::testing::Optional(OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false}));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchUpper) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 6), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Upper bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchUpperInfinite) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{IndexInterval::UncheckedClosed(1, 5),
false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, kInfIndex), false, false}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Upper bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, MismatchUpperImplicit) {
EXPECT_THAT(MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 5), false, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(1, 6), false, true}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Upper bounds do not match"));
}
TEST(MergeOptionallyImplicitIndexIntervalsTest, InvalidInterval) {
EXPECT_THAT(
MergeOptionallyImplicitIndexIntervals(
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(-kInfIndex, -5), true, false},
OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(5, kInfIndex), false, true}),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"\\(5, -5\\) do not specify a valid closed index interval"));
}
TEST(IndexIntervalSerializationTest, Basic) {
TestSerializationRoundTrip(IndexInterval::UncheckedSized(1, 2));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_interval.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_interval_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
5d78d715-f803-4538-adf8-a21734a52fc6 | cpp | google/cel-cpp | any_type | common/types/any_type.h | common/types/any_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_ANY_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_ANY_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
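// Represents the `google.protobuf.Any` well-known type. The type is stateless:
// every instance compares equal, hashes identically, and swap() is a no-op.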
class AnyType final {
public:
static constexpr TypeKind kKind = TypeKind::kAny;
static constexpr absl::string_view kName = "google.protobuf.Any";
AnyType() = default;
AnyType(const AnyType&) = default;
AnyType(AnyType&&) = default;
AnyType& operator=(const AnyType&) = default;
AnyType& operator=(AnyType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(AnyType&) noexcept {}
};
inline constexpr void swap(AnyType& lhs, AnyType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(AnyType, AnyType) { return true; }
inline constexpr bool operator!=(AnyType lhs, AnyType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, AnyType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const AnyType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(AnyType, Kind) {
EXPECT_EQ(AnyType().kind(), AnyType::kKind);
EXPECT_EQ(Type(AnyType()).kind(), AnyType::kKind);
}
TEST(AnyType, Name) {
EXPECT_EQ(AnyType().name(), AnyType::kName);
EXPECT_EQ(Type(AnyType()).name(), AnyType::kName);
}
TEST(AnyType, DebugString) {
{
std::ostringstream out;
out << AnyType();
EXPECT_EQ(out.str(), AnyType::kName);
}
{
std::ostringstream out;
out << Type(AnyType());
EXPECT_EQ(out.str(), AnyType::kName);
}
}
TEST(AnyType, Hash) {
EXPECT_EQ(absl::HashOf(AnyType()), absl::HashOf(AnyType()));
}
TEST(AnyType, Equal) {
EXPECT_EQ(AnyType(), AnyType());
EXPECT_EQ(Type(AnyType()), AnyType());
EXPECT_EQ(AnyType(), Type(AnyType()));
EXPECT_EQ(Type(AnyType()), Type(AnyType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/any_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/any_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
17337708-4d38-49da-ac30-9296214bfc5a | cpp | google/tensorstore | file_credential_provider | tensorstore/kvstore/s3/credentials/file_credential_provider.cc | tensorstore/kvstore/s3/credentials/file_credential_provider_test.cc | #include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/lines/line_reading.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/s3/credentials/aws_credentials.h"
#include "tensorstore/util/result.h"
using ::tensorstore::internal::GetEnv;
using ::tensorstore::internal::JoinPath;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag s3_logging("s3");
static constexpr char kEnvAwsCredentialsFile[] = "AWS_SHARED_CREDENTIALS_FILE";
static constexpr char kDefaultAwsCredentialsFilePath[] = ".aws/credentials";
static constexpr char kCfgAwsAccessKeyId[] = "aws_access_key_id";
static constexpr char kCfgAwsSecretAccessKeyId[] = "aws_secret_access_key";
static constexpr char kCfgAwsSessionToken[] = "aws_session_token";
static constexpr char kEnvAwsProfile[] = "AWS_PROFILE";
static constexpr char kDefaultProfile[] = "default";
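// Resolves the shared credentials file path: $AWS_SHARED_CREDENTIALS_FILE if
// set, otherwise $HOME/.aws/credentials.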
std::optional<std::string> GetAwsCredentialsFileName() {
if (auto credentials_file = GetEnv(kEnvAwsCredentialsFile);
credentials_file) {
return credentials_file;
}
if (auto home_dir = GetEnv("HOME"); home_dir) {
return JoinPath(*home_dir, kDefaultAwsCredentialsFilePath);
}
return std::nullopt;
}
}
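// An empty `filename` or `profile` falls back to the environment
// ($AWS_SHARED_CREDENTIALS_FILE / $HOME for the file, and $AWS_PROFILE,
// defaulting to "default", for the profile).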
FileCredentialProvider::FileCredentialProvider(std::string_view filename,
std::string_view profile)
: filename_(filename), profile_(profile) {
if (filename_.empty()) {
if (auto credentials_file = GetAwsCredentialsFileName(); credentials_file) {
filename_ = *std::move(credentials_file);
}
}
if (profile_.empty()) {
profile_ = GetEnv(kEnvAwsProfile).value_or(kDefaultProfile);
}
}
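// Scans the INI-style credentials file line by line for the `[profile]`
// section and collects aws_access_key_id, aws_secret_access_key, and the
// optional aws_session_token. The returned credentials never expire
// (expires_at is set to InfiniteFuture). Expected file layout:
//
//   [default]
//   aws_access_key_id = AKIA...
//   aws_secret_access_key = ...
//   aws_session_token = ...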
Result<AwsCredentials> FileCredentialProvider::GetCredentials() {
if (filename_.empty()) {
return absl::NotFoundError("No credentials file specified");
}
riegeli::FdReader reader(filename_);
if (!reader.ok()) {
return absl::NotFoundError(
absl::StrFormat("Could not open credentials file [%s]", filename_));
}
AwsCredentials credentials{};
std::string_view line;
bool profile_found = false;
while (riegeli::ReadLine(reader, line)) {
auto sline = absl::StripAsciiWhitespace(line);
if (sline.empty() || sline[0] == '#') continue;
if (sline[0] == '[' && sline[sline.size() - 1] == ']') {
if (profile_found) break;
auto section_name =
absl::StripAsciiWhitespace(sline.substr(1, sline.size() - 2));
ABSL_LOG_IF(INFO, s3_logging) << "Found section name [" << section_name
<< "] in file [" << filename_ << "]";
profile_found = (section_name == profile_);
continue;
}
if (profile_found) {
std::pair<std::string_view, std::string_view> kv =
absl::StrSplit(sline, absl::MaxSplits('=', 1));
kv.first = absl::StripAsciiWhitespace(kv.first);
kv.second = absl::StripAsciiWhitespace(kv.second);
if (kv.first == kCfgAwsAccessKeyId) {
credentials.access_key = kv.second;
} else if (kv.first == kCfgAwsSecretAccessKeyId) {
credentials.secret_key = kv.second;
} else if (kv.first == kCfgAwsSessionToken) {
credentials.session_token = kv.second;
}
}
}
if (!profile_found) {
return absl::NotFoundError(
absl::StrFormat("Profile [%s] not found in credentials file [%s]",
profile_, filename_));
}
ABSL_LOG_FIRST_N(INFO, 1)
<< "Using profile [" << profile_ << "] in file [" << filename_ << "]";
credentials.expires_at = absl::InfiniteFuture();
return credentials;
}
}
} | #include "tensorstore/kvstore/s3/credentials/file_credential_provider.h"
#include <fstream>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SetEnv;
using ::tensorstore::internal::UnsetEnv;
using ::tensorstore::internal_kvstore_s3::FileCredentialProvider;
class TestData
: public tensorstore::internal_testing::ScopedTemporaryDirectory {
public:
std::string WriteCredentialsFile() {
auto p = JoinPath(path(), "aws_config");
std::ofstream ofs(p);
ofs << "discarded_value = 500\n"
"\n"
"[default]\n"
"aws_access_key_id =AKIAIOSFODNN7EXAMPLE\n"
"aws_secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\n"
"aws_session_token= abcdef1234567890 \n"
"\n"
"[alice]\n"
"aws_access_key_id = AKIAIOSFODNN6EXAMPLE\n"
"aws_secret_access_key = "
"wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY\n"
"\n";
ofs.close();
return p;
}
};
class FileCredentialProviderTest : public ::testing::Test {
protected:
void SetUp() override {
UnsetEnv("AWS_SHARED_CREDENTIALS_FILE");
UnsetEnv("AWS_PROFILE");
}
};
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileDefault) {
TestData test_data;
std::string credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
auto provider = FileCredentialProvider("", "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "default");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "abcdef1234567890");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
TEST_F(FileCredentialProviderTest,
ProviderAwsCredentialsFromFileProfileOverride) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
auto provider = FileCredentialProvider("", "alice");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "alice");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileProfileEnv) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
SetEnv("AWS_PROFILE", "alice");
auto provider = FileCredentialProvider("", "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials, provider.GetCredentials());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "alice");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
TEST_F(FileCredentialProviderTest,
ProviderAwsCredentialsFromFileInvalidProfileEnv) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
SetEnv("AWS_SHARED_CREDENTIALS_FILE", credentials_filename.c_str());
SetEnv("AWS_PROFILE", "bob");
auto provider = FileCredentialProvider("", "");
ASSERT_FALSE(provider.GetCredentials().ok());
ASSERT_EQ(provider.GetFileName(), credentials_filename);
ASSERT_EQ(provider.GetProfile(), "bob");
}
TEST_F(FileCredentialProviderTest, ProviderAwsCredentialsFromFileOverride) {
TestData test_data;
auto credentials_filename = test_data.WriteCredentialsFile();
auto provider =
std::make_unique<FileCredentialProvider>(credentials_filename, "");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto credentials,
provider->GetCredentials());
ASSERT_EQ(provider->GetFileName(), credentials_filename);
ASSERT_EQ(provider->GetProfile(), "default");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN7EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "abcdef1234567890");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
provider =
std::make_unique<FileCredentialProvider>(credentials_filename, "alice");
TENSORSTORE_ASSERT_OK_AND_ASSIGN(credentials, provider->GetCredentials());
ASSERT_EQ(provider->GetFileName(), credentials_filename);
ASSERT_EQ(provider->GetProfile(), "alice");
ASSERT_EQ(credentials.access_key, "AKIAIOSFODNN6EXAMPLE");
ASSERT_EQ(credentials.secret_key, "wJalrXUtnFEMI/K7MDENG/bPxRfiCZEXAMPLEKEY");
ASSERT_EQ(credentials.session_token, "");
ASSERT_EQ(credentials.expires_at, absl::InfiniteFuture());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/file_credential_provider.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/credentials/file_credential_provider_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
02a0ac9b-8137-474c-8c98-7dfed739d9d5 | cpp | tensorflow/tensorflow | data_type | tensorflow/lite/delegates/gpu/common/data_type.cc | tensorflow/lite/delegates/gpu/common/data_type_test.cc | #include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <stddef.h>
#include <string>
#include "absl/strings/str_cat.h"
namespace tflite {
namespace gpu {
namespace {
std::string ToGlslType(const std::string& scalar_type,
const std::string& vec_type, int vec_size) {
return vec_size == 1 ? scalar_type : absl::StrCat(vec_type, vec_size);
}
std::string GetGlslPrecisionModifier(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
return "lowp ";
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return "mediump ";
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return "highp ";
case DataType::BOOL:
return "";
default:
return "";
}
}
}
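// Returns the size of a single element of `data_type` in bytes, or 0 for
// DataType::UNKNOWN.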
size_t SizeOf(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
case DataType::BOOL:
return 1;
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return 2;
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return 4;
case DataType::FLOAT64:
case DataType::INT64:
case DataType::UINT64:
return 8;
case DataType::UNKNOWN:
return 0;
}
return 0;
}
std::string ToString(DataType data_type) {
switch (data_type) {
case DataType::FLOAT16:
return "float16";
case DataType::FLOAT32:
return "float32";
case DataType::FLOAT64:
return "float64";
case DataType::INT16:
return "int16";
case DataType::INT32:
return "int32";
case DataType::INT64:
return "int64";
case DataType::INT8:
return "int8";
case DataType::UINT16:
return "uint16";
case DataType::UINT32:
return "uint32";
case DataType::UINT64:
return "uint64";
case DataType::UINT8:
return "uint8";
case DataType::BOOL:
return "bool";
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
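// Maps a DataType to its OpenCL C type name, appending the vector width when
// vec_size > 1 (e.g. FLOAT32 with vec_size 4 -> "float4").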
std::string ToCLDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
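// The same mapping for Metal Shading Language scalar/vector type names.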
std::string ToMetalDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
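// Texture storage types: 8-bit types are widened to 16 bits (INT8 -> INT16,
// UINT8 and BOOL -> UINT16); unsupported types map to UNKNOWN.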
DataType ToMetalTextureType(DataType data_type) {
switch (data_type) {
case DataType::FLOAT32:
case DataType::FLOAT16:
case DataType::INT32:
case DataType::INT16:
case DataType::UINT32:
case DataType::UINT16:
return data_type;
case DataType::INT8:
return DataType::INT16;
case DataType::UINT8:
case DataType::BOOL:
return DataType::UINT16;
default:
return DataType::UNKNOWN;
}
}
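// Builds a GLSL type string, optionally prefixed with a precision qualifier.
// FLOAT16 lowers to float16_t/f16vecN only when explicit_fp16 is set;
// otherwise it degrades to a (mediump) float/vecN.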
std::string ToGlslShaderDataType(DataType data_type, int vec_size,
bool add_precision, bool explicit_fp16) {
const std::string precision_modifier =
add_precision ? GetGlslPrecisionModifier(data_type) : "";
switch (data_type) {
case DataType::FLOAT16:
if (explicit_fp16) {
return ToGlslType("float16_t", "f16vec", vec_size);
} else {
return precision_modifier + ToGlslType("float", "vec", vec_size);
}
case DataType::FLOAT32:
return precision_modifier + ToGlslType("float", "vec", vec_size);
case DataType::FLOAT64:
return precision_modifier + ToGlslType("double", "dvec", vec_size);
case DataType::INT8:
case DataType::INT16:
case DataType::INT32:
case DataType::INT64:
return precision_modifier + ToGlslType("int", "ivec", vec_size);
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
case DataType::UINT64:
return precision_modifier + ToGlslType("uint", "uvec", vec_size);
case DataType::BOOL:
return ToGlslType("bool", "bvec", vec_size);
case DataType::UNKNOWN:
return "unknown";
}
return "unknown";
}
}
} | #include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace {
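// Argument order below: (data_type, vec_size, add_precision, explicit_fp16).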
TEST(DataTypeTest, GlslShaderDataTypes) {
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT16));
EXPECT_EQ("mediump float",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
false));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, false,
true));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
true));
EXPECT_EQ("vec4", ToGlslShaderDataType(DataType::FLOAT16, 4));
EXPECT_EQ("mediump vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
false));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, false,
true));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32));
EXPECT_EQ("highp float",
ToGlslShaderDataType(DataType::FLOAT32, 1, true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32, 1,
false));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2));
EXPECT_EQ("highp vec2",
ToGlslShaderDataType(DataType::FLOAT32, 2, true));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2,
false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT32, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT16, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT8, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, true));
EXPECT_EQ("highp int",
ToGlslShaderDataType(DataType::INT32, 1, true));
EXPECT_EQ("mediump int",
ToGlslShaderDataType(DataType::INT16, 1, true));
EXPECT_EQ("lowp int",
ToGlslShaderDataType(DataType::INT8, 1, true));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT32, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT16, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT8, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, true));
EXPECT_EQ("highp uint",
ToGlslShaderDataType(DataType::UINT32, 1, true));
EXPECT_EQ("mediump uint",
ToGlslShaderDataType(DataType::UINT16, 1, true));
EXPECT_EQ("lowp uint",
ToGlslShaderDataType(DataType::UINT8, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL));
EXPECT_EQ("bvec4", ToGlslShaderDataType(DataType::BOOL, 4));
EXPECT_EQ("bool",
ToGlslShaderDataType(DataType::BOOL, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL, 1,
false));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/data_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/data_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
af3f2af3-6f07-4e3c-b1f8-a9fbaaa3332c | cpp | tensorflow/tensorflow | hlo_opcode | third_party/xla/xla/hlo/ir/hlo_opcode.cc | third_party/xla/xla/service/hlo_opcode_test.cc | #include "xla/hlo/ir/hlo_opcode.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/util.h"
namespace xla {
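// All of these helpers expand HLO_OPCODE_LIST, so they stay in sync with the
// opcode table by construction.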
absl::string_view HloOpcodeString(HloOpcode opcode) {
switch (opcode) {
#define CASE_OPCODE_STRING(enum_name, opcode_name, ...) \
case HloOpcode::enum_name: \
return opcode_name;
HLO_OPCODE_LIST(CASE_OPCODE_STRING)
#undef CASE_OPCODE_STRING
}
}
absl::StatusOr<HloOpcode> StringToHloOpcode(absl::string_view opcode_name) {
static auto* opcode_map = new absl::flat_hash_map<std::string, HloOpcode>({
#define STRING_TO_OPCODE_ENTRY(enum_name, opcode_name, ...) \
{opcode_name, HloOpcode::enum_name},
HLO_OPCODE_LIST(STRING_TO_OPCODE_ENTRY)
#undef STRING_TO_OPCODE_ENTRY
});
auto it = opcode_map->find(opcode_name);
if (it == opcode_map->end()) {
return InvalidArgument("Unknown opcode: %s", opcode_name);
}
return it->second;
}
bool HloOpcodeIsComparison(HloOpcode opcode) {
return opcode == HloOpcode::kCompare;
}
bool HloOpcodeIsVariadic(HloOpcode opcode) {
switch (opcode) {
#define CASE_IS_VARIADIC(enum_name, opcode_name, arity, ...) \
case HloOpcode::enum_name: \
return arity == kHloOpcodeIsVariadic;
HLO_OPCODE_LIST(CASE_IS_VARIADIC)
#undef CASE_IS_VARIADIC
}
}
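// Returns the fixed operand count of `opcode`, or std::nullopt if the opcode
// is variadic.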
std::optional<int> HloOpcodeArity(HloOpcode opcode) {
switch (opcode) {
#define CASE_ARITY(enum_name, opcode_name, arity, ...) \
case HloOpcode::enum_name: \
return arity == kHloOpcodeIsVariadic ? std::nullopt \
: std::make_optional(arity);
HLO_OPCODE_LIST(CASE_ARITY)
#undef CASE_ARITY
}
}
bool HloOpcodeIsAsync(HloOpcode opcode) {
return opcode == HloOpcode::kAsyncStart ||
opcode == HloOpcode::kAsyncUpdate || opcode == HloOpcode::kAsyncDone;
}
} | #include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/types.h"
namespace xla {
namespace {
TEST(HloOpcodeTest, StringifyMultiply) {
ASSERT_EQ("multiply", HloOpcodeString(HloOpcode::kMultiply));
}
TEST(HloOpcodeTest, OpcodeProperties) {
#define SOME_LIST(X) \
X(One) \
X(Two) \
X(Three)
EXPECT_EQ(3, HLO_XLIST_LENGTH(SOME_LIST));
#undef SOME_LIST
for (int i = 0; i < HloOpcodeCount(); ++i) {
auto opcode = static_cast<HloOpcode>(i);
EXPECT_EQ(opcode, StringToHloOpcode(HloOpcodeString(opcode)).value());
switch (opcode) {
case HloOpcode::kCompare:
EXPECT_TRUE(HloOpcodeIsComparison(opcode));
break;
default:
EXPECT_FALSE(HloOpcodeIsComparison(opcode));
}
switch (opcode) {
case HloOpcode::kAfterAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kAsyncStart:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCall:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kDynamicReshape:
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kRng:
case HloOpcode::kScatter:
case HloOpcode::kSort:
case HloOpcode::kTuple:
case HloOpcode::kReduceWindow:
EXPECT_TRUE(HloOpcodeIsVariadic(opcode));
break;
default:
EXPECT_FALSE(HloOpcodeIsVariadic(opcode));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_opcode.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_opcode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94fa025d-5568-47c6-8195-73a16c710571 | cpp | tensorflow/tensorflow | parallel_device | tensorflow/c/eager/parallel_device/parallel_device.cc | tensorflow/c/eager/parallel_device/parallel_device_test.cc | #include "tensorflow/c/eager/parallel_device/parallel_device.h"
#include <cstring>
#include <memory>
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace parallel_device {
namespace {
class OpDeleter {
public:
void operator()(TFE_Op* to_delete) const { TFE_DeleteOp(to_delete); }
};
using OpPtr = std::unique_ptr<TFE_Op, OpDeleter>;
using MaybeParallelTensorOwned =
absl::variant<std::unique_ptr<ParallelTensor>, TensorHandlePtr>;
using MaybeParallelTensorUnowned =
absl::variant<ParallelTensor*, TFE_TensorHandle*>;
class NamedParallelDevice {
public:
NamedParallelDevice(const std::string& name,
std::unique_ptr<ParallelDevice> parallel_device)
: device_name_(name), parallel_device_(std::move(parallel_device)) {}
const std::string& name() const { return device_name_; }
const ParallelDevice& device() const { return *parallel_device_; }
private:
std::string device_name_;
std::unique_ptr<ParallelDevice> parallel_device_;
};
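// Executes `operation_name` on the parallel device, special-casing
// TPUReplicatedInput (packs one per-device component handle into a parallel
// tensor) and TPUReplicatedOutput (unpacks a parallel tensor into its
// components). _EagerConst inputs are implicitly broadcast to every
// underlying device; any other non-parallel input is an error.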
absl::optional<std::vector<MaybeParallelTensorOwned>> ExecuteWithSpecialOps(
const ParallelDevice& parallel_device,
const std::string& parallel_device_name, TFE_Context* context,
std::vector<MaybeParallelTensorUnowned> inputs, const char* operation_name,
const TFE_OpAttrs* attributes, int expected_max_outputs,
TF_Status* status) {
absl::optional<std::vector<MaybeParallelTensorOwned>> result;
if (operation_name == std::string("TPUReplicatedInput")) {
if (inputs.size() != parallel_device.num_underlying_devices()) {
std::string message(absl::StrCat(
"The parallel device ", parallel_device_name, " expected ",
parallel_device.num_underlying_devices(),
" inputs to TPUReplicatedInput, but got ", inputs.size()));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
std::vector<TensorHandlePtr> components;
components.reserve(inputs.size());
for (int i = 0; i < inputs.size(); ++i) {
if (absl::holds_alternative<ParallelTensor*>(inputs[i])) {
std::string message(absl::StrCat(
"Expected all inputs to TPUReplicatedInput to be non-parallel "
"TensorHandles. The input ",
i,
" was a parallel tensor (already "
"placed on the parallel device)."));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
components.emplace_back(TFE_TensorHandleCopySharingTensor(
absl::get<TFE_TensorHandle*>(inputs[i]), status));
}
std::vector<MaybeParallelTensorOwned> result_content;
result_content.reserve(1);
result_content.push_back(ParallelTensor::FromTensorHandles(
parallel_device, std::move(components), status));
if (TF_GetCode(status) != TF_OK) return result;
result.emplace(std::move(result_content));
return result;
} else if (operation_name == std::string("TPUReplicatedOutput")) {
OpPtr op(TFE_NewOp(context, operation_name, status));
TFE_OpAddAttrs(op.get(), attributes);
int expected_outputs = TFE_OpGetOutputLength(op.get(), "outputs", status);
if (TF_GetCode(status) != TF_OK) return result;
if (expected_outputs != parallel_device.num_underlying_devices()) {
std::string message(absl::StrCat(
"The parallel device ", parallel_device_name, " expected ",
parallel_device.num_underlying_devices(),
" outputs for TPUReplicatedOutput, but got ", expected_outputs));
TF_SetStatus(status, TF_INVALID_ARGUMENT, message.c_str());
return result;
}
if (absl::holds_alternative<TFE_TensorHandle*>(inputs[0])) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"Expected the input to "
"TPUReplicatedOutput to be a parallel tensor (placed on the "
"parallel device).");
return result;
}
ParallelTensor* t = absl::get<ParallelTensor*>(inputs[0]);
std::vector<MaybeParallelTensorOwned> outputs;
outputs.reserve(t->num_tensors());
for (int i = 0; i < t->num_tensors(); ++i) {
TensorHandlePtr this_output(
TFE_TensorHandleCopySharingTensor(t->tensor(i), status));
outputs.emplace_back(std::move(this_output));
if (TF_GetCode(status) != TF_OK) return result;
}
result.emplace(std::move(outputs));
return result;
}
std::vector<ParallelTensor*> parallel_inputs;
std::vector<std::unique_ptr<ParallelTensor>> implicitly_broadcast_tensors;
parallel_inputs.reserve(inputs.size());
implicitly_broadcast_tensors.reserve(inputs.size());
for (const auto& input : inputs) {
if (absl::holds_alternative<TFE_TensorHandle*>(input)) {
if (operation_name == std::string("_EagerConst")) {
std::unique_ptr<ParallelTensor> parallel_tensor(
parallel_device.CopyToParallelDevice(
context, absl::get<TFE_TensorHandle*>(input), status));
if (TF_GetCode(status) != TF_OK) return absl::nullopt;
parallel_inputs.push_back(parallel_tensor.get());
implicitly_broadcast_tensors.emplace_back(std::move(parallel_tensor));
} else {
TF_SetStatus(
status, TF_INVALID_ARGUMENT,
absl::StrCat(
"Got a non-parallel tensor ",
tensorflow::unwrap(absl::get<TFE_TensorHandle*>(input))
->DebugString(),
" as input to a parallel operation. First pack non-parallel "
"tensors for each device into a parallel tensor explicitly.")
.c_str());
return absl::nullopt;
}
} else {
parallel_inputs.push_back(absl::get<ParallelTensor*>(input));
}
}
absl::optional<std::vector<std::unique_ptr<ParallelTensor>>>
maybe_parallel_results(
parallel_device.Execute(context, parallel_inputs, operation_name,
attributes, expected_max_outputs, status));
if (!maybe_parallel_results.has_value()) return result;
std::vector<std::unique_ptr<ParallelTensor>> parallel_results(
std::move(maybe_parallel_results.value()));
std::vector<MaybeParallelTensorOwned> result_content;
result_content.reserve(parallel_results.size());
for (std::unique_ptr<ParallelTensor>& parallel_result : parallel_results) {
result_content.push_back(
MaybeParallelTensorOwned(std::move(parallel_result)));
}
result.emplace(std::move(result_content));
return result;
}
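// Callbacks backing custom-device tensor handles: the handle owns a
// ParallelTensor and surfaces its shape and value summary through these hooks.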
void ParallelTensorDeallocator(void* data) {
delete reinterpret_cast<ParallelTensor*>(data);
}
int ParallelTensorNumDims(void* data, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return shape->size();
}
int64_t ParallelTensorDim(void* data, int dim_index, TF_Status* status) {
const std::vector<int64_t>* shape;
Status s = reinterpret_cast<ParallelTensor*>(data)->Shape(&shape);
if (!s.ok()) {
tsl::Set_TF_Status_from_Status(status, s);
return -1;
}
return (*shape)[dim_index];
}
TF_Buffer* ParallelTensorSummarize(void* data, TF_Status* status) {
ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(data);
std::string summary;
Status cpp_status = parallel_tensor->SummarizeValue(summary);
if (!cpp_status.ok()) {
tsl::Set_TF_Status_from_Status(status, cpp_status);
return nullptr;
}
return TF_NewBufferFromString(summary.data(), summary.size());
}
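// Wraps `t` in a TFE_TensorHandle placed on the parallel device, transferring
// ownership of the ParallelTensor to the handle.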
TensorHandlePtr ParallelTensorToTensorHandle(
const std::string& parallel_device_name, TFE_Context* context,
std::unique_ptr<ParallelTensor> t, TF_Status* status) {
ParallelTensor* t_released = t.release();
TFE_CustomDeviceTensorHandleMethods handle_methods;
handle_methods.num_dims = &ParallelTensorNumDims;
handle_methods.dim = &ParallelTensorDim;
handle_methods.deallocator = &ParallelTensorDeallocator;
handle_methods.summarize = &ParallelTensorSummarize;
return TensorHandlePtr(TFE_NewCustomDeviceTensorHandle(
context, parallel_device_name.c_str(), t_released->dtype(), t_released,
handle_methods, status));
}
TFE_TensorHandle* CopyToParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
TF_Status* status, void* device_info) {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
absl::StrCat("Trying to copy a tensor ",
tensorflow::unwrap(tensor)->DebugString(),
" on to a parallel device. Pack non-parallel "
"tensors for each device into a parallel tensor explicitly.")
.c_str());
return nullptr;
}
TFE_TensorHandle* CopyTensorFromParallelDevice(TFE_Context* context,
TFE_TensorHandle* tensor,
const char* target_device_name,
TF_Status* status,
void* device_info) {
ParallelTensor* parallel_tensor = reinterpret_cast<ParallelTensor*>(
TFE_TensorHandleDevicePointer(tensor, status));
if (TF_GetCode(status) != TF_OK) return nullptr;
if (parallel_tensor->num_tensors() == 1) {
return TFE_TensorHandleCopySharingTensor(parallel_tensor->tensor(0),
status);
} else {
TF_SetStatus(
status, TF_UNIMPLEMENTED,
absl::StrCat(
"Trying to copy a tensor out of a parallel device. Since there "
"are multiple components to parallel tensors, they must be "
"unpacked explicitly.\n",
tensorflow::unwrap(tensor)->DebugString())
.c_str());
return nullptr;
}
}
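// TFE_CustomDevice execute callback: requires explicit placement on the
// parallel device, resolves inputs that already live on it to their
// ParallelTensor, runs the op, and re-wraps each output.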
void ParallelDeviceExecute(const TFE_Op* original_op, int* num_outputs,
TFE_TensorHandle** outputs, TF_Status* status,
void* device_info) {
const char* requested_placement = TFE_OpGetDevice(original_op, status);
if (*requested_placement == '\0') {
TF_SetStatus(
status, TF_INTERNAL,
"Ops must be placed on the parallel device explicitly, or their inputs "
"first un-packed. Got an un-placed op with an input placed on the "
"parallel device.");
return;
}
TFE_Context* context = TFE_OpGetContext(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
const char* operation_name = TFE_OpGetName(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
const TFE_OpAttrs* attributes = TFE_OpGetAttrs(original_op);
NamedParallelDevice* named_device =
reinterpret_cast<NamedParallelDevice*>(device_info);
std::vector<MaybeParallelTensorUnowned> typed_inputs;
int num_inputs = TFE_OpGetFlatInputCount(original_op, status);
if (TF_GetCode(status) != TF_OK) return;
typed_inputs.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
TFE_TensorHandle* input = TFE_OpGetFlatInput(original_op, i, status);
if (TF_GetCode(status) != TF_OK) return;
const char* tensor_handle_device =
TFE_TensorHandleDeviceName(input, status);
if (TF_GetCode(status) != TF_OK) return;
if (named_device->name() == tensor_handle_device) {
typed_inputs.emplace_back(reinterpret_cast<ParallelTensor*>(
TFE_TensorHandleDevicePointer(input, status)));
if (TF_GetCode(status) != TF_OK) return;
} else {
typed_inputs.emplace_back(input);
}
}
absl::optional<std::vector<MaybeParallelTensorOwned>> maybe_typed_outputs(
ExecuteWithSpecialOps(named_device->device(), named_device->name(),
context, std::move(typed_inputs), operation_name,
attributes, *num_outputs, status));
if (TF_GetCode(status) != TF_OK) return;
if (!maybe_typed_outputs.has_value()) {
TF_SetStatus(status, TF_INTERNAL, "OK status but no value was returned.");
return;
}
std::vector<MaybeParallelTensorOwned> typed_outputs(
std::move(maybe_typed_outputs.value()));
if (typed_outputs.size() > *num_outputs) {
TF_SetStatus(status, TF_INTERNAL,
"The allocated output buffer was too small.");
return;
}
for (int i = 0; i < typed_outputs.size(); ++i) {
MaybeParallelTensorOwned typed_output(std::move(typed_outputs[i]));
if (absl::holds_alternative<TensorHandlePtr>(typed_output)) {
outputs[i] = absl::get<TensorHandlePtr>(typed_output).release();
} else {
outputs[i] = ParallelTensorToTensorHandle(
named_device->name(), context,
std::move(absl::get<std::unique_ptr<ParallelTensor>>(
typed_output)),
status)
.release();
if (TF_GetCode(status) != TF_OK) return;
}
}
*num_outputs = typed_outputs.size();
}
void DeleteParallelDevice(void* device_info) {
delete reinterpret_cast<NamedParallelDevice*>(device_info);
}
}
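// Fills in the TFE_CustomDevice callbacks plus the heap-allocated device_info
// they share; the caller is expected to pass both to TFE_RegisterCustomDevice.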
void AllocateParallelDevice(const char* device_name,
const char* const* underlying_devices,
int num_underlying_devices,
TFE_CustomDevice* device, void** device_info) {
device->copy_tensor_to_device = &CopyToParallelDevice;
device->copy_tensor_from_device = &CopyTensorFromParallelDevice;
device->delete_device = &DeleteParallelDevice;
device->execute = &ParallelDeviceExecute;
std::vector<std::string> underlying_devices_vector;
underlying_devices_vector.reserve(num_underlying_devices);
for (int device_index = 0; device_index < num_underlying_devices;
++device_index) {
underlying_devices_vector.push_back(underlying_devices[device_index]);
}
std::unique_ptr<ParallelDevice> parallel_device(
new ParallelDevice(underlying_devices_vector));
*device_info =
new NamedParallelDevice{device_name, std::move(parallel_device)};
}
}
} | #include <array>
#include <gmock/gmock.h>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_lib.h"
#include "tensorflow/c/eager/parallel_device/parallel_device_testlib.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_internal.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace parallel_device {
using ::testing::HasSubstr;
TEST(PARALLEL_DEVICE, TestBasicCPU) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1");
}
TEST(PARALLEL_DEVICE, TestBasicCPUAliased) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(PARALLEL_DEVICE, TestBasicTPUAliased) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<TF_DeviceList, decltype(&TF_DeleteDeviceList)> devices(
TFE_ContextListDevices(context.get(), status.get()), TF_DeleteDeviceList);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
bool has_tpu = false;
for (int device_index = 0; device_index < TF_DeviceListCount(devices.get());
++device_index) {
std::string device_type =
TF_DeviceListType(devices.get(), device_index, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
if (device_type == "TPU") {
has_tpu = true;
break;
}
}
if (has_tpu) {
BasicTestsForTwoDevices(context.get(),
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:0");
}
}
TEST(PARALLEL_DEVICE, TestExplicitCopies) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
const char* first_device_name =
"/job:localhost/replica:0/task:0/device:CPU:0";
const char* second_device_name =
"/job:localhost/replica:0/task:0/device:CPU:1";
std::array<const char*, 2> underlying_devices{first_device_name,
second_device_name};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr failed_copy_on_result(TFE_TensorHandleCopyToDevice(
cpu_value.get(), context.get(), device_name, status.get()));
EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED);
std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()};
TensorHandlePtr device_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr copy_off(TFE_TensorHandleCopyToDevice(
device_value.get(), context.get(), first_device_name, status.get()));
EXPECT_EQ(TF_GetCode(status.get()), TF_UNIMPLEMENTED);
}
TEST(PARALLEL_DEVICE, TestDifferentShapes) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::vector<float> size_two_value{1., 2.};
std::vector<float> size_three_value{1., 2., 3.};
TensorHandlePtr size_two(
VectorFloatTensorHandle(size_two_value, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr size_three(
VectorFloatTensorHandle(size_three_value, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{size_two.get(), size_three.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
int num_axes = TFE_TensorHandleNumDims(combined_value.get(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
EXPECT_EQ(num_axes, 1);
}
TEST(PARALLEL_DEVICE, TestNestedParallelDevices) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/3),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* first_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> first_underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), first_device_name,
first_underlying_devices, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* second_device_name =
"/job:localhost/replica:0/task:0/device:CUSTOM:1";
std::array<const char*, 2> second_underlying_devices{
"/job:localhost/replica:0/task:0/device:CUSTOM:0",
"/job:localhost/replica:0/task:0/device:CPU:2"};
RegisterParallelDevice(context.get(), second_device_name,
second_underlying_devices, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr first_combined_value = CreatePerDeviceValues(
context.get(), components, first_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_three(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
components[0] = first_combined_value.get();
components[1] = value_three.get();
TensorHandlePtr second_combined_value = CreatePerDeviceValues(
context.get(), components, second_device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
  // Build parallel values filled with 3 at both levels of nesting; the
  // expected products below (3, 6, and 9) depend on this operand.
  TensorHandlePtr three_cpu(FloatTensorHandle(3., status.get()));
  ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
  components[0] = three_cpu.get();
  components[1] = three_cpu.get();
  TensorHandlePtr first_threes = CreatePerDeviceValues(
      context.get(), components, first_device_name, status.get());
  ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
  components[0] = first_threes.get();
  components[1] = three_cpu.get();
  TensorHandlePtr second_threes = CreatePerDeviceValues(
      context.get(), components, second_device_name, status.get());
  ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
  TensorHandlePtr multiply_result(
      Multiply(context.get(), second_combined_value.get(),
               second_threes.get(), status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> second_components;
ExtractPerDeviceValues(context.get(), multiply_result.get(),
&second_components, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(second_components[1].get(), 9.);
std::string first_device = TFE_TensorHandleBackingDeviceName(
second_components[0].get(), status.get());
ASSERT_EQ(second_underlying_devices[0], first_device);
std::string second_device = TFE_TensorHandleBackingDeviceName(
second_components[1].get(), status.get());
ASSERT_EQ(second_underlying_devices[1], second_device);
std::array<TensorHandlePtr, 2> first_components;
ExtractPerDeviceValues(context.get(), second_components[0].get(),
&first_components, status.get());
ExpectScalarEq<float>(first_components[0].get(), 3.);
ExpectScalarEq<float>(first_components[1].get(), 6.);
first_device = TFE_TensorHandleBackingDeviceName(first_components[0].get(),
status.get());
ASSERT_EQ(first_underlying_devices[0], first_device);
second_device = TFE_TensorHandleBackingDeviceName(first_components[1].get(),
status.get());
ASSERT_EQ(first_underlying_devices[1], second_device);
}
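// Packing the wrong number of components, unpacking into the wrong number
// of outputs, re-packing an already-parallel tensor, and running
// TPUReplicatedOutput directly on the parallel device should all fail with
// TF_INVALID_ARGUMENT.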
TEST(PARALLEL_DEVICE, TestInvalidPacking) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 1> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
{
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(),
value_two.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
    ASSERT_EQ(TF_GetCode(status.get()), TF_INVALID_ARGUMENT)
        << TF_Message(status.get());
}
{
std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), correct_components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> incorrect_components;
ExtractPerDeviceValues(context.get(), combined_value.get(),
&incorrect_components, status.get());
    ASSERT_EQ(TF_GetCode(status.get()), TF_INVALID_ARGUMENT)
        << TF_Message(status.get());
}
{
std::array<TFE_TensorHandle*, 1> correct_components{value_one.get()};
TensorHandlePtr combined_value = CreatePerDeviceValues(
context.get(), correct_components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 1> incorrect_components{combined_value.get()};
TensorHandlePtr recombined_value = CreatePerDeviceValues(
context.get(), incorrect_components, device_name, status.get());
    ASSERT_EQ(TF_GetCode(status.get()), TF_INVALID_ARGUMENT)
        << TF_Message(status.get());
}
{
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), "TPUReplicatedOutput", status.get()),
TFE_DeleteOp);
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_OpSetAttrInt(op.get(), "num_replicas", 1);
TFE_OpAddInput(op.get(), value_one.get(), status.get());
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_OpSetDevice(op.get(), device_name, status.get());
if (TF_GetCode(status.get()) != TF_OK) return;
TFE_TensorHandle* result_handles;
int num_retvals = 1;
TFE_Execute(op.get(), &result_handles, &num_retvals, status.get());
    ASSERT_EQ(TF_GetCode(status.get()), TF_INVALID_ARGUMENT)
        << TF_Message(status.get());
}
}
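// Runs a CollectiveReduce (merge_op="Add", final_op="Id") over `group_size`
// participants on `input`'s device; returns nullptr on error.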
TensorHandlePtr CollectiveSum(TFE_Context* context, TFE_TensorHandle* input,
int group_size, TF_Status* status) {
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context, "CollectiveReduce", status), TFE_DeleteOp);
if (TF_GetCode(status) != TF_OK) return nullptr;
const char* device = TFE_TensorHandleDeviceName(input, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetDevice(op.get(), device, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_OpSetAttrType(op.get(), "T", TFE_TensorHandleDataType(input));
TFE_OpSetAttrInt(op.get(), "group_size", group_size);
TFE_OpSetAttrInt(op.get(), "group_key", 0);
TFE_OpSetAttrInt(op.get(), "instance_key", 0);
const std::string merge_op("Add");
TFE_OpSetAttrString(op.get(), "merge_op", merge_op.c_str(),
merge_op.length());
const std::string final_op("Id");
TFE_OpSetAttrString(op.get(), "final_op", final_op.c_str(),
final_op.length());
TFE_OpSetAttrIntList(op.get(), "subdiv_offsets", nullptr, 0);
TFE_OpAddInput(op.get(), input, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
TFE_TensorHandle* result_handle;
int num_retvals = 1;
TFE_Execute(op.get(), &result_handle, &num_retvals, status);
if (TF_GetCode(status) != TF_OK) return nullptr;
return TensorHandlePtr(result_handle);
}
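// Runs a collective sum across the two CPUs underlying a parallel device;
// each per-device component of the result should hold 1 + 2 = 3. Exercised
// in both synchronous and asynchronous eager modes below.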
void TestCollective(bool async) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
TFE_ContextOptionsSetAsync(opts.get(), async);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(1., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(2., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr parallel_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr reduced(
CollectiveSum(context.get(), parallel_value.get(), 2, status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TensorHandlePtr, 2> result_components;
ExtractPerDeviceValues(context.get(), reduced.get(), &result_components,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(result_components[0].get(), 3.);
ExpectScalarEq<float>(result_components[1].get(), 3.);
}
TEST(PARALLEL_DEVICE, TestCollectiveSync) { TestCollective(false); }
TEST(PARALLEL_DEVICE, TestCollectiveAsync) { TestCollective(true); }
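// Builds a single-input graph function (Placeholder -> CollectiveReduce
// with merge_op="Mul") and registers it with the context under
// `function_name`.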
void RegisterCollectiveMulFunction(TFE_Context* context,
const char* function_name, int group_size,
TF_Status* status) {
std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> body(TF_NewGraph(),
TF_DeleteGraph);
TF_OperationDescription* placeholder_desc =
TF_NewOperation(body.get(), "Placeholder", "Placeholder");
TF_SetAttrType(placeholder_desc, "dtype", TF_FLOAT);
TF_Operation* placeholder_op = TF_FinishOperation(placeholder_desc, status);
if (TF_GetCode(status) != TF_OK) return;
TF_Output x{placeholder_op, 0};
TF_OperationDescription* reduce_desc =
TF_NewOperation(body.get(), "CollectiveReduce", "CollectiveReduce");
TF_SetAttrType(reduce_desc, "T", TF_FLOAT);
TF_SetAttrInt(reduce_desc, "group_size", group_size);
TF_SetAttrInt(reduce_desc, "group_key", 0);
TF_SetAttrInt(reduce_desc, "instance_key", 0);
const std::string merge_op("Mul");
TF_SetAttrString(reduce_desc, "merge_op", merge_op.c_str(),
merge_op.length());
const std::string final_op("Id");
TF_SetAttrString(reduce_desc, "final_op", final_op.c_str(),
final_op.length());
TF_SetAttrIntList(reduce_desc, "subdiv_offsets", nullptr, 0);
TF_AddInput(reduce_desc, x);
TF_Operation* reduce_op = TF_FinishOperation(reduce_desc, status);
if (TF_GetCode(status) != TF_OK) return;
TF_Operation* operations[]{placeholder_op, reduce_op};
TF_Output y{reduce_op, 0};
const char* output_name = "y";
  std::unique_ptr<TF_Function, decltype(&TF_DeleteFunction)> function(
      TF_GraphToFunction(
          body.get(), function_name,
          /*append_hash_to_fn_name=*/0, /*num_opers=*/2, operations,
          /*ninputs=*/1, &x,
          /*noutputs=*/1, &y, &output_name,
          /*opts=*/nullptr, /*description=*/"", status),
      TF_DeleteFunction);
if (TF_GetCode(status) != TF_OK) return;
TFE_ContextAddFunction(context, function.get(), status);
}
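// Executes the collective-multiply function on a parallel value; every
// component of the result should equal 7 * 9.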
TEST(PARALLEL_DEVICE, TestFunction) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* function_name = "test_reduce_mul";
RegisterCollectiveMulFunction(context.get(), function_name, 2, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr value_one(FloatTensorHandle(7., status.get()));
TensorHandlePtr value_two(FloatTensorHandle(9., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{value_one.get(), value_two.get()};
TensorHandlePtr parallel_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op(
TFE_NewOp(context.get(), function_name, status.get()), TFE_DeleteOp);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_OpSetDevice(op.get(), device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_OpAddInput(op.get(), parallel_value.get(), status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TFE_TensorHandle* raw_result_handle;
int num_retvals = 1;
TFE_Execute(op.get(), &raw_result_handle, &num_retvals, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr reduced(raw_result_handle);
std::array<TensorHandlePtr, 2> result_components;
ExtractPerDeviceValues(context.get(), reduced.get(), &result_components,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ExpectScalarEq<float>(result_components[0].get(), 7. * 9.);
ExpectScalarEq<float>(result_components[1].get(), 7. * 9.);
std::string first_device = TFE_TensorHandleBackingDeviceName(
result_components[0].get(), status.get());
ASSERT_EQ(underlying_devices[0], first_device);
std::string second_device = TFE_TensorHandleBackingDeviceName(
result_components[1].get(), status.get());
ASSERT_EQ(underlying_devices[1], second_device);
}
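// SummarizeValue on the unwrapped parallel handle should mention the
// per-device components, e.g. the value 3 stored on CPU:0.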
TEST(PARALLEL_DEVICE, TestSummaryString) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
std::unique_ptr<TFE_ContextOptions, decltype(&TFE_DeleteContextOptions)> opts(
TFE_NewContextOptions(), TFE_DeleteContextOptions);
  std::unique_ptr<TF_Buffer, decltype(&TF_DeleteBuffer)> config(
      TF_CreateConfig(/*enable_xla_compilation=*/false,
                      /*gpu_memory_allow_growth=*/true,
                      /*num_cpu_devices=*/2),
      TF_DeleteBuffer);
TFE_ContextOptionsSetConfig(opts.get(), config->data, config->length,
status.get());
std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> context(
TFE_NewContext(opts.get(), status.get()), TFE_DeleteContext);
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
const char* device_name = "/job:localhost/replica:0/task:0/device:CUSTOM:0";
std::array<const char*, 2> underlying_devices{
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:localhost/replica:0/task:0/device:CPU:1"};
RegisterParallelDevice(context.get(), device_name, underlying_devices,
status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
TensorHandlePtr cpu_value(FloatTensorHandle(3., status.get()));
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
std::array<TFE_TensorHandle*, 2> components{cpu_value.get(), cpu_value.get()};
TensorHandlePtr device_value = CreatePerDeviceValues(
context.get(), components, device_name, status.get());
ASSERT_EQ(TF_GetCode(status.get()), TF_OK) << TF_Message(status.get());
ImmediateExecutionTensorHandle* unwrapped_handle =
tensorflow::unwrap(device_value.get());
std::string summarized;
TF_ASSERT_OK(unwrapped_handle->SummarizeValue(summarized));
EXPECT_THAT(summarized, HasSubstr("\"CPU:0\": 3"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/parallel_device/parallel_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/parallel_device/parallel_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c18dc50b-44db-4d22-ad51-b6e9d1082d44 | cpp | google/tensorstore | grid_partition | tensorstore/internal/grid_partition.cc | tensorstore/internal/grid_partition_test.cc | #include "tensorstore/internal/grid_partition.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_map.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
using ::tensorstore::internal::OutputToGridCellFn;
using ::tensorstore::internal_index_space::TransformAccess;
namespace tensorstore {
namespace internal_grid_partition {
namespace {
using IndexArraySet = IndexTransformGridPartition::IndexArraySet;
using StridedSet = IndexTransformGridPartition::StridedSet;
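// Parameters for iterating over the grid cells of an index transform: the
// precomputed partitioning, the map from grid dimensions to output
// dimensions, the cell function, and the callback invoked once per cell.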
struct ConnectedSetIterateParameters {
const IndexTransformGridPartition& info;
tensorstore::span<const DimensionIndex> grid_output_dimensions;
OutputToGridCellFn output_to_grid_cell;
IndexTransformView<> transform;
absl::FunctionRef<absl::Status(
tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func;
};
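// Precomputes the grid cell index for every grid dimension whose output
// index map is constant; these components are identical for all cells.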
void InitializeConstantGridCellIndices(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell,
tensorstore::span<Index> grid_cell_indices) {
for (DimensionIndex grid_dim = 0; grid_dim < grid_output_dimensions.size();
++grid_dim) {
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
const OutputIndexMapRef<> map = transform.output_index_map(output_dim);
if (map.method() != OutputIndexMethod::constant) continue;
grid_cell_indices[grid_dim] =
output_to_grid_cell(grid_dim, map.offset(), nullptr);
}
}
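// Iterates over the grid cells reached by a single strided connected set.
// Next() writes the cell indices for the set's grid dimensions and returns
// the sub-interval of the input dimension that maps into that cell.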
class StridedSetGridCellIterator {
public:
explicit StridedSetGridCellIterator(
IndexTransformView<> transform,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell, StridedSet strided_set)
: transform_(transform),
grid_output_dimensions_(grid_output_dimensions),
output_to_grid_cell_(output_to_grid_cell),
strided_set_(strided_set) {
Reset();
}
void Reset() {
const IndexInterval domain =
transform_.input_domain()[strided_set_.input_dimension];
input_end_index_ = domain.exclusive_max();
input_index_ = domain.inclusive_min();
}
bool AtEnd() const { return input_index_ == input_end_index_; }
IndexInterval Next(tensorstore::span<Index> output_grid_cell_indices) {
assert(!AtEnd());
IndexInterval restricted_domain =
IndexInterval::UncheckedHalfOpen(input_index_, input_end_index_);
for (const DimensionIndex grid_dim :
strided_set_.grid_dimensions.index_view()) {
const DimensionIndex output_dim = grid_output_dimensions_[grid_dim];
const OutputIndexMapRef<> map = transform_.output_index_map(output_dim);
IndexInterval cell_range;
output_grid_cell_indices[grid_dim] = output_to_grid_cell_(
grid_dim, input_index_ * map.stride() + map.offset(), &cell_range);
const IndexInterval cell_domain =
GetAffineTransformDomain(cell_range, map.offset(), map.stride())
.value();
restricted_domain = Intersect(restricted_domain, cell_domain);
}
assert(!restricted_domain.empty());
input_index_ = restricted_domain.exclusive_max();
return restricted_domain;
}
private:
IndexTransformView<> transform_;
tensorstore::span<const DimensionIndex> grid_output_dimensions_;
OutputToGridCellFn output_to_grid_cell_;
StridedSet strided_set_;
Index input_end_index_;
Index input_index_;
};
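// Iterates over the precomputed partitions of an index array connected set,
// writing each partition's grid cell indices to the output span.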
class IndexArraySetIterator {
public:
IndexArraySetIterator(const IndexArraySet& index_array_set)
: grid_dimensions_(index_array_set.grid_dimensions),
grid_cell_indices_(index_array_set.grid_cell_indices),
partition_end_index_(index_array_set.num_partitions()),
partition_index_(0) {}
void Reset() { partition_index_ = 0; }
bool AtEnd() const { return partition_index_ == partition_end_index_; }
Index Next(tensorstore::span<Index> output_grid_cell_indices) {
assert(!AtEnd());
const Index grid_cell_indices_offset =
partition_index_ * grid_dimensions_.count();
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dimensions_.index_view()) {
output_grid_cell_indices[grid_dim] =
grid_cell_indices_[grid_cell_indices_offset + grid_i++];
}
return partition_index_++;
}
private:
DimensionSet grid_dimensions_;
tensorstore::span<const Index> grid_cell_indices_;
Index partition_end_index_;
Index partition_index_;
};
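// Recursively iterates over all index array sets, then all strided sets,
// and invokes the callback once per complete assignment of grid cell
// indices, with `cell_transform_` updated to map the cell's synthetic input
// domain back to the original input domain.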
class ConnectedSetIterateHelper {
public:
explicit ConnectedSetIterateHelper(ConnectedSetIterateParameters params)
: params_(std::move(params)),
grid_cell_indices_(params_.grid_output_dimensions.size()),
cell_transform_(internal_grid_partition::InitializeCellTransform(
params_.info, params_.transform)) {
InitializeConstantGridCellIndices(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, grid_cell_indices_);
}
absl::Status Iterate() { return IterateOverIndexArraySets(0); }
private:
absl::Status IterateOverIndexArraySets(DimensionIndex set_i) {
if (set_i == params_.info.index_array_sets().size()) {
return IterateOverStridedSets(0);
}
const IndexArraySet& index_array_set =
params_.info.index_array_sets()[set_i];
IndexArraySetIterator iterator(index_array_set);
while (!iterator.AtEnd()) {
Index partition_i = iterator.Next(grid_cell_indices_);
UpdateCellTransformForIndexArraySetPartition(
index_array_set, set_i, partition_i, cell_transform_.get());
TENSORSTORE_RETURN_IF_ERROR(IterateOverIndexArraySets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status IterateOverStridedSets(DimensionIndex set_i) {
if (set_i == params_.info.strided_sets().size()) return InvokeCallback();
StridedSetGridCellIterator iterator(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, params_.info.strided_sets()[set_i]);
const DimensionIndex cell_input_dim =
set_i + params_.info.index_array_sets().size();
while (!iterator.AtEnd()) {
auto restricted_domain = iterator.Next(grid_cell_indices_);
cell_transform_->input_origin()[cell_input_dim] =
restricted_domain.inclusive_min();
cell_transform_->input_shape()[cell_input_dim] = restricted_domain.size();
TENSORSTORE_RETURN_IF_ERROR(IterateOverStridedSets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status InvokeCallback() {
internal_index_space::DebugCheckInvariants(cell_transform_.get());
auto status = params_.func(
grid_cell_indices_,
TransformAccess::Make<IndexTransformView<>>(cell_transform_.get()));
cell_transform_ = MutableRep(std::move(cell_transform_));
return status;
}
ConnectedSetIterateParameters params_;
absl::FixedArray<Index, internal::kNumInlinedDims> grid_cell_indices_;
internal_index_space::TransformRep::Ptr<> cell_transform_;
};
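// Invokes `callback` with intervals of grid cell indices reached by a
// single_input_dimension output map. With unit stride the cells form one
// contiguous interval; otherwise cells are visited in input order and
// adjacent cell indices are merged into intervals.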
bool GetStridedGridCellRanges(
IndexTransformView<> transform, OutputToGridCellFn output_to_grid_cell,
DimensionIndex grid_dim, DimensionIndex output_dim,
absl::FunctionRef<bool(IndexInterval grid_cell_range)> callback) {
const auto output_map = transform.output_index_maps()[output_dim];
assert(output_map.method() == OutputIndexMethod::single_input_dimension);
const Index output_offset = output_map.offset();
const Index output_stride = output_map.stride();
const DimensionIndex input_dim = output_map.input_dimension();
const IndexInterval input_domain = transform.domain().box()[input_dim];
if (output_map.stride() == 1 || output_map.stride() == -1) {
auto output_range = tensorstore::GetAffineTransformRange(
input_domain, output_offset, output_stride)
.value();
Index min_cell_index =
output_to_grid_cell(grid_dim, output_range.inclusive_min(), nullptr);
Index max_cell_index =
output_to_grid_cell(grid_dim, output_range.inclusive_max(), nullptr);
return callback(
IndexInterval::UncheckedClosed(min_cell_index, max_cell_index));
}
IndexInterval prev_interval;
for (Index input_index = input_domain.inclusive_min();
input_index < input_domain.exclusive_max();) {
IndexInterval output_range;
Index grid_cell = output_to_grid_cell(
grid_dim, input_index * output_stride + output_offset, &output_range);
const IndexInterval cell_domain =
GetAffineTransformDomain(output_range, output_offset, output_stride)
.value();
assert(!cell_domain.empty());
if (grid_cell == prev_interval.exclusive_min() ||
grid_cell == prev_interval.exclusive_max()) {
prev_interval = IndexInterval::UncheckedClosed(
std::min(prev_interval.inclusive_min(), grid_cell),
std::max(prev_interval.inclusive_max(), grid_cell));
} else {
if (IsFinite(prev_interval)) {
if (!callback(prev_interval)) return false;
}
prev_interval = IndexInterval::UncheckedClosed(grid_cell, grid_cell);
}
input_index = cell_domain.exclusive_max();
}
return callback(prev_interval);
}
struct GetGridCellRangesIterateParameters {
const IndexTransformGridPartition& info;
tensorstore::span<const DimensionIndex> grid_output_dimensions;
OutputToGridCellFn output_to_grid_cell;
IndexTransformView<> transform;
absl::FunctionRef<absl::Status(BoxView<> bounds)> func;
DimensionIndex outer_prefix_rank;
BoxView<> grid_bounds;
tensorstore::span<const IndexInterval> inner_intervals;
tensorstore::span<const StridedSet*> strided_sets_in_prefix;
tensorstore::span<const IndexArraySet*> index_array_sets_in_prefix;
};
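// Enumerates grid cells along the "outer prefix" dimensions (index array
// sets first, then strided sets) and for each combination invokes the
// callback with a box whose prefix dimensions have extent 1, whose
// dimension at `outer_prefix_rank` is filled from `inner_intervals`, and
// whose remaining dimensions cover the full grid bounds.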
class GetGridCellRangesIterateHelper {
public:
explicit GetGridCellRangesIterateHelper(
GetGridCellRangesIterateParameters params)
: params_(params) {
InitializeConstantGridCellIndices(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell,
tensorstore::span<Index>(&grid_bounds_origin_[0],
params_.transform.output_rank()));
for (DimensionIndex i = 0; i < params.outer_prefix_rank; ++i) {
grid_bounds_shape_[i] = 1;
}
for (DimensionIndex i = params.outer_prefix_rank + 1,
rank = params.grid_bounds.rank();
i < rank; ++i) {
grid_bounds_origin_[i] = params.grid_bounds.origin()[i];
grid_bounds_shape_[i] = params.grid_bounds.shape()[i];
}
if (params.inner_intervals.size() == 1) {
const auto& inner_interval = params.inner_intervals[0];
grid_bounds_origin_[params.outer_prefix_rank] =
inner_interval.inclusive_min();
grid_bounds_shape_[params.outer_prefix_rank] = inner_interval.size();
}
}
absl::Status Iterate() { return IterateOverIndexArraySets(0); }
private:
GetGridCellRangesIterateParameters params_;
Index grid_bounds_origin_[kMaxRank];
Index grid_bounds_shape_[kMaxRank];
absl::Status IterateOverIndexArraySets(DimensionIndex set_i) {
if (set_i == params_.index_array_sets_in_prefix.size()) {
return IterateOverStridedSets(0);
}
const IndexArraySet& index_array_set =
*params_.index_array_sets_in_prefix[set_i];
const auto grid_dimensions = index_array_set.grid_dimensions;
const DimensionIndex num_grid_dimensions = grid_dimensions.count();
for (Index partition_i = 0,
num_partitions = index_array_set.num_partitions();
partition_i < num_partitions; ++partition_i) {
const Index grid_cell_indices_offset = partition_i * num_grid_dimensions;
DimensionIndex grid_i = 0;
for (DimensionIndex grid_dim : grid_dimensions.index_view()) {
grid_bounds_origin_[grid_dim] =
index_array_set
.grid_cell_indices[grid_cell_indices_offset + grid_i++];
}
TENSORSTORE_RETURN_IF_ERROR(IterateOverIndexArraySets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status IterateOverStridedSets(DimensionIndex set_i) {
if (set_i == params_.strided_sets_in_prefix.size()) return InvokeCallback();
StridedSetGridCellIterator iterator(
params_.transform, params_.grid_output_dimensions,
params_.output_to_grid_cell, *params_.strided_sets_in_prefix[set_i]);
while (!iterator.AtEnd()) {
iterator.Next(grid_bounds_origin_);
TENSORSTORE_RETURN_IF_ERROR(IterateOverStridedSets(set_i + 1));
}
return absl::OkStatus();
}
absl::Status InvokeCallback() {
MutableBoxView<> bounds(params_.grid_bounds.rank(), grid_bounds_origin_,
grid_bounds_shape_);
if (params_.inner_intervals.size() == 1) {
return params_.func(bounds);
}
DimensionIndex outer_prefix_rank = params_.outer_prefix_rank;
for (const auto& inner_interval : params_.inner_intervals) {
bounds[outer_prefix_rank] = inner_interval;
TENSORSTORE_RETURN_IF_ERROR(params_.func(bounds));
}
return absl::OkStatus();
}
};
}
}
namespace internal {
absl::Status PartitionIndexTransformOverGrid(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
OutputToGridCellFn output_to_grid_cell, IndexTransformView<> transform,
absl::FunctionRef<
absl::Status(tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform)>
func) {
internal_grid_partition::IndexTransformGridPartition partition_info;
auto status = internal_grid_partition::PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell, partition_info);
if (!status.ok()) return status;
return internal_grid_partition::ConnectedSetIterateHelper(
{partition_info,
grid_output_dimensions,
output_to_grid_cell,
transform,
std::move(func)})
.Iterate();
}
}
namespace internal_grid_partition {
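// Reports the grid cells intersected by `transform` as a set of boxes.
// Trailing grid dimensions that are either fully covered or reachable
// through a single one-to-one connected set are collapsed into
// `inner_intervals`; the remaining prefix dimensions are enumerated cell by
// cell by GetGridCellRangesIterateHelper.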
absl::Status GetGridCellRanges(
const IndexTransformGridPartition& grid_partition,
tensorstore::span<const DimensionIndex> grid_output_dimensions,
BoxView<> grid_bounds, OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback) {
assert(grid_output_dimensions.size() == grid_bounds.rank());
if (transform.domain().box().is_empty()) {
return absl::OkStatus();
}
if (grid_output_dimensions.empty()) {
return callback({});
}
std::array<DimensionIndex, kMaxRank> dim_to_indexed_set;
dim_to_indexed_set.fill(-1);
DimensionSet one_to_one_grid_dims;
for (const auto& strided_set : grid_partition.strided_sets()) {
if (strided_set.grid_dimensions.count() != 1) {
continue;
}
const DimensionIndex grid_dim =
strided_set.grid_dimensions.index_view().front();
one_to_one_grid_dims[grid_dim] = true;
}
for (size_t i = 0; i < grid_partition.index_array_sets().size(); ++i) {
const auto& set = grid_partition.index_array_sets()[i];
if (set.grid_dimensions.count() != 1) {
continue;
}
const DimensionIndex grid_dim = set.grid_dimensions.index_view().front();
one_to_one_grid_dims[grid_dim] = true;
dim_to_indexed_set[grid_dim] = i;
}
absl::InlinedVector<IndexInterval, 1> inner_intervals;
DimensionSet grid_dimensions_outside_prefix;
DimensionIndex range_queryable_grid_dim = grid_output_dimensions.size() - 1;
for (; range_queryable_grid_dim >= 0; --range_queryable_grid_dim) {
const DimensionIndex grid_dim = range_queryable_grid_dim;
const IndexInterval grid_interval = grid_bounds[grid_dim];
if (grid_interval.size() == 1) {
inner_intervals.clear();
inner_intervals.push_back(grid_interval);
continue;
}
if (!one_to_one_grid_dims[grid_dim]) {
break;
}
grid_dimensions_outside_prefix[grid_dim] = true;
const DimensionIndex output_dim = grid_output_dimensions[grid_dim];
inner_intervals.clear();
DimensionIndex indexed_set_i = dim_to_indexed_set[grid_dim];
if (indexed_set_i == -1) {
internal_grid_partition::GetStridedGridCellRanges(
transform, output_to_grid_cell, grid_dim, output_dim,
[&](IndexInterval grid_cell_range) {
inner_intervals.push_back(grid_cell_range);
return true;
});
} else {
const auto& set = grid_partition.index_array_sets()[indexed_set_i];
const auto& grid_cell_indices = set.grid_cell_indices;
size_t i = 0;
while (i < grid_cell_indices.size()) {
size_t last_i = i;
while (last_i + 1 < grid_cell_indices.size() &&
grid_cell_indices[last_i] + 1 == grid_cell_indices[last_i + 1]) {
++last_i;
}
inner_intervals.push_back(IndexInterval::UncheckedClosed(
grid_cell_indices[i], grid_cell_indices[last_i]));
i = last_i + 1;
}
}
if (inner_intervals.size() == 1 &&
tensorstore::Contains(inner_intervals[0], grid_interval)) {
inner_intervals.clear();
inner_intervals.push_back(grid_interval);
continue;
}
--range_queryable_grid_dim;
break;
}
const StridedSet* strided_sets_in_prefix_storage[kMaxRank];
const IndexArraySet* index_array_sets_in_prefix_storage[kMaxRank];
const auto get_sets_in_prefix = [&](auto sets, auto* buffer) {
ptrdiff_t i = 0;
for (const auto& set : sets) {
if (grid_dimensions_outside_prefix[set.grid_dimensions.index_view()
.front()]) {
continue;
}
buffer[i++] = &set;
}
return tensorstore::span(buffer, i);
};
auto strided_sets_in_prefix = get_sets_in_prefix(
grid_partition.strided_sets(), strided_sets_in_prefix_storage);
auto index_array_sets_in_prefix = get_sets_in_prefix(
grid_partition.index_array_sets(), index_array_sets_in_prefix_storage);
if (range_queryable_grid_dim == grid_output_dimensions.size() - 1) {
inner_intervals.push_back(grid_bounds[range_queryable_grid_dim]);
}
internal_grid_partition::GetGridCellRangesIterateHelper iterate_helper(
internal_grid_partition::GetGridCellRangesIterateParameters{
grid_partition, grid_output_dimensions, output_to_grid_cell,
transform, callback, range_queryable_grid_dim + 1, grid_bounds,
inner_intervals, strided_sets_in_prefix, index_array_sets_in_prefix});
return iterate_helper.Iterate();
}
}
namespace internal {
absl::Status GetGridCellRanges(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
BoxView<> grid_bounds, OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform,
absl::FunctionRef<absl::Status(BoxView<> bounds)> callback) {
using internal_grid_partition::StridedSet;
assert(grid_output_dimensions.size() == grid_bounds.rank());
if (transform.domain().box().is_empty()) {
return absl::OkStatus();
}
if (grid_output_dimensions.empty()) {
return callback({});
}
internal_grid_partition::IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(
internal_grid_partition::PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell,
grid_partition));
return internal_grid_partition::GetGridCellRanges(
grid_partition, grid_output_dimensions, grid_bounds, output_to_grid_cell,
transform, callback);
}
}
} | #include "tensorstore/internal/grid_partition.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/grid_partition_impl.h"
#include "tensorstore/internal/irregular_grid.h"
#include "tensorstore/internal/regular_grid.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::MakeArray;
using ::tensorstore::Result;
using ::tensorstore::internal::GetGridCellRanges;
using ::tensorstore::internal::IrregularGrid;
using ::tensorstore::internal::OutputToGridCellFn;
using ::tensorstore::internal_grid_partition::IndexTransformGridPartition;
using ::tensorstore::internal_grid_partition::
PrePartitionIndexTransformOverGrid;
using ::tensorstore::internal_grid_partition::RegularGridRef;
using ::testing::ElementsAre;
namespace partition_tests {
using R = std::pair<std::vector<Index>, IndexTransform<>>;
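// Runs PartitionIndexTransformOverGrid over a regular grid and collects the
// (grid_cell_indices, cell_transform) pairs, cross-checking each cell
// transform against IndexTransformGridPartition::GetCellTransform.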
std::vector<R> GetPartitions(
const std::vector<DimensionIndex>& grid_output_dimensions,
const std::vector<Index>& grid_cell_shape, IndexTransformView<> transform) {
std::vector<R> results;
IndexTransformGridPartition info;
RegularGridRef grid{grid_cell_shape};
TENSORSTORE_CHECK_OK(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, grid, info));
TENSORSTORE_CHECK_OK(
tensorstore::internal::PartitionIndexTransformOverGrid(
grid_output_dimensions, grid, transform,
[&](tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
auto cell_transform_direct = info.GetCellTransform(
transform, grid_cell_indices, grid_output_dimensions,
[&](DimensionIndex dim, Index cell_index) {
return grid.GetCellOutputInterval(dim, cell_index);
});
EXPECT_EQ(cell_transform_direct, cell_transform);
results.emplace_back(std::vector<Index>(grid_cell_indices.begin(),
grid_cell_indices.end()),
IndexTransform<>(cell_transform));
return absl::OkStatus();
}));
return results;
}
TEST(PartitionIndexTransformOverRegularGrid, ConstantOneDimensional) {
const auto results = GetPartitions({0}, {2},
IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({4})
.output_constant(0, 3)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({2})
.input_shape({4})
.output_single_input_dimension(0, 0)
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, ConstantTwoDimensional) {
const auto results = GetPartitions({0, 1}, {2, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({2, 3})
.input_shape({4, 5})
.output_constant(0, 3)
.output_constant(1, 7)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({2, 3})
.input_shape({4, 5})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, OneDimensionalUnitStride) {
const auto results = GetPartitions({0}, {2},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({5})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()},
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({-2})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()},
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, TwoDimensionalIdentity) {
const auto results = GetPartitions({0, 1}, {20, 10},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({30, 30})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{0, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 10})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 20})
.input_shape({20, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 0})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 10})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({20, 20})
.input_shape({10, 10})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleStridedDimension) {
const auto results =
GetPartitions({0}, {10},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({6})
.output_single_input_dimension(0, 5, 3, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()},
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({-1})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, DiagonalStridedDimensions) {
const auto results =
GetPartitions({0, 1}, {10, 8},
IndexTransformBuilder<>(1, 2)
.input_origin({-4})
.input_shape({6})
.output_single_input_dimension(0, 5, 3, 0)
.output_single_input_dimension(1, 7, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({-4})
.input_shape({3})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({-1})
.input_shape({1})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_identity_transform()
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleIndexArrayDimension) {
const auto results =
GetPartitions({0}, {3},
IndexTransformBuilder<>(1, 1)
.input_origin({100})
.input_shape({8})
.output_index_array(
0, 0, 1, MakeArray<Index>({1, 2, 3, 4, 5, 6, 7, 8}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 101}))
.Finalize()
.value()},
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({102, 103, 104}))
.Finalize()
.value()},
R{{2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({105, 106, 107}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, SingleIndexArrayDimensionStrided) {
const auto results = GetPartitions(
{0}, {10},
IndexTransformBuilder<>(1, 1)
.input_origin({100})
.input_shape({6})
.output_index_array(0, 5, 3, MakeArray<Index>({10, 3, 4, -5, -6, 11}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({104}))
.Finalize()
.value()},
R{{-1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({103}))
.Finalize()
.value()},
R{{1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({101, 102}))
.Finalize()
.value()},
R{{3},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 105}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, TwoIndexArrayDimensions) {
const auto results = GetPartitions(
{0, 1}, {10, 8},
IndexTransformBuilder<>(1, 2)
.input_origin({100})
.input_shape({6})
.output_index_array(0, 5, 3, MakeArray<Index>({10, 3, 4, -5, -6, 11}))
.output_index_array(1, 4, -2, MakeArray<Index>({5, 1, 7, -3, -2, 5}))
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-2, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({104}))
.Finalize()
.value()},
R{{-1, 1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({103}))
.Finalize()
.value()},
R{{1, -2},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({102}))
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({1})
.output_index_array(0, 0, 1, MakeArray<Index>({101}))
.Finalize()
.value()},
R{{3, -1},
IndexTransformBuilder<>(1, 1)
.input_origin({0})
.input_shape({2})
.output_index_array(0, 0, 1, MakeArray<Index>({100, 105}))
.Finalize()
.value()}));
}
TEST(PartitionIndexTransformOverRegularGrid, IndexArrayAndStridedDimensions) {
const auto results = GetPartitions(
{0, 1}, {10, 8},
IndexTransformBuilder<>(2, 2)
.input_origin({-4, 100})
.input_shape({6, 3})
.output_index_array(0, 5, 3, MakeArray<Index>({{10, 3, 4}}))
.output_single_input_dimension(1, 4, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}, {102}}))
.Finalize()
.value()},
R{{1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -1})
.input_shape({2, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}, {102}}))
.Finalize()
.value()},
R{{3, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -1})
.input_shape({1, 3})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()}));
}
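// Like GetPartitions above, but partitions over an IrregularGrid.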
std::vector<R> GetIrregularPartitions(
const std::vector<DimensionIndex>& grid_output_dimensions,
const IrregularGrid& grid, IndexTransformView<> transform) {
std::vector<R> results;
TENSORSTORE_CHECK_OK(tensorstore::internal::PartitionIndexTransformOverGrid(
grid_output_dimensions, grid, transform,
[&](tensorstore::span<const Index> grid_cell_indices,
IndexTransformView<> cell_transform) {
results.emplace_back(std::vector<Index>(grid_cell_indices.begin(),
grid_cell_indices.end()),
IndexTransform<>(cell_transform));
return absl::OkStatus();
}));
return results;
}
TEST(PartitionIndexTransformOverIrregularGrid, TwoDimensionalIdentity) {
const std::vector<DimensionIndex> grid_output_dimensions{0, 1};
std::vector<Index> dimension0{15};
std::vector<Index> dimension1{-10, 10, 100};
IrregularGrid grid({dimension0, dimension1});
std::vector<R> results =
GetIrregularPartitions(grid_output_dimensions, grid,
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({30, 30})
.output_identity_transform()
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(
R{{-1, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({15, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{-1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 10})
.input_shape({15, 20})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 0},
IndexTransformBuilder<>(2, 2)
.input_origin({15, 0})
.input_shape({15, 10})
.output_identity_transform()
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({15, 10})
.input_shape({15, 20})
.output_identity_transform()
.Finalize()
.value()}
));
}
TEST(PartitionIndexTransformOverIrregularGrid, IndexArrayAndStridedDimensions) {
std::vector<Index> dimension0{10, 15, 20, 30, 50};
std::vector<Index> dimension1{0, 1, 5, 10, 13};
IrregularGrid grid({dimension0, dimension1});
std::vector<R> results = GetIrregularPartitions(
{0, 1}, grid,
IndexTransformBuilder<>(2, 2)
.input_origin({-4, 100})
.input_shape({6, 3})
.output_index_array(0, 5, 3, MakeArray<Index>({{10, 3, 4}}))
.output_single_input_dimension(1, 4, -2, 0)
.Finalize()
.value());
EXPECT_THAT(
results,
ElementsAre(R{{0, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{0, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{0, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{101}}))
.Finalize()
.value()},
R{{1, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{1, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{1, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{102}}))
.Finalize()
.value()},
R{{3, 3},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -4})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 2},
IndexTransformBuilder<>(2, 2)
.input_origin({0, -2})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()},
R{{3, 1},
IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({1, 2})
.output_single_input_dimension(0, 1)
.output_index_array(1, 0, 1, MakeArray<Index>({{100}}))
.Finalize()
.value()}
));
}
}
namespace get_grid_cell_ranges_tests {
using R = Box<>;
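// Collects the boxes produced by GetGridCellRanges for the given grid
// bounds and transform.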
Result<std::vector<R>> GetRanges(
tensorstore::span<const DimensionIndex> grid_output_dimensions,
BoxView<> grid_bounds, OutputToGridCellFn output_to_grid_cell,
IndexTransformView<> transform) {
std::vector<R> results;
IndexTransformGridPartition grid_partition;
TENSORSTORE_RETURN_IF_ERROR(PrePartitionIndexTransformOverGrid(
transform, grid_output_dimensions, output_to_grid_cell, grid_partition));
TENSORSTORE_RETURN_IF_ERROR(GetGridCellRanges(
grid_output_dimensions, grid_bounds, output_to_grid_cell, transform,
[&](BoxView<> bounds) -> absl::Status {
results.emplace_back(bounds);
return absl::OkStatus();
}));
return results;
}
TEST(GetGridCellRangesTest, Rank0) {
EXPECT_THAT(GetRanges({}, {},
RegularGridRef{{}},
IndexTransformBuilder(0, 0).Finalize().value()),
::testing::Optional(ElementsAre(R{})));
}
TEST(GetGridCellRangesTest, Rank1Unconstrained) {
EXPECT_THAT(GetRanges({{0}},
Box<>{{0}, {10}},
RegularGridRef{{{5}}},
IndexTransformBuilder(1, 1)
.input_shape({50})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R{{0}, {10}})));
}
TEST(GetGridCellRangesTest, Rank1Constrained) {
EXPECT_THAT(GetRanges({{0}},
Box<>{{0}, {10}},
RegularGridRef{{{5}}},
IndexTransformBuilder(1, 1)
.input_origin({7})
.input_shape({30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R({1}, {7}))));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedBothDims) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 10}}},
IndexTransformBuilder(2, 2)
.input_origin({6, 7})
.input_shape({8, 30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 4}},
R{{2, 0}, {1, 4}}
)));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedFirstDimOnly) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({6, 0})
.input_shape({8, 50})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(R{{1, 0}, {2, 10}})));
}
TEST(GetGridCellRangesTest, Rank2ConstrainedSecondDimOnly) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({25, 30})
.output_identity_transform()
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{0, 1}, {1, 7}},
R{{1, 1}, {1, 7}},
R{{2, 1}, {1, 7}},
R{{3, 1}, {1, 7}},
R{{4, 1}, {1, 7}}
)));
}
TEST(GetGridCellRangesTest, Rank2IndexArrayFirstDimUnconstrainedSecondDim) {
EXPECT_THAT(
GetRanges(
{{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({0, 0})
.input_shape({3, 50})
.output_index_array(0, 0, 1, MakeArray<Index>({{6}, {15}, {20}}))
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 10}},
R{{3, 0}, {2, 10}}
)));
}
TEST(GetGridCellRangesTest, Rank2IndexArrayFirstDimConstrainedSecondDim) {
EXPECT_THAT(
GetRanges(
{{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 5}}},
IndexTransformBuilder(2, 2)
.input_origin({0, 7})
.input_shape({3, 30})
.output_index_array(0, 0, 1, MakeArray<Index>({{6}, {15}, {20}}))
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 1}, {1, 7}},
R{{3, 1}, {1, 7}},
R{{4, 1}, {1, 7}}
)));
}
TEST(GetGridCellRangesTest, Rank2Diagonal) {
EXPECT_THAT(GetRanges({{0, 1}},
Box<>{{0, 0}, {5, 10}},
RegularGridRef{{{5, 10}}},
IndexTransformBuilder(1, 2)
.input_origin({6})
.input_shape({8})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 0)
.Finalize()
.value()),
::testing::Optional(ElementsAre(
R{{1, 0}, {1, 1}},
R{{2, 1}, {1, 1}}
)));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_partition.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grid_partition_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e97785da-47c3-461f-9298-9579bfd4fba6 | cpp | google/arolla | frame_iter | arolla/qtype/array_like/frame_iter.cc | arolla/qtype/array_like/frame_iter_test.cc | #include "arolla/qtype/array_like/frame_iter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
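// Groups input arrays by their array QType and creates one
// BatchToFramesCopier per distinct type, registering each
// (array, scalar slot) mapping with the copier for its type.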
absl::StatusOr<std::vector<std::unique_ptr<BatchToFramesCopier>>>
CreateInputCopiers(absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots) {
if (input_arrays.size() != input_scalar_slots.size()) {
return absl::InvalidArgumentError(
absl::StrFormat("size of input_arrays and input_scalar_slots should be "
"the same: %d vs %d",
input_arrays.size(), input_scalar_slots.size()));
}
absl::flat_hash_map<QTypePtr, std::unique_ptr<BatchToFramesCopier>>
input_copiers;
for (size_t i = 0; i < input_arrays.size(); ++i) {
QTypePtr array_type = input_arrays[i].GetType();
if (!input_copiers.contains(array_type)) {
ASSIGN_OR_RETURN(input_copiers[array_type],
CreateBatchToFramesCopier(array_type));
}
RETURN_IF_ERROR(input_copiers[array_type]->AddMapping(
input_arrays[i], input_scalar_slots[i]));
}
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers_vector;
for (auto& [_, v] : input_copiers)
input_copiers_vector.push_back(std::move(v));
return input_copiers_vector;
}
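// Symmetric to CreateInputCopiers: one BatchFromFramesCopier per distinct
// output array QType, with the (scalar slot, array slot) mappings
// registered on it.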
absl::StatusOr<std::vector<std::unique_ptr<BatchFromFramesCopier>>>
CreateOutputCopiers(absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
RawBufferFactory* buffer_factory) {
if (output_array_slots.size() != output_scalar_slots.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"size of output_array_slots and output_scalar_slots should be "
"the same: %d vs %d",
output_array_slots.size(), output_scalar_slots.size()));
}
absl::flat_hash_map<QTypePtr, std::unique_ptr<BatchFromFramesCopier>>
output_copiers;
for (size_t i = 0; i < output_array_slots.size(); ++i) {
QTypePtr array_type = output_array_slots[i].GetType();
if (!output_copiers.contains(array_type)) {
ASSIGN_OR_RETURN(output_copiers[array_type],
CreateBatchFromFramesCopier(array_type, buffer_factory));
}
RETURN_IF_ERROR(output_copiers[array_type]->AddMapping(
output_scalar_slots[i], output_array_slots[i]));
}
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers_vector;
for (auto& [_, v] : output_copiers)
output_copiers_vector.push_back(std::move(v));
return output_copiers_vector;
}
}
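// Validates that all input arrays agree on a single row count (and that it
// matches `options.row_count` when both are given) before constructing the
// iterator. A minimal usage sketch, mirroring the accompanying unit tests:
//
//   ASSIGN_OR_RETURN(auto it, FrameIterator::Create(
//       input_arrays, input_scalar_slots, output_array_slots,
//       output_scalar_slots, &scalar_layout, {}));
//   it.ForEachFrame([&](FramePtr frame) { /* per-row computation */ });
//   RETURN_IF_ERROR(it.StoreOutput(output_frame));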
absl::StatusOr<FrameIterator> FrameIterator::Create(
absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots,
absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
const FrameLayout* scalar_layout, FrameIterator::Options options) {
ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers,
CreateInputCopiers(input_arrays, input_scalar_slots));
RawBufferFactory* buf_factory = options.buffer_factory;
if (!buf_factory) buf_factory = GetHeapBufferFactory();
ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers,
CreateOutputCopiers(output_array_slots, output_scalar_slots,
buf_factory));
std::optional<int64_t> row_count = std::nullopt;
for (const auto& copier : input_copiers) {
if (!copier->row_count() ||
(row_count && *row_count != *copier->row_count())) {
return absl::InvalidArgumentError(
absl::StrFormat("input arrays have different sizes: %d vs %d",
*row_count, *copier->row_count()));
}
row_count = copier->row_count();
}
if (!row_count.has_value()) {
if (!options.row_count.has_value()) {
      return absl::InvalidArgumentError(
          "options.row_count must be specified if there are no input arrays");
}
row_count = options.row_count;
} else if (options.row_count.has_value() &&
*options.row_count != *row_count) {
return absl::InvalidArgumentError(
absl::StrFormat("sizes of input arrays don't correspond "
"to options.row_count: %d vs %d",
*row_count, *options.row_count));
}
return FrameIterator(std::move(input_copiers), std::move(output_copiers),
*row_count, options.frame_buffer_count, scalar_layout);
}
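// Rounds the scalar frame size up to a multiple of 8 bytes so that
// consecutive frames in the shared buffer stay aligned, then allocates and
// initializes up to `frame_buffer_count` frames and starts all copiers.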
FrameIterator::FrameIterator(
std::vector<std::unique_ptr<BatchToFramesCopier>>&& input_copiers,
std::vector<std::unique_ptr<BatchFromFramesCopier>>&& output_copiers,
size_t row_count, size_t frame_buffer_count,
const FrameLayout* scalar_layout)
: row_count_(row_count),
input_copiers_(std::move(input_copiers)),
output_copiers_(std::move(output_copiers)),
scalar_layout_(scalar_layout) {
frame_buffer_count = std::min(row_count, frame_buffer_count);
dense_scalar_layout_size_ = (scalar_layout_->AllocSize() + 7) & ~7;
buffer_.resize(dense_scalar_layout_size_ * frame_buffer_count);
for (size_t i = 0; i < frame_buffer_count; ++i) {
void* alloc_ptr = GetAllocByIndex(i);
scalar_layout->InitializeAlignedAlloc(alloc_ptr);
frames_.emplace_back(alloc_ptr, scalar_layout);
const_frames_.emplace_back(alloc_ptr, scalar_layout);
}
for (auto& copier : input_copiers_) copier->Start();
for (auto& copier : output_copiers_) copier->Start(row_count);
}
FrameIterator::~FrameIterator() {
for (size_t i = 0; i < frames_.size(); ++i) {
scalar_layout_->DestroyAlloc(GetAllocByIndex(i));
}
}
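// Finalizes every output copier, writing the accumulated per-row results into
// the arrays referenced by `output_frame`.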
absl::Status FrameIterator::StoreOutput(FramePtr output_frame) {
for (std::unique_ptr<BatchFromFramesCopier>& copier : output_copiers_) {
RETURN_IF_ERROR(copier->Finalize(output_frame));
}
return absl::OkStatus();
}
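// Fills the first `frames_count` scalar frames with the next batch of rows
// from the input arrays.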
void FrameIterator::PreloadFrames(size_t frames_count) {
for (auto& copier : input_copiers_) {
copier->CopyNextBatch({frames_.data(), frames_count});
}
}
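// Hands the outputs of the first `frames_count` processed frames to the
// output copiers.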
void FrameIterator::SaveOutputsOfProcessedFrames(size_t frames_count) {
for (auto& copier : output_copiers_) {
absl::Status status =
copier->CopyNextBatch({const_frames_.data(), frames_count});
DCHECK_OK(status);
}
}
} | #include "arolla/qtype/array_like/frame_iter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/threading.h"
namespace arolla {
namespace {
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Test;
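// End-to-end test: four parallel input arrays are streamed row by row through
// scalar frames, mutated in place, and collected into output arrays; the same
// pipeline is then re-run with 1-4 worker threads.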
TEST(FrameIterator, Iterate) {
FrameLayout::Builder scalar_bldr;
auto scalar_f_slot1 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_i_slot1 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_i_slot2 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_f_slot2 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {
TypedSlot::FromSlot(scalar_f_slot1), TypedSlot::FromSlot(scalar_i_slot1),
TypedSlot::FromSlot(scalar_i_slot2), TypedSlot::FromSlot(scalar_f_slot2)};
DenseArray<float> arr_f1 =
CreateDenseArray<float>({1.5, std::nullopt, 2.5, 3.5});
DenseArray<int64_t> arr_i1 = CreateDenseArray<int64_t>({3, 4, 5, 6});
DenseArray<int64_t> arr_i2 =
CreateDenseArray<int64_t>({2, std::nullopt, 0, std::nullopt});
DenseArray<float> arr_f2 =
CreateDenseArray<float>({3.2, 2.2, std::nullopt, 1.2});
FrameLayout::Builder vector_bldr;
auto arr_output_f1 = vector_bldr.AddSlot<DenseArray<float>>();
auto arr_output_i1 = vector_bldr.AddSlot<DenseArray<int64_t>>();
auto arr_output_i2 = vector_bldr.AddSlot<DenseArray<int64_t>>();
auto arr_output_f2 = vector_bldr.AddSlot<DenseArray<float>>();
auto output_vector_layout = std::move(vector_bldr).Build();
std::vector<TypedRef> input_refs = {
TypedRef::FromValue(arr_f1), TypedRef::FromValue(arr_i1),
TypedRef::FromValue(arr_i2), TypedRef::FromValue(arr_f2)};
std::vector<TypedSlot> output_slots = {
TypedSlot::FromSlot(arr_output_f1), TypedSlot::FromSlot(arr_output_i1),
TypedSlot::FromSlot(arr_output_i2), TypedSlot::FromSlot(arr_output_f2)};
auto scalar_processing_fn = [&](FramePtr frame) {
OptionalValue<float> f1 = frame.Get(scalar_f_slot1);
OptionalValue<float> f2 = frame.Get(scalar_f_slot2);
if (f1.present) frame.Set(scalar_f_slot1, f1.value + 1.0);
if (f2.present) frame.Set(scalar_f_slot2, f2.value + 2.0);
OptionalValue<int64_t> i1 = frame.Get(scalar_i_slot1);
OptionalValue<int64_t> i2 = frame.Get(scalar_i_slot2);
if (i1.present) frame.Set(scalar_i_slot1, i1.value + 3);
if (i2.present) frame.Set(scalar_i_slot2, i2.value + 4);
};
auto check_output_fn = [&](FrameIterator& frame_iterator) {
MemoryAllocation alloc(&output_vector_layout);
FramePtr output_frame = alloc.frame();
EXPECT_OK(frame_iterator.StoreOutput(output_frame));
EXPECT_THAT(output_frame.Get(arr_output_f1),
ElementsAre(2.5, std::nullopt, 3.5, 4.5));
EXPECT_THAT(output_frame.Get(arr_output_f2),
ElementsAre(5.2, 4.2, std::nullopt, 3.2));
EXPECT_THAT(output_frame.Get(arr_output_i1), ElementsAre(6, 7, 8, 9));
EXPECT_THAT(output_frame.Get(arr_output_i2),
ElementsAre(6, std::nullopt, 4, std::nullopt));
};
{
ASSERT_OK_AND_ASSIGN(
auto frame_iterator,
FrameIterator::Create(input_refs, scalar_slots, output_slots,
scalar_slots, &scalar_layout,
{.frame_buffer_count = 2}));
frame_iterator.ForEachFrame(scalar_processing_fn);
check_output_fn(frame_iterator);
}
StdThreading threading(4);
for (int threads = 1; threads <= 4; ++threads) {
ASSERT_OK_AND_ASSIGN(
auto frame_iterator,
FrameIterator::Create(input_refs, scalar_slots, output_slots,
scalar_slots, &scalar_layout,
{.frame_buffer_count = 3}));
frame_iterator.ForEachFrame(scalar_processing_fn, threading, threads);
check_output_fn(frame_iterator);
}
}
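// With empty input arrays the per-row callback must never fire and the output
// array must be empty.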
TEST(FrameIterator, EmptyArrays) {
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
FrameLayout::Builder arrays_layout_bldr;
auto arr_output = arrays_layout_bldr.AddSlot<DenseArray<float>>();
auto output_arrays_layout = std::move(arrays_layout_bldr).Build();
DenseArray<float> arr;
std::vector<TypedRef> input_refs = {TypedRef::FromValue(arr)};
std::vector<TypedSlot> output_slots = {TypedSlot::FromSlot(arr_output)};
auto scalar_processing_fn = [&](FramePtr frame) { ADD_FAILURE(); };
ASSERT_OK_AND_ASSIGN(auto frame_iterator,
FrameIterator::Create(
input_refs, scalar_slots, output_slots, scalar_slots,
&scalar_layout, {.frame_buffer_count = 2}));
frame_iterator.ForEachFrame(scalar_processing_fn);
MemoryAllocation alloc(&output_arrays_layout);
FramePtr output_frame = alloc.frame();
EXPECT_OK(frame_iterator.StoreOutput(output_frame));
EXPECT_EQ(output_frame.Get(arr_output).size(), 0);
}
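// Without input arrays the row count cannot be inferred, so it must be
// supplied explicitly through options.row_count.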
TEST(FrameIterator, EmptyInputAndOutput) {
FrameLayout::Builder scalar_bldr;
auto scalar_layout = std::move(scalar_bldr).Build();
{
auto frame_iterator_or_status =
FrameIterator::Create({}, {}, {}, {}, &scalar_layout);
EXPECT_THAT(
frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("options.row_count can not be missed if there "
"is no input arrays")));
}
{
ASSERT_OK_AND_ASSIGN(auto frame_iterator,
FrameIterator::Create({}, {}, {}, {}, &scalar_layout,
{.row_count = 4}));
EXPECT_EQ(frame_iterator.row_count(), 4);
}
}
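// Binding an int64 input array to a float scalar slot must fail with a type
// mismatch.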
TEST(FrameIterator, IncorrectInputType) {
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<float>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
DenseArray<int64_t> arr = CreateDenseArray<int64_t>({1, std::nullopt, 2, 3});
auto frame_iterator_or_status = FrameIterator::Create(
{TypedRef::FromValue(arr)}, scalar_slots, {}, {}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match")));
}
TEST(FrameIterator, IncorrectOutputType) {
FrameLayout::Builder vector_bldr;
auto vector_slot = vector_bldr.AddSlot<DenseArray<float>>();
auto vector_layout = std::move(vector_bldr).Build();
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<int64_t>();
auto scalar_layout = std::move(scalar_bldr).Build();
auto frame_iterator_or_status =
FrameIterator::Create({}, {}, {TypedSlot::FromSlot(vector_slot)},
{TypedSlot::FromSlot(scalar_slot)}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match")));
}
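// Input arrays of different lengths (4 vs. 3) must be rejected.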
TEST(FrameIterator, WrongSize) {
FrameLayout::Builder scalar_bldr;
auto scalar_f_slot1 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_i_slot1 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_f_slot1),
TypedSlot::FromSlot(scalar_i_slot1)};
DenseArray<float> arr_f1 =
CreateDenseArray<float>({1.5, std::nullopt, 2.5, 3.5});
DenseArray<int64_t> arr_i1 = CreateDenseArray<int64_t>({3, 4, 5});
auto frame_iterator_or_status = FrameIterator::Create(
{TypedRef::FromValue(arr_f1), TypedRef::FromValue(arr_i1)}, scalar_slots,
{}, {}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("input arrays have different sizes")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/array_like/frame_iter.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/array_like/frame_iter_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c5c247b6-696e-48cf-bbe8-95daf7a893d3 | cpp | tensorflow/tensorflow | gemm_rewriter | third_party/xla/xla/service/gpu/transforms/gemm_rewriter.cc | third_party/xla/xla/service/gpu/transforms/gemm_rewriter_test.cc | #include "xla/service/gpu/transforms/gemm_rewriter.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_blas_lt.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
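// Gives a rewritten custom call a readable name: "cublas-lt-matmul" for
// cuBLASLt calls, otherwise "cublas-gemm" or "cublas-batch-gemm" depending on
// whether the dot has batch dimensions.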
absl::Status SetName(HloModule *module, HloInstruction *gemm) {
if (IsCublasLtMatmul(*gemm)) {
module->SetAndUniquifyInstrName(gemm, "cublas-lt-matmul");
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
gemm->backend_config<GpuBackendConfig>());
const GemmBackendConfig &config = gpu_config.gemm_backend_config();
const DotDimensionNumbers &dot_dims = config.dot_dimension_numbers();
bool is_batch_dot = !dot_dims.lhs_batch_dimensions().empty() ||
!dot_dims.rhs_batch_dimensions().empty();
module->SetAndUniquifyInstrName(
gemm, is_batch_dot ? "cublas-batch-gemm" : "cublas-gemm");
return absl::OkStatus();
}
bool SupportsEpilogueFusion(PrimitiveType type) {
switch (type) {
case F8E4M3FN:
case F8E5M2:
case F16:
case BF16:
case F32:
case F64:
return true;
default:
return false;
}
}
bool IsF8Type(const HloInstruction *instr) {
return primitive_util::IsF8Type(instr->shape().element_type());
}
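// Rounds every non-batch dimension of `old_shape` up to the next multiple of
// 16; used below to prepare the operands and result of FP8 custom calls.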
Shape PadShapeToMultipleOf16(const Shape old_shape,
const absl::Span<const int64_t> batch_dims) {
Shape padded_shape = old_shape;
for (int i = 0; i < old_shape.rank(); ++i) {
if (!absl::c_linear_search(batch_dims, i)) {
int64_t padded_dimension =
RoundUpTo<int64_t>(old_shape.dimensions(i), 16);
padded_shape.set_dimensions(i, padded_dimension);
}
}
return padded_shape;
}
HloInstruction *PadOperandToTargetShape(const Shape &target,
HloInstruction *x) {
if (ShapeUtil::Equal(target, x->shape()) ||
!ShapeUtil::SameElementType(x->shape(), target)) {
return x;
}
PaddingConfig padding_config;
for (int i = 0; i < x->shape().rank(); ++i) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(0);
dimension->set_edge_padding_high(target.dimensions(i) -
x->shape().dimensions(i));
dimension->set_interior_padding(0);
}
HloInstruction *zero = x->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(x->shape().element_type())));
return x->AddInstruction(
HloInstruction::CreatePad(target, x, zero, padding_config));
}
HloInstruction *PadOperandToMultipleOf16(absl::Span<const int64_t> batch_dims,
HloInstruction *x) {
Shape padded_shape = PadShapeToMultipleOf16(x->shape(), batch_dims);
return PadOperandToTargetShape(padded_shape, x);
}
absl::StatusOr<HloInstruction *> InvertAndConvertScalar(HloInstruction *scalar,
bool invert) {
DCHECK(ShapeUtil::IsScalar(scalar->shape()));
if (invert) {
Literal one_literal = LiteralUtil::One(scalar->shape().element_type());
HloInstruction *one = scalar->parent()->AddInstruction(
HloInstruction::CreateConstant(one_literal.Clone()));
TF_ASSIGN_OR_RETURN(scalar, MakeBinaryHlo(HloOpcode::kDivide, one, scalar,
&scalar->metadata()));
}
if (scalar->shape().element_type() != F32) {
scalar = MakeConvertToHlo(scalar, F32, &scalar->metadata());
}
return scalar;
}
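// A path of (instruction, operand index) pairs leading from an FP8 value to
// its consumer; the index records which operand the value flows in through
// (-1 for the FP8 source itself).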
using InstrPath = std::vector<std::pair<HloInstruction *, int>>;
std::optional<InstrPath> FindF8SubgraphRecursive(
HloInstruction *instr, absl::flat_hash_set<int> &visited_instrs) {
if (!visited_instrs.emplace(instr->unique_id()).second) {
return std::nullopt;
}
if (IsF8Type(instr)) {
return InstrPath{{instr, -1}};
}
if (instr->operand_count() == 1 || instr->opcode() == HloOpcode::kDivide ||
instr->opcode() == HloOpcode::kDynamicSlice ||
instr->opcode() == HloOpcode::kPad) {
std::optional<InstrPath> subgraph =
FindF8SubgraphRecursive(instr->mutable_operand(0), visited_instrs);
if (subgraph) {
subgraph->emplace_back(std::make_pair(instr, 0));
}
return subgraph;
} else if (instr->opcode() == HloOpcode::kMultiply ||
instr->opcode() == HloOpcode::kSelect) {
for (int k = 0; k < 2; ++k) {
int operand_idx = k + (instr->opcode() == HloOpcode::kSelect);
std::optional<InstrPath> subgraph = FindF8SubgraphRecursive(
instr->mutable_operand(operand_idx), visited_instrs);
if (subgraph) {
subgraph->emplace_back(std::make_pair(instr, operand_idx));
return subgraph;
}
}
}
return std::nullopt;
}
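// Result of matching an FP8 dequantization pattern: the raw FP8 input, its
// optional scale, whether the scale multiplies or divides, and the
// type-preserving ops found between the dequantization and the dot.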
struct MatchedFp8Param {
HloInstruction *fp8_input = nullptr;
HloInstruction *scale = nullptr;
bool mult_scale = false;
InstrPath commutative_ops;
};
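// Matches convert(fp8), convert(fp8) * broadcast(scale) or
// convert(fp8) / broadcast(scale), optionally followed by a chain of ops
// (bitcast, broadcast, copy, slice, transpose, ...) that commute with
// dequantization and can be moved onto the FP8 operand.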
std::optional<MatchedFp8Param> MatchFp8Param(HloInstruction *instr) {
absl::flat_hash_set<int> visited_instrs;
std::optional<InstrPath> maybe_subgraph =
FindF8SubgraphRecursive(instr, visited_instrs);
if (!maybe_subgraph) {
return std::nullopt;
}
InstrPath &subgraph = maybe_subgraph.value();
MatchedFp8Param param;
if (subgraph.size() == 1) {
CHECK(IsF8Type(subgraph[0].first));
param.fp8_input = subgraph[0].first;
return param;
}
int num_dequant_ops;
if (subgraph.size() > 2 &&
Match(subgraph[2].first,
m::MultiplyAnyOrder(m::Convert(m::Op(¶m.fp8_input)),
m::Broadcast(m::Op(¶m.scale))))) {
param.mult_scale = true;
num_dequant_ops = 2;
} else if (subgraph.size() > 2 &&
Match(subgraph[2].first,
m::Divide(m::Convert(m::Op(¶m.fp8_input)),
m::Broadcast(m::Op(¶m.scale))))) {
param.mult_scale = false;
num_dequant_ops = 2;
} else if (subgraph.size() > 1 &&
Match(subgraph[1].first, m::Convert(m::Op(¶m.fp8_input)))) {
param.scale = nullptr;
num_dequant_ops = 1;
} else {
VLOG(1) << "Possible intended FP8 GEMM operating on "
<< instr->ToShortString() << " not rewritten into FP8 Custom Call.";
return std::nullopt;
}
auto preserves_element_type = [](const HloInstruction *instr) -> bool {
return ShapeUtil::SameElementType(instr->shape(),
instr->operand(0)->shape());
};
auto use_spmd_partitioning = [](const HloInstruction *instr) -> bool {
return instr->GetModule()->config().use_spmd_partitioning();
};
int start = 1 + num_dequant_ops;
for (int i = start; i < subgraph.size(); ++i) {
if (!Match(
subgraph[i].first,
m::AnyOf<HloInstruction>(
m::Bitcast().WithPredicate(preserves_element_type),
m::Broadcast(), m::Copy(), m::DynamicSlice(), m::Pad(),
m::Reshape(), m::Select(), m::Slice(), m::Transpose(),
m::AllGather().WithPredicate(use_spmd_partitioning),
m::AllToAll().WithPredicate(use_spmd_partitioning),
m::CollectivePermute().WithPredicate(use_spmd_partitioning)))) {
VLOG(1) << "Possible intended FP8 GEMM operating on "
<< instr->ToShortString()
<< " not rewritten into FP8 Custom Call.";
return std::nullopt;
}
if (Match(subgraph[i].first, m::Select()) &&
!Match(subgraph[i].first->operand(subgraph[i].second == 2 ? 1 : 2),
m::Broadcast(m::ConstantScalar(0)))) {
VLOG(1) << "Possible intended FP8 GEMM operating on "
<< instr->ToShortString()
<< " not rewritten into FP8 Custom Call. Select requires a zero "
"operand to be exchanged with dequantization.";
return std::nullopt;
}
}
param.commutative_ops = {subgraph.begin() + start, subgraph.end()};
return param;
}
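// Swaps the contracting and non-contracting non-batch dimensions of `instr`.
// For non-default layouts the transpose is expressed on a layout-normalized
// bitcast of the operand and bitcast back afterwards.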
HloInstruction *TransposeMatrix(HloInstruction *instr, int64_t contracting_dim,
absl::Span<const int64_t> batch_dims) {
auto input_shape = instr->shape();
std::vector<int64_t> permutation(input_shape.dimensions_size(), -1);
for (int64_t batch_dim : batch_dims) {
permutation[batch_dim] = batch_dim;
}
  int non_contracting_dim = -1;
for (int i = 0; i < input_shape.dimensions_size(); ++i) {
if (permutation[i] == -1 && contracting_dim != i) {
non_contracting_dim = i;
}
}
if (Layout::Equal()(input_shape.layout(),
LayoutUtil::GetDefaultLayoutForShape(input_shape))) {
permutation[non_contracting_dim] = contracting_dim;
permutation[contracting_dim] = non_contracting_dim;
Shape new_shape = ShapeUtil::PermuteDimensions(permutation, input_shape);
*new_shape.mutable_layout() = input_shape.layout();
return instr->AddInstruction(
HloInstruction::CreateTranspose(new_shape, instr, permutation));
}
Shape normalized_input_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
input_shape);
auto a0 = MakeBitcastHlo(instr, normalized_input_shape);
  std::vector<int64_t> layout_permutation(
      input_shape.layout().minor_to_major().begin(),
      input_shape.layout().minor_to_major().end());
  absl::c_reverse(layout_permutation);
  auto inv_perm = InversePermutation(layout_permutation);
int new_contracting_dim = inv_perm[contracting_dim];
int new_non_contracting_dim = inv_perm[non_contracting_dim];
absl::c_iota(permutation, 0);
std::swap(permutation[new_contracting_dim],
permutation[new_non_contracting_dim]);
Shape transpose_shape =
ShapeUtil::PermuteDimensions(permutation, a0->shape());
*transpose_shape.mutable_layout() = a0->shape().layout();
HloInstruction *normalized_transpose = instr->AddInstruction(
HloInstruction::CreateTranspose(transpose_shape, a0, permutation));
Shape final_shape = ShapeUtil::PermuteDimensions(inv_perm, transpose_shape);
*final_shape.mutable_layout() = input_shape.layout();
return MakeBitcastHlo(normalized_transpose, final_shape);
}
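// If `bias` is a broadcast of a non-scalar constant (possibly behind a
// reshape, transpose or bitcast), materializes it as a literal, capped at
// 8 MiB.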
HloInstruction *MaybeConstantFoldBias(HloInstruction *bias) {
constexpr int kMaxMaterializeBiasBytes = 8 * 1024 * 1024;
auto is_nonscalar = [](const HloInstruction *instr) {
return !ShapeUtil::IsEffectiveScalar(instr->shape());
};
auto broadcast_of_nonscalar =
m::Broadcast(m::Constant().WithPredicate(is_nonscalar));
if (ShapeUtil::ByteSizeOf(bias->shape()) <= kMaxMaterializeBiasBytes &&
(Match(bias, broadcast_of_nonscalar) ||
Match(bias, m::Reshape(broadcast_of_nonscalar)) ||
Match(bias, m::Transpose(broadcast_of_nonscalar)) ||
Match(bias, m::Bitcast(broadcast_of_nonscalar)))) {
    HloEvaluator evaluator(/*max_loop_iterations=*/0);
Literal result;
if (evaluator.TryEvaluate(
bias, &result,
            /*recursively_evaluate_nonconstant_operands=*/true)) {
return bias->parent()->AddInstruction(
HloInstruction::CreateConstant(std::move(result)));
}
}
return bias;
}
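// Shorthand matchers for the custom-call targets produced by this pass.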
auto Gemm(HloInstruction **instr) {
return m::CustomCall(instr, {kGemmCallTarget});
}
auto CublasLtMatmul(HloInstruction **instr) {
return m::CustomCall(instr, {kCublasLtMatmulCallTarget});
}
auto CublasLtMatmulF8(HloInstruction **instr) {
return m::CustomCall(instr, {kCublasLtMatmulF8CallTarget});
}
auto CublasLtMatmulMaybeF8(HloInstruction **instr) {
return m::CustomCall(
instr, {kCublasLtMatmulCallTarget, kCublasLtMatmulF8CallTarget});
}
auto GemmOrCublasLtMatmul(HloInstruction **instr) {
return m::CustomCall(instr, {kGemmCallTarget, kCublasLtMatmulCallTarget});
}
auto GemmOrCublasLtMatmulMaybeF8(HloInstruction **instr) {
return m::CustomCall(instr, {kGemmCallTarget, kCublasLtMatmulCallTarget,
kCublasLtMatmulF8CallTarget});
}
auto BcastConstScalar(HloInstruction **instr, double value) {
return m::Broadcast(instr, m::ConstantScalar(value));
}
auto BcastConstScalar(double value) { return BcastConstScalar(nullptr, value); }
auto BcastConstScalarNear(double value) {
return m::Broadcast(m::ConstantScalar().WithPredicate(
[expected = value](const HloInstruction *instr) {
std::optional<double> actual =
xla::Cast<const HloConstantInstruction>(instr)
->literal()
.GetAsDouble({});
if (!actual.has_value()) return false;
double epsilon;
switch (instr->shape().element_type()) {
case F16:
epsilon = 128 * std::numeric_limits<Eigen::half>::epsilon();
break;
case BF16:
epsilon = 128 * std::numeric_limits<bfloat16>::epsilon();
break;
case F32:
epsilon = 128 * std::numeric_limits<float>::epsilon();
break;
case F64:
epsilon = 128 * std::numeric_limits<double>::epsilon();
break;
default:
return false;
}
        return std::abs(*actual - expected) <
               (std::abs(*actual + expected) * epsilon);
}));
}
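// Match `pattern` either directly or wrapped in a slice/convert/bitcast,
// capturing the wrapper when present.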
template <typename Pattern>
auto OptionalSlice(HloInstruction **optional_slice, Pattern pattern) {
return m::AnyOf<HloInstruction>(m::Slice(optional_slice, pattern),
std::move(pattern));
}
template <typename Pattern>
auto OptionalConvert(HloInstruction **optional_convert, Pattern pattern) {
return m::AnyOf<HloInstruction>(m::Convert(optional_convert, pattern),
std::move(pattern));
}
template <typename Pattern>
auto OptionalBitcast(HloInstruction **optional_bitcast, Pattern pattern) {
return m::AnyOf<HloInstruction>(m::Bitcast(optional_bitcast, pattern),
std::move(pattern));
}
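// The rewriter itself: HandleDot wraps eligible dots in cublas custom calls,
// and the remaining handlers fold surrounding elementwise ops (scaling,
// biases, ReLU/GELU, FP8 quantization) into the calls' configs and epilogues.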
class GemmRewriterVisitor : public DfsHloRewriteVisitor {
public:
explicit GemmRewriterVisitor(const se::GpuComputeCapability &gpu_version,
se::SemanticVersion toolkit_version,
const GemmRewriterOptions options)
: gpu_version_(gpu_version),
toolkit_version_(toolkit_version),
options_(options) {}
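  // Skips dots that are sparse, too small to benefit, or whose operands are
  // rank-2 transposes; otherwise builds the GemmBackendConfig (alpha = 1,
  // beta = 0, strides and dimension numbers) and emits either an FP8 cublasLt
  // custom call or a plain cublas/cublasLt custom call, per `options_.dtype`.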
absl::Status HandleDot(HloInstruction *instr) override {
if (!IsMatrixMultiplication(*instr) &&
!IsMatrixVectorMultiplication(*instr)) {
return absl::OkStatus();
}
if (Cast<HloDotInstruction>(instr)->sparse_operands()) {
return absl::OkStatus();
}
int64_t gemm_rewrite_size_threshold =
instr->GetModule()
->config()
.debug_options()
.xla_gpu_gemm_rewrite_size_threshold();
TF_ASSIGN_OR_RETURN(bool is_matmul_tiny,
IsMatrixMultiplicationTooSmallForRewriting(
*instr, gemm_rewrite_size_threshold));
if (is_matmul_tiny && IsDotSupportedByClassicalEmitters(*instr)) {
return absl::OkStatus();
}
CHECK(!instr->IsRank2Transpose());
if (instr->operand(0)->IsRank2Transpose() ||
instr->operand(1)->IsRank2Transpose()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_backend_config,
instr->backend_config<GpuBackendConfig>());
GemmBackendConfig &gemm_backend_config =
*gpu_backend_config.mutable_gemm_backend_config();
gemm_backend_config.set_alpha_real(1.0);
gemm_backend_config.set_alpha_imag(0.0);
gemm_backend_config.set_beta(0.0);
*gemm_backend_config.mutable_dot_dimension_numbers() =
instr->dot_dimension_numbers();
*gemm_backend_config.mutable_precision_config() = instr->precision_config();
HloInstruction *lhs = instr->mutable_operand(0);
HloInstruction *rhs = instr->mutable_operand(1);
auto attributes = instr->frontend_attributes().map();
gemm_backend_config.set_grad_x(attributes["grad_x"] == "true");
gemm_backend_config.set_grad_y(attributes["grad_y"] == "true");
int64_t lhs_batch_dims_size =
instr->dot_dimension_numbers().lhs_batch_dimensions_size();
bool is_lhs_vector =
lhs->shape().dimensions_size() == lhs_batch_dims_size + 1;
bool is_rhs_vector =
rhs->shape().dimensions_size() == lhs_batch_dims_size + 1;
int64_t lhs_stride =
is_lhs_vector ? lhs->shape().dimensions(lhs_batch_dims_size)
: lhs->shape().dimensions(lhs_batch_dims_size) *
lhs->shape().dimensions(lhs_batch_dims_size + 1);
int64_t rhs_stride =
is_rhs_vector ? rhs->shape().dimensions(lhs_batch_dims_size)
: rhs->shape().dimensions(lhs_batch_dims_size) *
rhs->shape().dimensions(lhs_batch_dims_size + 1);
gemm_backend_config.set_lhs_stride(lhs_stride);
gemm_backend_config.set_rhs_stride(rhs_stride);
switch (options_.dtype) {
case GemmRewriterOptions::DType::kFp8Only: {
TF_ASSIGN_OR_RETURN(
bool supported_by_cublaslt,
GemmIsSupportedByCublasLt(*instr, gemm_backend_config));
std::optional<MatchedFp8Param> a, b;
if (supported_by_cublaslt && instr->opcode() == HloOpcode::kDot &&
(a = MatchFp8Param(
const_cast<HloInstruction *>(instr->operand(0)))) &&
(b = MatchFp8Param(
const_cast<HloInstruction *>(instr->operand(1))))) {
if (IsRocm(gpu_version_) &&
toolkit_version_ < stream_executor::SemanticVersion{6, 2, 0} &&
instr->shape().element_type() != F16 &&
instr->shape().element_type() != F32) {
TF_ASSIGN_OR_RETURN(
instr, TurnF8DotWithUnsupportedOutputTypeIntoF32(instr));
}
TF_ASSIGN_OR_RETURN(bool created_call,
CreateF8CustomCall(instr, gpu_backend_config,
a.value(), b.value()));
if (created_call) {
return absl::OkStatus();
}
}
if (IsF8Type(instr->operand(0))) {
TF_ASSIGN_OR_RETURN(instr, TurnF8DotIntoF16Dot(instr));
}
break;
}
case GemmRewriterOptions::DType::kNonFp8Only: {
TF_ASSIGN_OR_RETURN(
absl::string_view gemm_custom_call_target,
GetNonFp8GemmCustomCallTarget(*instr, gemm_backend_config));
const Shape &output_shape = instr->shape();
HloInstruction *gemm_call =
instr->AddInstruction(HloInstruction::CreateCustomCall(
output_shape,
{instr->mutable_operand(0), instr->mutable_operand(1)},
gemm_custom_call_target));
TF_RETURN_IF_ERROR(gemm_call->set_backend_config(gpu_backend_config));
TF_RETURN_IF_ERROR(ReplaceInstruction(instr, gemm_call));
} break;
    }
return absl::OkStatus();
}
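  // Fuses three multiply patterns: a broadcast scalar alpha into the GEMM
  // config, the FP8 output scale into the FP8 custom call (F8ScaleD), and the
  // tanh-based GELU approximation
  // 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))) into the epilogue.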
absl::Status HandleMultiply(HloInstruction *instr) override {
HloInstruction *alpha, *existing_gemm;
if (Match(instr,
m::MultiplyAnyOrder(
GemmOrCublasLtMatmulMaybeF8(&existing_gemm).WithOneUser(),
m::Broadcast(m::ConstantScalar(&alpha)).WithOneUser()))) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
existing_gemm->backend_config<GpuBackendConfig>());
GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config();
if (existing_gemm->shape().element_type() == S32) {
return absl::OkStatus();
}
if (config.beta() == 0.0 && existing_gemm->user_count() == 1) {
complex128 prev_alpha = {config.alpha_real(), config.alpha_imag()};
complex128 new_alpha =
*alpha->literal().GetAsComplex128({}) * prev_alpha;
config.set_alpha_real(new_alpha.real());
config.set_alpha_imag(new_alpha.imag());
TF_RETURN_IF_ERROR(existing_gemm->set_backend_config(gpu_config));
return ReplaceInstruction(instr, existing_gemm);
}
}
HloInstruction *d_scale;
if (Match(instr, m::MultiplyAnyOrder(
CublasLtMatmulF8(&existing_gemm).WithOneUser(),
m::Broadcast(m::Op(&d_scale)).WithOneUser()))) {
return F8ScaleD(instr, existing_gemm, d_scale);
}
HloInstruction *cdf, *slice_or_bitcast = nullptr;
if (Match(instr, m::MultiplyAnyOrder(
m::AnyOf<HloInstruction>(
m::Slice(&slice_or_bitcast,
CublasLtMatmulMaybeF8(&existing_gemm)),
m::Bitcast(&slice_or_bitcast,
CublasLtMatmulMaybeF8(&existing_gemm)),
CublasLtMatmulMaybeF8(&existing_gemm)),
m::Op(&cdf).WithOneUser())) &&
Match(cdf,
m::MultiplyAnyOrder(
BcastConstScalar(0.5),
m::AddAnyOrder(
BcastConstScalar(1.0),
m::Tanh(
m::MultiplyAnyOrder(
BcastConstScalarNear(sqrt(M_2_PI)),
m::AddAnyOrder(
m::Op().Is(slice_or_bitcast ? slice_or_bitcast
: existing_gemm),
m::MultiplyAnyOrder(
BcastConstScalarNear(0.044715),
m::MultiplyAnyOrder(
m::Op().Is(slice_or_bitcast
? slice_or_bitcast
: existing_gemm),
m::MultiplyAnyOrder(
m::Op().Is(slice_or_bitcast
? slice_or_bitcast
: existing_gemm),
m::Op().Is(slice_or_bitcast
? slice_or_bitcast
: existing_gemm))
.WithOneUser())
.WithOneUser())
.WithOneUser())
.WithOneUser())
.WithOneUser())
.WithOneUser())))) {
return FuseGeluActivation(instr, existing_gemm, slice_or_bitcast);
}
return absl::OkStatus();
}
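  // gemm / broadcast(scalar) is the divide form of the FP8 output-scale
  // fusion handled by F8ScaleD.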
absl::Status HandleDivide(HloInstruction *instr) override {
HloInstruction *existing_gemm, *d_scale;
if (Match(instr, m::Divide(CublasLtMatmulF8(&existing_gemm).WithOneUser(),
m::Broadcast(m::Op(&d_scale)).WithOneUser()))) {
return F8ScaleD(instr, existing_gemm, d_scale);
}
return absl::OkStatus();
}
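  // Folds additions into the GEMM: broadcast vector biases become BIAS
  // epilogues, while matrix biases are fused as operand C with beta = 1,
  // including through bitcasts and zero-offset, unit-stride slices.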
absl::Status HandleAdd(HloInstruction *instr) override {
if (options_.bias_mode == GemmRewriterOptions::BiasMode::kNoBias) {
return absl::OkStatus();
}
HloInstruction *bias, *existing_gemm = nullptr;
HloInstruction *optional_slice = nullptr;
HloInstruction *optional_convert = nullptr;
HloInstruction *optional_bitcast = nullptr;
if (Match(instr,
m::AddAnyOrder(
OptionalBitcast(
&optional_bitcast,
OptionalSlice(
&optional_slice,
CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser())
.WithOneUser())
.WithOneUser(),
m::Broadcast(&bias,
OptionalConvert(&optional_convert, m::Op()))))) {
TF_ASSIGN_OR_RETURN(
bool was_fused,
FuseVectorBiasAdd(instr, bias, existing_gemm, optional_slice,
optional_convert, optional_bitcast));
if (was_fused) {
return absl::OkStatus();
}
}
if (Match(
instr,
m::AddAnyOrder(
m::Bitcast(CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser())
.WithOneUser(),
m::Broadcast(&bias, m::Op()).WithOneUser()))) {
TF_ASSIGN_OR_RETURN(
HloInstruction * new_add,
MakeBinaryHlo(HloOpcode::kAdd, existing_gemm,
MakeBitcastHlo(bias, existing_gemm->shape())));
TF_RETURN_IF_ERROR(
ReplaceInstruction(instr, MakeBitcastHlo(new_add, instr->shape())));
instr = new_add;
}
auto is_not_broadcast = [](const HloInstruction *instr) {
return instr->opcode() != HloOpcode::kBroadcast;
};
if (Match(instr,
m::AddAnyOrder(
m::Bitcast(
GemmOrCublasLtMatmulMaybeF8(&existing_gemm).WithOneUser())
.WithOneUser(),
m::Op(&bias).WithPredicate(is_not_broadcast)))) {
HloInstruction *new_bitcast =
MakeBitcastHlo(bias, existing_gemm->shape(), &bias->metadata());
TF_ASSIGN_OR_RETURN(HloInstruction * new_add,
MakeBinaryHlo(HloOpcode::kAdd, existing_gemm,
new_bitcast, &bias->metadata()));
TF_RETURN_IF_ERROR(
ReplaceInstruction(instr, MakeBitcastHlo(new_add, instr->shape())));
instr = new_add;
}
if (Match(instr,
m::AddAnyOrder(
m::AnyOf<HloInstruction>(
GemmOrCublasLtMatmul(&existing_gemm).WithOneUser(),
m::Convert(
GemmOrCublasLtMatmul(&existing_gemm).WithOneUser())
.WithOneUser()),
m::Op(&bias).WithPredicate(is_not_broadcast)))) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_backend_config,
existing_gemm->backend_config<GpuBackendConfig>());
const GemmBackendConfig &gemm_backend_config =
gpu_backend_config.gemm_backend_config();
TF_ASSIGN_OR_RETURN(
bool types_are_supported,
IsLegacyCublasMatmul(*existing_gemm)
? TypesAreSupportedByLegacyCublas(*existing_gemm,
gemm_backend_config, instr)
: TypesAreSupportedByCublasLt(*existing_gemm, gemm_backend_config,
instr));
bool has_no_consumer =
instr->shape().element_type() ==
existing_gemm->shape().element_type() ||
instr->user_count() == 0 ||
(instr->user_count() == 1 &&
instr->users()[0]->opcode() == HloOpcode::kTuple &&
instr->users()[0]->user_count() == 0);
if (types_are_supported && has_no_consumer) {
return FuseMatrixBiasAdd(instr, bias, existing_gemm);
}
}
HloInstruction *optional_bitcast_matrix = nullptr;
HloInstruction *optional_slice_matrix = nullptr;
if (Match(instr,
m::AddAnyOrder(
OptionalBitcast(
&optional_bitcast_matrix,
OptionalSlice(&optional_slice_matrix,
GemmOrCublasLtMatmulMaybeF8(&existing_gemm)
.WithOneUser()))
.WithOneUser(),
m::Op(&bias).WithPredicate(is_not_broadcast)))) {
if (!IsF8Type(bias)) {
return FuseMatrixBiasAdd(instr, bias, existing_gemm,
optional_bitcast_matrix,
optional_slice_matrix);
}
}
return absl::OkStatus();
}
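  // max(gemm, 0), optionally through a slice or bitcast, becomes a RELU
  // (or BIAS_RELU) epilogue.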
absl::Status HandleMaximum(HloInstruction *instr) override {
HloInstruction *existing_gemm, *zeros;
HloInstruction *optional_slice_or_bitcast = nullptr;
if (Match(instr,
m::MaximumAnyOrder(
m::AnyOf<HloInstruction>(
m::Slice(
&optional_slice_or_bitcast,
CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()),
m::Bitcast(
&optional_slice_or_bitcast,
CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()),
CublasLtMatmulMaybeF8(&existing_gemm))
.WithOneUser(),
m::Broadcast(&zeros, m::ConstantScalar(0))))) {
TF_RETURN_IF_ERROR(FuseReluActivation(instr, zeros, existing_gemm,
optional_slice_or_bitcast));
}
return absl::OkStatus();
}
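  // Matches convert(clamp(lo, gemm, hi)) back to an FP8 type, where the GEMM
  // result may first be scaled by a broadcast scalar, and fuses the
  // quantization into the FP8 custom call via F8ConvertD.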
absl::Status HandleConvert(HloInstruction *instr) override {
HloInstruction *clamp_lower, *clamp_upper, *existing_gemm,
*d_scale = nullptr, *binary = nullptr;
if (Match(instr,
m::Convert(
m::Clamp(
m::Broadcast(m::ConstantScalar(&clamp_lower)),
m::AnyOf<HloInstruction>(
CublasLtMatmulF8(&existing_gemm),
m::Divide(&binary, CublasLtMatmulF8(&existing_gemm),
m::Broadcast(m::Op(&d_scale))),
m::MultiplyAnyOrder(&binary,
CublasLtMatmulF8(&existing_gemm),
m::Broadcast(m::Op(&d_scale)))),
m::Broadcast(m::ConstantScalar(&clamp_upper)))
.WithOneUser()))) {
return F8ConvertD(
instr, existing_gemm, d_scale, clamp_lower, clamp_upper,
(binary && binary->opcode() == HloOpcode::kMultiply));
}
return absl::OkStatus();
}
static bool IsCuda(const se::GpuComputeCapability &gpu_version) {
return std::holds_alternative<se::CudaComputeCapability>(gpu_version);
}
static absl::StatusOr<se::CudaComputeCapability> GetCudaComputeCapability(
const se::GpuComputeCapability &gpu_version) {
auto *cuda_cc = std::get_if<se::CudaComputeCapability>(&gpu_version);
if (cuda_cc == nullptr) {
return absl::InvalidArgumentError("Compute Capability is not CUDA.");
}
return *cuda_cc;
}
static bool IsRocm(const se::GpuComputeCapability &gpu_version) {
return std::holds_alternative<se::RocmComputeCapability>(gpu_version);
}
static absl::StatusOr<se::RocmComputeCapability> GetRocmComputeCapability(
const se::GpuComputeCapability &gpu_version) {
auto rocm_cc = std::get_if<se::RocmComputeCapability>(&gpu_version);
if (rocm_cc == nullptr) {
return absl::InvalidArgumentError("Compute Capability is not ROCm.");
}
return *rocm_cc;
}
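  // Rewrites a dot over matched dequantized-FP8 operands into a cublasLt FP8
  // custom call: checks hardware, toolkit and element-type support, converts
  // the scales to F32 multipliers, forces the operand layouts cublasLt
  // expects (transposing if needed), and pads all non-batch dimensions to
  // multiples of 16, slicing the result back to the original shape.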
absl::StatusOr<bool> CreateF8CustomCall(HloInstruction *instr,
GpuBackendConfig &gpu_backend_config,
MatchedFp8Param a,
MatchedFp8Param b) {
GemmBackendConfig &gemm_backend_config =
*gpu_backend_config.mutable_gemm_backend_config();
if (IsCuda(gpu_version_)) {
TF_ASSIGN_OR_RETURN(auto cuda_compute_capability,
GetCudaComputeCapability(gpu_version_));
if (!cuda_compute_capability.IsAtLeast(8, 9)) {
VLOG(1) << "FP8 Custom Calls require Ada, Hopper, or later "
"architectures. Got: "
<< cuda_compute_capability.ToString()
<< " and toolkit version: " << toolkit_version_;
return false;
}
if (toolkit_version_ < stream_executor::SemanticVersion{12, 0, 0}) {
VLOG(1) << "FP8 Custom Calls require CUDA 12.0 or newer.";
return false;
}
}
if (IsRocm(gpu_version_)) {
TF_ASSIGN_OR_RETURN(auto rocm_compute_capability,
GetRocmComputeCapability(gpu_version_));
if (!rocm_compute_capability.has_fp8_support()) {
VLOG(1) << "FP8 Custom Calls require MI300, or later architectures.";
return false;
}
if (toolkit_version_ < stream_executor::SemanticVersion{6, 0, 0}) {
VLOG(1) << "FP8 Custom Calls require ROCm 6.0 or newer.";
return false;
}
}
PrimitiveType a_type = a.fp8_input->shape().element_type();
PrimitiveType b_type = b.fp8_input->shape().element_type();
if (IsCuda(gpu_version_)) {
if (a_type == F8E5M2 && b_type == F8E5M2) {
VLOG(1)
<< "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. The element type of one of the operands "
"must be F8E4M3FN.";
return false;
}
if ((a_type != F8E5M2 && a_type != F8E4M3FN) ||
(b_type != F8E5M2 && b_type != F8E4M3FN)) {
VLOG(1) << "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. The input types must be F8E5M2 or "
"F8E4M3FN, but got "
<< PrimitiveType_Name(a_type) << " and "
<< PrimitiveType_Name(b_type);
return false;
}
}
if (IsRocm(gpu_version_)) {
if (a_type == F8E5M2FNUZ && b_type == F8E5M2FNUZ) {
VLOG(1)
<< "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. The element type of one of the operands "
"must be F8E4M3FNUZ.";
return false;
}
if ((a_type != F8E5M2FNUZ && a_type != F8E4M3FNUZ) ||
(b_type != F8E5M2FNUZ && b_type != F8E4M3FNUZ)) {
VLOG(1)
<< "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. The input types must be F8E5M2FNUZ or "
"F8E4M3FNUZ, but got "
<< PrimitiveType_Name(a_type) << " and "
<< PrimitiveType_Name(b_type);
return false;
}
}
absl::Span<const int64_t> a_batch_dims =
gemm_backend_config.dot_dimension_numbers().lhs_batch_dimensions();
absl::Span<const int64_t> b_batch_dims =
gemm_backend_config.dot_dimension_numbers().rhs_batch_dimensions();
const size_t num_batch_dims = a_batch_dims.size();
std::array<bool, 2> mult_scale{a.mult_scale, b.mult_scale};
std::array<HloInstruction *, 2> scales{a.scale, b.scale}, inv_scales,
scales_f32;
HloInstruction *one_constant = nullptr;
auto one = [&one_constant, instr]() -> HloInstruction * {
if (!one_constant) {
one_constant = instr->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(F32)));
}
return one_constant;
};
for (int i = 0; i < scales.size(); ++i) {
if (scales[i]) {
if (!ShapeUtil::IsScalar(scales[i]->shape())) {
VLOG(1) << "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. The scaling factors must be "
"scalars.";
return false;
}
if (!mult_scale[i]) {
inv_scales[i] = instr->AddInstruction(HloInstruction::CreateBinary(
scales[i]->shape(), HloOpcode::kDivide, one(), scales[i]));
}
scales_f32[i] = mult_scale[i] ? scales[i] : inv_scales[i];
if (scales_f32[i]->shape().element_type() != F32) {
scales_f32[i] = instr->AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeScalarShape(F32), scales_f32[i]));
}
} else {
scales_f32[i] = one();
}
}
PrimitiveType d_type = instr->shape().element_type();
bool supported_d_type = (d_type == BF16 || d_type == F16 || d_type == F32);
if (IsCuda(gpu_version_) && (d_type == F8E4M3FN || d_type == F8E5M2)) {
supported_d_type = true;
}
if (IsRocm(gpu_version_) &&
toolkit_version_ >= stream_executor::SemanticVersion{6, 2, 0} &&
(d_type == F8E4M3FNUZ || d_type == F8E5M2FNUZ)) {
supported_d_type = true;
}
if (!supported_d_type) {
VLOG(1) << "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. Output element type must be "
<< (IsCuda(gpu_version_) ? "F8E4M3FN, F8E5M2, BF16, F16 or F32. "
: toolkit_version_ >=
stream_executor::SemanticVersion{6, 2, 0}
? "F8E4M3FNUZ, F8E5M2FNUZ, BF16, F16 or F32. "
: "BF16, F16 or F32. ")
<< "Actual element type is " << PrimitiveType_Name(d_type);
return false;
}
absl::Span<const int64_t> a_contracting_dims =
gemm_backend_config.dot_dimension_numbers()
.lhs_contracting_dimensions();
absl::Span<const int64_t> b_contracting_dims =
gemm_backend_config.dot_dimension_numbers()
.rhs_contracting_dimensions();
if (a_contracting_dims.size() != 1 || b_contracting_dims.size() != 1) {
VLOG(1) << "Failed to rewrite " << instr->ToShortString()
<< " into FP8 Custom Call. A and B must have one contracting "
"dimension.";
return false;
}
for (const MatchedFp8Param ¶m : {a, b}) {
const HloInstruction *input = param.commutative_ops.empty()
? param.fp8_input
: param.commutative_ops.back().first;
if (input->shape().rank() != num_batch_dims + 2) {
VLOG(1) << "Failed to rewrite " << instr->ToShortString()
<< "into FP8 Custom Call. Inputs must have exactly one "
"contracting and one non-contracting dimension.";
return false;
}
}
auto shift_ops = [&instr](HloInstruction *&x, InstrPath &x_ops) -> void {
for (std::pair<HloInstruction *, int> op : x_ops) {
std::vector<HloInstruction *> operands = {x};
if (op.first->opcode() == HloOpcode::kDynamicSlice) {
for (int i = 1; i < op.first->operand_count(); ++i) {
operands.emplace_back(op.first->mutable_operand(i));
}
}
if (op.first->opcode() == HloOpcode::kPad) {
HloInstruction *convert =
instr->AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(op.first->operand(1)->shape(),
x->shape().element_type()),
op.first->mutable_operand(1)));
operands.emplace_back(convert);
}
if (op.first->opcode() == HloOpcode::kSelect) {
operands.emplace(operands.begin(), op.first->mutable_operand(0));
int operand_idx = op.second == 2 ? 1 : 2;
HloInstruction *convert =
instr->AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::ChangeElementType(
op.first->operand(operand_idx)->shape(),
x->shape().element_type()),
op.first->mutable_operand(operand_idx)));
operands.emplace(operands.begin() + operand_idx, convert);
}
x = instr->AddInstruction(op.first->CloneWithNewOperands(
ShapeUtil::MakeShapeWithDenseLayout(
x->shape().element_type(), op.first->shape().dimensions(),
op.first->shape().layout().minor_to_major()),
operands));
}
return;
};
shift_ops(a.fp8_input, a.commutative_ops);
shift_ops(b.fp8_input, b.commutative_ops);
TF_ASSIGN_OR_RETURN(GemmConfig gemm_config,
GemmConfig::For(instr, gemm_backend_config));
DotDimensionNumbers *dim_nums =
gemm_backend_config.mutable_dot_dimension_numbers();
if (gemm_config.lhs_layout.order == MatrixLayout::Order::kColumnMajor) {
CHECK(a_contracting_dims[0] == num_batch_dims ||
a_contracting_dims[0] == num_batch_dims + 1);
if (a_contracting_dims[0] == num_batch_dims) {
dim_nums->set_lhs_contracting_dimensions(0, num_batch_dims + 1);
} else {
dim_nums->set_lhs_contracting_dimensions(0, num_batch_dims);
}
a.fp8_input =
TransposeMatrix(a.fp8_input, a_contracting_dims[0], a_batch_dims);
}
if (gemm_config.rhs_layout.order == MatrixLayout::Order::kRowMajor) {
CHECK(b_contracting_dims[0] == num_batch_dims ||
b_contracting_dims[0] == num_batch_dims + 1);
if (b_contracting_dims[0] == num_batch_dims) {
dim_nums->set_rhs_contracting_dimensions(0, num_batch_dims + 1);
} else {
dim_nums->set_rhs_contracting_dimensions(0, num_batch_dims);
}
b.fp8_input =
TransposeMatrix(b.fp8_input, b_contracting_dims[0], b_batch_dims);
}
a.fp8_input = PadOperandToMultipleOf16(a_batch_dims, a.fp8_input);
b.fp8_input = PadOperandToMultipleOf16(b_batch_dims, b.fp8_input);
std::vector<int64_t> out_batch_dims(num_batch_dims);
std::iota(out_batch_dims.begin(), out_batch_dims.end(), 0);
Shape new_output_shape =
PadShapeToMultipleOf16(instr->shape(), out_batch_dims);
std::vector<HloInstruction *> operands_list = {
a.fp8_input, b.fp8_input, scales_f32[0], scales_f32[1]};
HloInstruction *new_custom_call =
instr->AddInstruction(HloInstruction::CreateCustomCall(
ShapeUtil::MakeShapeWithDenseLayout(
instr->shape().element_type(), new_output_shape.dimensions(),
instr->shape().layout().minor_to_major()),
operands_list, kCublasLtMatmulF8CallTarget));
TF_RETURN_IF_ERROR(new_custom_call->set_backend_config(gpu_backend_config));
TF_RETURN_IF_ERROR(SetName(instr->GetModule(), new_custom_call));
HloInstruction *slice = nullptr;
if (new_output_shape.dimensions() != instr->shape().dimensions()) {
std::vector<int64_t> start_indices(instr->shape().rank(), 0);
std::vector<int64_t> strides(instr->shape().rank(), 1);
slice = instr->AddInstruction(HloInstruction::CreateSlice(
instr->shape(), new_custom_call, start_indices,
instr->shape().dimensions(), strides));
}
TF_RETURN_IF_ERROR(
ReplaceInstruction(instr, slice ? slice : new_custom_call));
VLOG(1) << instr->ToString() << " rewritten into FP8 Custom Call.";
return true;
}
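  // Fuses multiplication or division of an FP8 GEMM result by a scalar into
  // the custom call: requires a DEFAULT/RELU epilogue, beta == 0 and a
  // constant-1 scale operand, which the (inverted for divides, F32-converted)
  // scalar then replaces.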
absl::Status F8ScaleD(HloInstruction *instr, HloInstruction *existing_gemm,
HloInstruction *d_scale) {
if (!ShapeUtil::IsScalar(d_scale->shape())) {
return absl::OkStatus();
}
if (!existing_gemm->operand(2)->IsConstant() ||
existing_gemm->operand(2)->literal().GetAsDouble({}) != 1.) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(auto gpu_backend_config,
existing_gemm->backend_config<GpuBackendConfig>());
const GemmBackendConfig &config = gpu_backend_config.gemm_backend_config();
if ((config.epilogue() != GemmBackendConfig::DEFAULT &&
config.epilogue() != GemmBackendConfig::RELU) ||
config.beta() != 0.) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
d_scale,
InvertAndConvertScalar(d_scale, instr->opcode() == HloOpcode::kDivide));
TF_RETURN_IF_ERROR(existing_gemm->ReplaceOperandWith(2, d_scale));
TF_RETURN_IF_ERROR(ReplaceInstruction(instr, existing_gemm));
VLOG(1) << "Scaling of FP8 GEMM fused into Custom Call.";
return absl::OkStatus();
}
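  // Fuses the quantizing conversion of the GEMM result to FP8, expressed as a
  // clamp to the full range of the target type with an optional scale, into
  // the custom call; when a second user computes reduce(abs(D), max), the
  // amax calculation is fused too (F8AddDAmax).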
absl::Status F8ConvertD(HloInstruction *instr, HloInstruction *existing_gemm,
HloInstruction *d_scale, HloInstruction *clamp_lower,
HloInstruction *clamp_upper,
bool mult_scale = false) {
if (instr->shape().element_type() == F8E4M3FN) {
if (!clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::lowest())) ||
!clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::max()))) {
return absl::OkStatus();
}
} else if (instr->shape().element_type() == F8E5M2) {
if (!clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::lowest())) ||
!clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::max()))) {
return absl::OkStatus();
}
} else {
return absl::OkStatus();
}
if (d_scale && !ShapeUtil::IsScalar(d_scale->shape())) {
return absl::OkStatus();
}
const std::vector<HloInstruction *> gemm_users = existing_gemm->users();
HloInstruction *reduce_damax = nullptr;
if (gemm_users.size() == 2) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
existing_gemm->backend_config<GpuBackendConfig>());
const GemmBackendConfig &config = gpu_config.gemm_backend_config();
for (int i = 0; i < gemm_users.size(); ++i) {
HloInstruction *maybe_reduce = nullptr;
if (gemm_users[i]->opcode() == HloOpcode::kAbs) {
if (gemm_users[i]->users().size() != 1) continue;
maybe_reduce = gemm_users[i]->users()[0];
} else {
if (config.epilogue() != GemmBackendConfig::BIAS_RELU &&
config.epilogue() != GemmBackendConfig::RELU)
continue;
maybe_reduce = gemm_users[i];
}
if (maybe_reduce->opcode() == HloOpcode::kReduce &&
maybe_reduce->operands().size() == 2 &&
maybe_reduce->operand(1)->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalar(maybe_reduce->operand(1)->shape())) {
HloInstruction *reduce = maybe_reduce;
HloComputation *reduce_comp = reduce->to_apply();
HloInstruction *reduce_comp_root = reduce_comp->root_instruction();
if (reduce->operand(1)->literal().GetAsDouble({}) <= 0. &&
reduce_comp_root->opcode() == HloOpcode::kMaximum &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter) {
reduce_damax = reduce;
}
}
}
if (!reduce_damax) {
return absl::OkStatus();
}
} else if (gemm_users.size() > 2) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(auto gpu_backend_config,
existing_gemm->backend_config<GpuBackendConfig>());
const GemmBackendConfig &gemm_backend_config =
gpu_backend_config.gemm_backend_config();
if (gemm_backend_config.beta() != 0.0) {
if (existing_gemm->operand(2)->shape().element_type() != BF16 &&
existing_gemm->operand(2)->shape().element_type() != F16) {
VLOG(1) << "The scaling and conversion of the result of "
<< existing_gemm->ToShortString()
<< " is not fused into the FP8 Custom Call because it "
"conflicts with the existing fusion of the addition of a "
"matrix bias with element type other than BF16 or F16.";
return absl::OkStatus();
} else {
xla::Cast<HloCustomCallInstruction>(existing_gemm)
->set_output_to_operand_aliasing({});
}
}
if (d_scale) {
TF_ASSIGN_OR_RETURN(d_scale,
InvertAndConvertScalar(d_scale, !mult_scale));
} else {
d_scale = instr->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(F32)));
}
existing_gemm->AppendOperand(d_scale);
if (reduce_damax) {
return F8AddDAmax(instr, existing_gemm, reduce_damax);
}
std::unique_ptr<HloInstruction> new_gemm =
existing_gemm->CloneWithNewShape(instr->shape());
TF_RETURN_IF_ERROR(ReplaceWithNewInstruction(instr, std::move(new_gemm)));
VLOG(1) << "Conversion" << (reduce_damax ? " and amax calculation" : "")
<< " fused into FP8 GEMM.";
return absl::OkStatus();
}
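  // Switches the FP8 custom call to a (D, damax) tuple output and rewires the
  // reduction that computed the absolute maximum to use the fused result.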
absl::Status F8AddDAmax(HloInstruction *instr, HloInstruction *existing_gemm,
HloInstruction *reduce_damax) {
Shape damax_shape = ShapeUtil::MakeScalarShape(F32);
Shape tuple_shape =
ShapeUtil::MakeTupleShape({instr->shape(), damax_shape});
HloInstruction *gemm_and_damax =
instr->AddInstruction(existing_gemm->CloneWithNewShape(tuple_shape));
TF_ASSIGN_OR_RETURN(auto gpu_config,
gemm_and_damax->backend_config<GpuBackendConfig>());
GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config();
config.set_damax_output(true);
TF_RETURN_IF_ERROR(gemm_and_damax->set_backend_config(gpu_config));
HloInstruction *d =
instr->AddInstruction(HloInstruction::CreateGetTupleElement(
instr->shape(), gemm_and_damax, 0));
HloInstruction *damax = instr->AddInstruction(
HloInstruction::CreateGetTupleElement(damax_shape, gemm_and_damax, 1));
HloInstruction *damax_converted = instr->AddInstruction(
HloInstruction::CreateConvert(reduce_damax->shape(), damax));
TF_RETURN_IF_ERROR(ReplaceInstruction(reduce_damax, damax_converted));
TF_RETURN_IF_ERROR(ReplaceInstruction(instr, d));
return absl::OkStatus();
}
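  // Fuses a full-matrix bias as operand C with beta = 1. The bias buffer is
  // aliased with the output when it may safely be overwritten, which legacy
  // cuBLAS requires.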
absl::Status FuseMatrixBiasAdd(HloInstruction *instr, HloInstruction *bias,
const HloInstruction *gemm,
HloInstruction *bitcast = nullptr,
HloInstruction *slice = nullptr) {
TF_RET_CHECK(Shape::Equal().IgnoreElementType()(bias->shape(),
bitcast ? bitcast->shape()
: slice ? slice->shape()
: gemm->shape()));
if (gemm->shape().element_type() == S32) {
return absl::OkStatus();
}
if (slice) {
int slice_op_dim = slice->operand(0)->shape().rank();
if (slice->slice_starts() != std::vector<int64_t>(slice_op_dim, 0) ||
slice->slice_strides() != std::vector<int64_t>(slice_op_dim, 1)) {
return absl::OkStatus();
}
}
bool can_overwrite_bias = [bias]() {
if (bias->user_count() > 1) {
return false;
}
if (bias->opcode() != HloOpcode::kParameter) {
return true;
}
if (!bias->parent()->IsEntryComputation()) {
return false;
}
const auto &in_out_alias_config =
bias->GetModule()->input_output_alias_config();
return in_out_alias_config.ParameterHasAlias(bias->parameter_number(),
{});
}();
bool want_to_fuse_bias = IsCublasLtMatmulF8(*gemm) ||
IsCublasLtMatmul(*gemm) || can_overwrite_bias;
auto gpu_config = gemm->backend_config<GpuBackendConfig>().value();
GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config();
bool supported_epilogue =
((config.epilogue() == GemmBackendConfig::DEFAULT) ||
(config.epilogue() == GemmBackendConfig::BIAS));
if ((config.beta() != 0) || !want_to_fuse_bias ||
(gemm->user_count() != 1) || !supported_epilogue) {
return absl::OkStatus();
}
config.set_beta(1.0);
std::vector<HloInstruction *> operands(gemm->operands().begin(),
gemm->operands().end());
HloInstruction *maybe_constant_folded_bias = MaybeConstantFoldBias(bias);
if (bitcast) {
maybe_constant_folded_bias =
instr->AddInstruction(HloInstruction::CreateBitcast(
slice->shape(), maybe_constant_folded_bias));
}
maybe_constant_folded_bias =
PadOperandToTargetShape(gemm->shape(), maybe_constant_folded_bias);
operands.insert(operands.begin() + 2, maybe_constant_folded_bias);
std::unique_ptr<HloInstruction> fused_op =
gemm->CloneWithNewOperands(gemm->shape(), operands);
fused_op->mutable_shape()->set_element_type(bias->shape().element_type());
TF_RETURN_IF_ERROR(fused_op->set_backend_config(gpu_config));
if (IsLegacyCublasMatmul(*fused_op) || can_overwrite_bias) {
xla::Cast<HloCustomCallInstruction>(fused_op.get())
->set_output_to_operand_aliasing({{{}, {2, {}}}});
}
TF_RETURN_IF_ERROR(SetName(instr->GetModule(), fused_op.get()));
if (slice) {
fused_op = slice->CloneWithNewOperands(
slice->shape(),
{slice->parent()->AddInstruction(std::move(fused_op))});
}
if (bitcast) {
fused_op = bitcast->CloneWithNewOperands(
bitcast->shape(),
{bitcast->parent()->AddInstruction(std::move(fused_op))});
}
return ReplaceWithNewInstruction(instr, std::move(fused_op));
}
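  // Fuses a broadcast vector bias as a BIAS epilogue, provided the broadcast
  // dimensions line up with the minor dimensions of the GEMM output layout.
  // For FP8 calls the bias must be BF16/F16 (an F32 bias is accepted only as
  // a convert of one) and, when reached through a bitcast, is padded like the
  // other operands.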
absl::StatusOr<bool> FuseVectorBiasAdd(HloInstruction *instr,
HloInstruction *broadcast,
HloInstruction *gemm,
HloInstruction *slice = nullptr,
HloInstruction *convert = nullptr,
HloInstruction *bitcast = nullptr) {
if (!bitcast) {
TF_RET_CHECK(ShapeUtil::Compatible(
broadcast->shape(), (slice ? slice->shape() : gemm->shape())));
}
if (!SupportsEpilogueFusion(gemm->shape().element_type())) {
return false;
}
HloInstruction *bias = broadcast->mutable_operand(0);
TF_ASSIGN_OR_RETURN(auto gpu_config,
gemm->backend_config<GpuBackendConfig>());
GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config();
const DotDimensionNumbers &dot_dims = config.dot_dimension_numbers();
size_t num_col_dims = gemm->operand(1)->shape().rank() -
dot_dims.rhs_batch_dimensions_size() -
dot_dims.rhs_contracting_dimensions_size();
if ((gemm->user_count() != 1) ||
(config.epilogue() != GemmBackendConfig::DEFAULT) ||
(bias->shape().rank() != num_col_dims)) {
return false;
}
absl::Span<const int64_t> broadcast_dims = broadcast->dimensions();
for (size_t i = 0; i < num_col_dims; ++i) {
int64_t dim =
(bitcast ? bitcast : gemm)->shape().layout().minor_to_major(i);
auto it = absl::c_find(broadcast_dims, dim);
if (it == broadcast_dims.end()) {
return false;
}
int64_t vector_dim = it - broadcast_dims.begin();
if (bias->shape().layout().minor_to_major(i) != vector_dim) {
return false;
}
}
std::vector<HloInstruction *> operands(gemm->operands().begin(),
gemm->operands().end());
if (gemm->custom_call_target() == kCublasLtMatmulF8CallTarget &&
config.beta() != 0.0) {
return true;
}
if (gemm->custom_call_target() == kCublasLtMatmulF8CallTarget &&
bias->shape().element_type() == F32) {
if (convert == nullptr) {
return false;
}
HloInstruction *bias_f16_or_bf16 = convert->mutable_operand(0);
auto compatible_bias_type = [](const PrimitiveType bias_type,
const PrimitiveType output_type) {
if (bias_type == BF16) {
return output_type == F8E4M3FN || output_type == F8E5M2 ||
output_type == F32 || output_type == BF16;
} else if (bias_type == F16) {
return output_type == F16 || output_type == F8E4M3FN ||
output_type == F8E5M2;
}
return false;
};
if (compatible_bias_type(bias_f16_or_bf16->shape().element_type(),
gemm->shape().element_type())) {
bias = bias_f16_or_bf16;
} else {
VLOG(1) << "Epilogue fusion of FP32 vector bias into FP8 GEMM is "
"currently not supported. See the cublasLT support matrix.";
return false;
}
}
if (gemm->custom_call_target() == kCublasLtMatmulF8CallTarget && bitcast) {
bias = PadOperandToMultipleOf16(
config.dot_dimension_numbers().rhs_batch_dimensions(), bias);
}
operands.push_back(bias);
config.set_epilogue(GemmBackendConfig::BIAS);
std::unique_ptr<HloInstruction> result =
gemm->CloneWithNewOperands(gemm->shape(), operands);
TF_RETURN_IF_ERROR(result->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(SetName(result->GetModule(), result.get()));
if (slice) {
result = slice->CloneWithNewOperands(
slice->shape(), {slice->parent()->AddInstruction(std::move(result))});
}
if (bitcast) {
result = bitcast->CloneWithNewOperands(
bitcast->shape(),
{bitcast->parent()->AddInstruction(std::move(result))});
}
TF_RETURN_IF_ERROR(ReplaceWithNewInstruction(instr, std::move(result)));
return true;
}
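  // Fuses a ReLU that consumes the GEMM output by upgrading the epilogue
  // from DEFAULT to RELU, or from BIAS to BIAS_RELU.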
absl::Status FuseReluActivation(HloInstruction *instr,
HloInstruction *broadcast,
HloInstruction *gemm,
HloInstruction *slice_or_bitcast = nullptr) {
TF_RET_CHECK(ShapeUtil::Compatible(
broadcast->shape(),
(slice_or_bitcast ? slice_or_bitcast->shape() : gemm->shape())));
if (!SupportsEpilogueFusion(gemm->shape().element_type())) {
return absl::OkStatus();
}
if (gemm->user_count() != 1) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(auto gpu_config,
gemm->backend_config<GpuBackendConfig>());
GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config();
if (config.epilogue() == GemmBackendConfig::DEFAULT) {
config.set_epilogue(GemmBackendConfig::RELU);
} else if (config.epilogue() == GemmBackendConfig::BIAS) {
config.set_epilogue(GemmBackendConfig::BIAS_RELU);
} else {
return absl::OkStatus();
}
std::unique_ptr<HloInstruction> result = gemm->Clone();
TF_RETURN_IF_ERROR(result->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(SetName(result->GetModule(), result.get()));
if (slice_or_bitcast) {
result = slice_or_bitcast->CloneWithNewOperands(
slice_or_bitcast->shape(),
{slice_or_bitcast->parent()->AddInstruction(std::move(result))});
}
return ReplaceWithNewInstruction(instr, std::move(result));
}
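  // Fuses a tanh-approximation GELU rooted at `multiply` into the GEMM by
  // upgrading the epilogue to GELU/BIAS_GELU, or to the *_AUX variants when
  // the intermediate GEMM value is still needed by other users.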
absl::Status FuseGeluActivation(HloInstruction *multiply,
HloInstruction *gemm,
HloInstruction *slice_or_bitcast = nullptr) {
if (!SupportsEpilogueFusion(gemm->shape().element_type())) {
return absl::OkStatus();
}
if (IsCuda(gpu_version_) &&
toolkit_version_ < stream_executor::SemanticVersion{12, 4, 0} &&
IsCublasLtMatmulF8(*gemm)) {
return absl::OkStatus();
}
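    // The approximate-GELU pattern itself contains four users of the GEMM
    // result; more than four means the raw GEMM value is needed elsewhere
    // and must be returned as an auxiliary output.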
bool has_aux = gemm->user_count() > 4;
TF_ASSIGN_OR_RETURN(auto gpu_config,
gemm->backend_config<GpuBackendConfig>());
GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config();
if (config.epilogue() == GemmBackendConfig::DEFAULT) {
config.set_epilogue(has_aux ? GemmBackendConfig::GELU_AUX
: GemmBackendConfig::GELU);
} else if (config.epilogue() == GemmBackendConfig::BIAS) {
config.set_epilogue(has_aux ? GemmBackendConfig::BIAS_GELU_AUX
: GemmBackendConfig::BIAS_GELU);
} else {
return absl::OkStatus();
}
std::unique_ptr<HloInstruction> output = gemm->CloneWithNewShape(
has_aux ? ShapeUtil::MakeTupleShape({gemm->shape(), gemm->shape()})
: gemm->shape());
TF_RETURN_IF_ERROR(output->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(SetName(multiply->GetModule(), output.get()));
if (slice_or_bitcast) {
output = slice_or_bitcast->CloneWithNewOperands(
slice_or_bitcast->shape(),
{gemm->parent()->AddInstruction(std::move(output))});
}
if (has_aux) {
HloInstruction *tuple_output =
gemm->parent()->AddInstruction(std::move(output));
TF_RETURN_IF_ERROR(ReplaceWithNewInstruction(
gemm, HloInstruction::CreateGetTupleElement(tuple_output, 1)));
output = HloInstruction::CreateGetTupleElement(tuple_output, 0);
}
return ReplaceWithNewInstruction(multiply, std::move(output));
}
private:
se::GpuComputeCapability gpu_version_;
stream_executor::SemanticVersion toolkit_version_;
GemmRewriterOptions options_;
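  // Picks the custom-call target for a non-FP8 dot: cublasLt when it is
  // enabled and supports this GEMM, otherwise the legacy cuBLAS target.
  // S8 operands always go to legacy cuBLAS.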
absl::StatusOr<absl::string_view> GetNonFp8GemmCustomCallTarget(
const HloInstruction &instr,
const GemmBackendConfig &gemm_backend_config) const {
if (!instr.GetModule()
->config()
.debug_options()
.xla_gpu_enable_cublaslt()) {
return absl::string_view(kGemmCallTarget);
}
const HloInstruction *lhs = instr.operand(0);
const HloInstruction *rhs = instr.operand(1);
if (lhs->shape().element_type() == S8 ||
rhs->shape().element_type() == S8) {
return absl::string_view(kGemmCallTarget);
}
TF_ASSIGN_OR_RETURN(bool gemm_is_supported_by_cublas_lt,
GemmIsSupportedByCublasLt(instr, gemm_backend_config));
if (gemm_is_supported_by_cublas_lt) {
return absl::string_view(kCublasLtMatmulCallTarget);
}
return absl::string_view(kGemmCallTarget);
}
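  // Returns true if the dot's operand and output types map onto a
  // (compute type, scale type, A/B type, output type) combination that
  // legacy cuBLAS supports.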
absl::StatusOr<bool> TypesAreSupportedByLegacyCublas(
const HloInstruction &instr, const GemmBackendConfig &gemm_backend_config,
const HloInstruction *bias = nullptr) const {
const PrimitiveType a_dtype = instr.operand(0)->shape().element_type();
const PrimitiveType b_dtype = instr.operand(1)->shape().element_type();
const PrimitiveType output_type =
bias ? bias->shape().element_type() : instr.shape().element_type();
    // Output types supported by legacy cuBLAS.
    const std::array<PrimitiveType, 8> supported_type = {
        PrimitiveType::S8,  PrimitiveType::F16, PrimitiveType::BF16,
        PrimitiveType::F32, PrimitiveType::S32, PrimitiveType::F64,
        PrimitiveType::C64, PrimitiveType::C128};
if (!absl::c_linear_search(supported_type, output_type)) return false;
TF_ASSIGN_OR_RETURN(const se::blas::DataType output_dtype,
se::gpu::AsBlasDataType(output_type));
TF_ASSIGN_OR_RETURN(
const se::blas::ComputationType compute_type,
se::gpu::GetBlasComputationType(
instr.precision_config().algorithm(), a_dtype, output_type,
stream_executor::blas::kDefaultComputePrecision));
se::blas::DataType scale_type =
se::gpu::GetScaleType(output_dtype, compute_type);
using se::blas::ComputationType;
using se::blas::DataType;
    // Legacy cuBLAS supports this fixed list of
    // (compute_type, scale_type, a_dtype, b_dtype, output_dtype)
    // combinations.
    const std::array<
        std::tuple<ComputationType, DataType /*scale_type*/,
                   PrimitiveType /*a_dtype*/, PrimitiveType /*b_dtype*/,
                   DataType /*output_dtype*/>,
        17>
supported_type_combinations = {{
{ComputationType::kF16, DataType::kHalf, PrimitiveType::F16,
PrimitiveType::F16, DataType::kHalf},
{ComputationType::kI32, DataType::kInt32, PrimitiveType::S8,
PrimitiveType::S8, DataType::kInt32},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16,
PrimitiveType::BF16, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F16,
PrimitiveType::F16, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::S8,
PrimitiveType::S8, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16,
PrimitiveType::BF16, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F16,
PrimitiveType::F16, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kF32, DataType::kComplexFloat, PrimitiveType::C64,
PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kF16AsF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kF16AsF32, DataType::kComplexFloat,
PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kBF16AsF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kBF16AsF32, DataType::kComplexFloat,
PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kTF32AsF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kTF32AsF32, DataType::kComplexFloat,
PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kF64, DataType::kDouble, PrimitiveType::F64,
PrimitiveType::F64, DataType::kDouble},
{ComputationType::kF64, DataType::kComplexDouble,
PrimitiveType::C128, PrimitiveType::C128,
DataType::kComplexDouble},
}};
return absl::c_linear_search(
supported_type_combinations,
std::make_tuple(compute_type, scale_type, a_dtype, b_dtype,
output_dtype));
}
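  // Same check as above, but against the cublasLt support matrix, which
  // additionally covers FP8 types (F8E4M3FN/F8E5M2 on CUDA and the *FNUZ
  // variants on ROCm).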
absl::StatusOr<bool> TypesAreSupportedByCublasLt(
const HloInstruction &instr, const GemmBackendConfig &backend_config,
const HloInstruction *bias = nullptr) const {
const PrimitiveType a_dtype = instr.operand(0)->shape().element_type();
const PrimitiveType b_dtype = instr.operand(1)->shape().element_type();
const PrimitiveType output_type =
bias ? bias->shape().element_type() : instr.shape().element_type();
const std::array<PrimitiveType, 12> supported_type = {
PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN,
PrimitiveType::S8, PrimitiveType::F16,
PrimitiveType::BF16, PrimitiveType::F32,
PrimitiveType::S32, PrimitiveType::F64,
PrimitiveType::C64, PrimitiveType::C128};
if (!absl::c_linear_search(supported_type, output_type)) return false;
TF_ASSIGN_OR_RETURN(const se::blas::DataType output_dtype,
se::gpu::AsBlasDataType(output_type));
const int max_precision = *absl::c_max_element(
backend_config.precision_config().operand_precision());
const PrecisionConfig::Algorithm algorithm =
backend_config.precision_config().algorithm();
if (!algorithm_util::IsSupportedByCublasOrCublasLt(algorithm, gpu_version_))
return false;
TF_ASSIGN_OR_RETURN(
const se::blas::ComputationType compute_type,
se::gpu::GetBlasComputationType(
algorithm, a_dtype, instr.shape().element_type(), max_precision));
se::blas::DataType scale_type =
se::gpu::GetScaleType(output_dtype, compute_type);
using se::blas::ComputationType;
using se::blas::DataType;
    using TypeCombinations = std::initializer_list<
        std::tuple<ComputationType, DataType /*scale_type*/,
                   PrimitiveType /*a_dtype*/, PrimitiveType /*b_dtype*/,
                   DataType /*output_dtype*/>>;
const TypeCombinations supported_cublas_type_combinations = {
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E4M3FN, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E4M3FN, DataType::kF8E4M3FN},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E4M3FN, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E4M3FN, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E5M2, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E5M2, DataType::kF8E4M3FN},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E5M2, DataType::kF8E5M2},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E5M2, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN,
PrimitiveType::F8E5M2, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2,
PrimitiveType::F8E4M3FN, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2,
PrimitiveType::F8E4M3FN, DataType::kF8E4M3FN},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2,
PrimitiveType::F8E4M3FN, DataType::kF8E5M2},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2,
PrimitiveType::F8E4M3FN, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2,
PrimitiveType::F8E4M3FN, DataType::kFloat},
{ComputationType::kF32, DataType::kComplexFloat, PrimitiveType::C64,
PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kF16AsF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kF16AsF32, DataType::kComplexFloat,
PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kBF16AsF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kBF16AsF32, DataType::kComplexFloat,
PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kTF32AsF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
{ComputationType::kTF32AsF32, DataType::kComplexFloat,
PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat},
{ComputationType::kF64, DataType::kDouble, PrimitiveType::F64,
PrimitiveType::F64, DataType::kDouble},
{ComputationType::kF64, DataType::kComplexDouble, PrimitiveType::C128,
PrimitiveType::C128, DataType::kComplexDouble},
};
if (IsCuda(gpu_version_) &&
absl::c_linear_search(supported_cublas_type_combinations,
std::tuple{compute_type, scale_type, a_dtype,
b_dtype, output_dtype})) {
return true;
}
const TypeCombinations supported_hipblas_type_combinations = {
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kF8E4M3FNUZ},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E5M2FNUZ, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E5M2FNUZ, DataType::kF8E4M3FNUZ},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E5M2FNUZ, DataType::kF8E5M2FNUZ},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E5M2FNUZ, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ,
PrimitiveType::F8E5M2FNUZ, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kF8E4M3FNUZ},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kF8E5M2FNUZ},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ,
PrimitiveType::F8E4M3FNUZ, DataType::kFloat},
};
if (IsRocm(gpu_version_) &&
absl::c_linear_search(supported_hipblas_type_combinations,
std::tuple{compute_type, scale_type, a_dtype,
b_dtype, output_dtype})) {
return true;
}
const TypeCombinations supported_type_combinations = {
{ComputationType::kF16, DataType::kHalf, PrimitiveType::F16,
PrimitiveType::F16, DataType::kHalf},
{ComputationType::kI32, DataType::kInt32, PrimitiveType::S8,
PrimitiveType::S8, DataType::kInt32},
{ComputationType::kI32, DataType::kFloat, PrimitiveType::S8,
PrimitiveType::S8, DataType::kInt8},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16,
PrimitiveType::BF16, DataType::kBF16},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F16,
PrimitiveType::F16, DataType::kHalf},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::S8,
PrimitiveType::S8, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16,
PrimitiveType::BF16, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F16,
PrimitiveType::F16, DataType::kFloat},
{ComputationType::kF32, DataType::kFloat, PrimitiveType::F32,
PrimitiveType::F32, DataType::kFloat},
};
return absl::c_linear_search(
supported_type_combinations,
std::make_tuple(compute_type, scale_type, a_dtype, b_dtype,
output_dtype));
}
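  // Checks the remaining cublasLt constraints beyond the type combination:
  // the flattened batch count, hipBLASLt availability on ROCm, and a
  // dimension limit for C64 GEMMs on pre-Ampere CUDA devices.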
absl::StatusOr<bool> GemmIsSupportedByCublasLt(
const HloInstruction &instr,
const GemmBackendConfig &gemm_backend_config) const {
const HloInstruction *lhs = instr.operand(0);
const Shape &output_shape = instr.shape();
TF_ASSIGN_OR_RETURN(
bool types_are_supported_by_cublas_lt,
TypesAreSupportedByCublasLt(instr, gemm_backend_config));
if (!types_are_supported_by_cublas_lt) {
return false;
}
constexpr int64_t kMaxBatchCount = 65535;
const auto &batch_dimensions =
gemm_backend_config.dot_dimension_numbers().lhs_batch_dimensions();
int batch_count = (batch_dimensions.empty() ? 0 : 1);
for (auto batch_dimension : batch_dimensions) {
batch_count *= lhs->shape().dimensions(batch_dimension);
}
if (batch_count > kMaxBatchCount) {
return false;
}
if (auto isrocm = std::get_if<se::RocmComputeCapability>(&gpu_version_);
isrocm) {
if (!isrocm->has_hipblaslt()) {
return false;
}
}
constexpr int kMaxDimensionSize{4194240};
if (output_shape.element_type() != C64) {
return true;
}
if (std::holds_alternative<se::CudaComputeCapability>(gpu_version_)) {
if (std::get<se::CudaComputeCapability>(gpu_version_).IsAtLeastAmpere()) {
return true;
}
}
TF_ASSIGN_OR_RETURN(GemmConfig gemm_config,
GemmConfig::For(&instr, gemm_backend_config));
return gemm_config.rhs_layout.num_cols <= kMaxDimensionSize;
}
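  // Rewrites an FP8 dot whose output type cuBLAS cannot produce directly
  // into an F32 dot followed by a convert back to the requested type.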
absl::StatusOr<HloInstruction *> TurnF8DotWithUnsupportedOutputTypeIntoF32(
HloInstruction *instr) {
Shape output_f32_shape = instr->shape();
output_f32_shape.set_element_type(F32);
HloInstruction *f32_dot =
instr->AddInstruction(instr->CloneWithNewShape(output_f32_shape));
HloInstruction *convert = instr->AddInstruction(
HloInstruction::CreateConvert(instr->shape(), f32_dot));
TF_RETURN_IF_ERROR(ReplaceInstruction(instr, convert));
return f32_dot;
}
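  // Falls back from an FP8 dot to an F16 (or BF16) dot by converting both
  // operands, converting the result back to FP8 if necessary.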
absl::StatusOr<HloInstruction *> TurnF8DotIntoF16Dot(HloInstruction *instr) {
DCHECK(IsF8Type(instr->operand(0)));
DCHECK(IsF8Type(instr->operand(1)));
PrimitiveType conv_type =
instr->shape().element_type() == BF16 ? BF16 : F16;
for (int i = 0; i < 2; ++i) {
Shape operand_f16_shape = instr->operand(i)->shape();
operand_f16_shape.set_element_type(conv_type);
HloInstruction *convert =
instr->AddInstruction(HloInstruction::CreateConvert(
operand_f16_shape, instr->mutable_operand(i)));
TF_RETURN_IF_ERROR(instr->ReplaceOperandWith(i, convert));
}
if (IsF8Type(instr)) {
Shape output_f16_shape = instr->shape();
output_f16_shape.set_element_type(F16);
HloInstruction *f16_dot =
instr->AddInstruction(instr->CloneWithNewShape(output_f16_shape));
HloInstruction *convert_to_f8 = instr->AddInstruction(
HloInstruction::CreateConvert(instr->shape(), f16_dot));
TF_RETURN_IF_ERROR(ReplaceInstruction(instr, convert_to_f8));
return f16_dot;
} else {
return instr;
}
}
};
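// Rewrites GEMM custom calls to carry an extra s8[] workspace buffer in
// their output tuple so the runtime does not have to allocate scratch space
// separately. Hopper-class devices get a larger workspace; for legacy cuBLAS
// calls the workspace is capped at the total operand size.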
class GemmWorkspaceRewriteVisitor : public DfsHloRewriteVisitor {
public:
explicit GemmWorkspaceRewriteVisitor(
const se::GpuComputeCapability &gpu_version)
: gpu_version_(gpu_version) {}
absl::Status HandleCustomCall(HloInstruction *instr) override {
bool has_aux_output = false;
if (instr->custom_call_target() == kCublasLtMatmulCallTarget ||
instr->custom_call_target() == kCublasLtMatmulF8CallTarget) {
TF_ASSIGN_OR_RETURN(const auto gpu_config,
instr->backend_config<xla::gpu::GpuBackendConfig>());
const xla::gpu::GemmBackendConfig &config =
gpu_config.gemm_backend_config();
xla::gpu::GemmBackendConfig_Epilogue epilogue = config.epilogue();
TF_ASSIGN_OR_RETURN(
has_aux_output,
xla::gpu::gpublas_lt::EpilogueHasAuxiliaryOutput(epilogue));
if (!((instr->shape().IsTuple() &&
instr->shape().tuple_shapes_size() ==
has_aux_output + config.damax_output() + 1) ||
instr->shape().IsArray())) {
return absl::OkStatus();
}
} else if (instr->custom_call_target() != kGemmCallTarget ||
!instr->shape().IsArray()) {
return absl::OkStatus();
}
auto *cuda_cc = std::get_if<se::CudaComputeCapability>(&gpu_version_);
int64_t workspace = cuda_cc == nullptr ? GemmConfig::kDefaultWorkspace
: cuda_cc->IsAtLeastHopper()
? GemmConfig::kHopperWorkspace
: GemmConfig::kDefaultWorkspace;
if (instr->custom_call_target() == kGemmCallTarget) {
int64_t operands_byte_size = 0;
for (auto &operand : instr->operands()) {
operands_byte_size += ShapeUtil::ByteSizeOf(operand->shape());
}
workspace = std::min(workspace, operands_byte_size);
}
std::vector<Shape> output_shapes = instr->shape().IsArray()
? std::vector<Shape>{instr->shape()}
: instr->shape().tuple_shapes();
output_shapes.emplace_back(ShapeUtil::MakeShape(S8, {workspace}));
Shape output_shape = ShapeUtil::MakeTupleShape(output_shapes);
HloInstruction *new_call = instr->AddInstruction(
instr->CloneWithNewOperands(output_shape, instr->operands()));
auto *custom_call = xla::Cast<HloCustomCallInstruction>(new_call);
if (!custom_call->output_to_operand_aliasing().empty()) {
custom_call->set_output_to_operand_aliasing({{{0}, {2, {}}}});
}
if (instr->shape().IsTuple()) {
for (auto user : instr->users()) {
auto user_get_tuple =
dynamic_cast<HloGetTupleElementInstruction *>(user);
TF_RET_CHECK(user_get_tuple);
HloInstruction *get_output =
instr->AddInstruction(HloInstruction::CreateGetTupleElement(
new_call, user_get_tuple->tuple_index()));
TF_RETURN_IF_ERROR(ReplaceInstruction(user_get_tuple, get_output));
}
return absl::OkStatus();
} else {
HloInstruction *get_output = instr->AddInstruction(
HloInstruction::CreateGetTupleElement(new_call, 0));
return ReplaceInstruction(instr, get_output);
}
}
private:
se::GpuComputeCapability gpu_version_;
};
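// Runs the GEMM rewriter and then the workspace rewriter over a computation,
// returning whether the first pass changed anything.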
absl::StatusOr<bool> RunOnComputation(HloComputation *computation,
se::GpuComputeCapability gpu_version,
se::SemanticVersion toolkit_version,
GemmRewriterOptions options) {
GemmRewriterVisitor visitor(gpu_version, toolkit_version, options);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
GemmWorkspaceRewriteVisitor workspace_visitor(gpu_version);
TF_RETURN_IF_ERROR(computation->Accept(&workspace_visitor));
return visitor.changed();
}
}  // namespace
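// Public entry points: the constructor captures the target GPU and toolkit
// version, and Run() rewrites every non-fusion computation in the module.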
GemmRewriter::GemmRewriter(se::GpuComputeCapability gpu_version,
se::SemanticVersion toolkit_version,
GemmRewriterOptions options)
: gpu_version_(gpu_version),
toolkit_version_(toolkit_version),
options_(options) {}
absl::StatusOr<bool> GemmRewriter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
bool changed = false;
for (HloComputation *computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result,
RunOnComputation(computation, gpu_version_,
toolkit_version_, options_));
changed |= result;
}
return changed;
}
}  // namespace gpu
} | #include "xla/service/gpu/transforms/gemm_rewriter.h"
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_interface.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/gpu_executable.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/test.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
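// Base fixture: exposes the device's compute capability and toolkit version,
// and disables Triton GEMMs so that dots reach the cuBLAS rewriter.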
class GemmRewriteTest : public GpuCodegenTest {
const auto& device_desc() const {
return backend().default_stream_executor()->GetDeviceDescription();
}
protected:
const se::GpuComputeCapability& Capability() const {
return device_desc().gpu_compute_capability();
}
stream_executor::SemanticVersion GetToolkitVersion() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.runtime_version();
}
bool IsCuda() const {
return std::holds_alternative<se::CudaComputeCapability>(Capability());
}
bool IsRocm() const {
return std::holds_alternative<se::RocmComputeCapability>(Capability());
}
se::GpuComputeCapability CudaHopperOrRocmMI300() {
if (IsCuda()) {
return se::CudaComputeCapability::Hopper();
} else {
return se::RocmComputeCapability{"gfx942"};
}
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(false);
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0);
return debug_options;
}
bool SkipGpuBlasLtTest() {
return !IsCuda() &&
!std::get<se::RocmComputeCapability>(Capability()).has_hipblaslt() &&
GetDebugOptionsForTest().xla_gpu_enable_cublaslt();
}
bool HasFp8Support() const {
if (IsCuda()) {
return std::get<se::CudaComputeCapability>(Capability()).IsAtLeast(8, 9);
}
return std::get<se::RocmComputeCapability>(Capability()).has_fp8_support();
}
bool HasCudaComputeCapability(const se::CudaComputeCapability& cc) const {
return IsCuda() &&
std::get<se::CudaComputeCapability>(Capability()).IsAtLeast(cc);
}
};
TEST_F(GemmRewriteTest, CheckCustomCallTarget) {
if (SkipGpuBlasLtTest()) {
GTEST_SKIP() << "BlasLt is not supported on this GPU architecture";
}
const char* hlo_text = R"(
HloModule SimpleGemm
ENTRY AddDotsFunc {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
ROOT dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
DebugOptions debug_options = GetDebugOptionsForTest();
if (debug_options.xla_gpu_enable_cublaslt()) {
MatchOptimizedHlo(hlo_text,
R"(; CHECK: custom_call_target="__cublas$lt$matmul")");
} else {
MatchOptimizedHlo(hlo_text,
R"(; CHECK: custom_call_target="__cublas$gemm")");
}
}
TEST_F(GemmRewriteTest, TestBatchedAutotuning) {
if (HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP()
<< "There is no autotuning starting with the Nvidia Ampere generation";
}
const char* hlo_text = R"(
HloModule ComplexDotMultipleNonContracting
ENTRY %test {
%lhs = f32[7,17,10,13]{3,2,1,0} parameter(0)
%rhs = f32[7,9,10,13,6]{4,3,2,1,0} parameter(1)
ROOT %dot = f32[10,7,17,9,6]{4,3,2,1,0} dot(%lhs, %rhs), lhs_batch_dims={2,0}, rhs_batch_dims={2,0}, lhs_contracting_dims={3}, rhs_contracting_dims={3}
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: selected_algorithm
)");
}
TEST_F(GemmRewriteTest, SimpleRewriteDeterministic) {
if (SkipGpuBlasLtTest()) {
GTEST_SKIP() << "BlasLt is not supported on this GPU architecture";
}
const char* hlo_text = R"(
HloModule SimpleGemm
ENTRY AddDotsFunc {
x = f32[128,128] parameter(0)
y = f32[128,128] parameter(1)
ROOT dot_a = f32[128,128] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
  // The cublasLt and legacy cuBLAS paths use the same tolerance here.
  ErrorSpec error_spec{1e-3, 1e-3};
auto get_module = [&]() {
HloModuleConfig config;
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_exclude_nondeterministic_ops(true);
config.set_debug_options(debug_options);
return ParseAndReturnVerifiedModule(hlo_text, config);
};
se::StreamExecutorMemoryAllocator allocator(
backend().default_stream_executor());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloModule> optimized_module,
backend().compiler()->RunHloPasses(
*get_module(), backend().default_stream_executor(), &allocator));
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_module->ToString(),
R"(
; CHECK: custom_call_target="__cublas${{(lt\$matmul|gemm)}}"
)");
TF_ASSERT_OK(filecheck_result.status());
EXPECT_TRUE(filecheck_result.value());
EXPECT_TRUE(RunAndCompare(*get_module(), error_spec));
}
TEST_F(GemmRewriteTest, BF16GemmCodeGen) {
const char* hlo_text = R"(
HloModule bf16codegendgemm
ENTRY bf16gemm {
%parameter.1 = bf16[3]{0} parameter(0)
%parameter.2 = bf16[3]{0} parameter(1)
ROOT %dot.3 = bf16[] dot(bf16[3]{0} %parameter.1, bf16[3]{0} %parameter.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, operand_precision={highest,highest}
}
)";
if (HasCudaComputeCapability(se::CudaComputeCapability::Hopper())) {
MatchOptimizedHlo(hlo_text, R"(
; CHECK: [[P0:%[^ ]+]] = bf16[3]{0} parameter(0)
; CHECK: [[P1:%[^ ]+]] = bf16[3]{0} parameter(1)
; CHECK: [[INSTR_2:%[^ ]+]] = bf16[3]{0} multiply([[P0]], [[P1]])
; CHECK: [[INSTR_3:%[^ ]+]] = f32[3]{0} convert([[INSTR_2]])
; CHECK: [[INSTR_4:%[^ ]+]] = f32[] constant(0)
; CHECK: [[INSTR_5:%[^ ]+]] = f32[] reduce([[INSTR_3]], [[INSTR_4]]), dimensions={0}, to_apply=[[INSTR_6:%[^ ]+]]
; CHECK: ROOT [[INSTR_7:%[^ ]+]] = bf16[] convert([[INSTR_5]])
)");
} else {
MatchOptimizedHlo(hlo_text, R"(
; CHECK: [[P1:%[^ ]+]] = bf16[3]{0} parameter(1)
; CHECK: [[INSTR_1:%[^ ]+]] = f32[3]{0} convert([[P1]])
; CHECK: [[P0:%[^ ]+]] = bf16[3]{0} parameter(0)
; CHECK: [[INSTR_3:%[^ ]+]] = f32[3]{0} convert([[P0]])
; CHECK: [[INSTR_4:%[^ ]+]] = f32[3]{0} multiply([[INSTR_1]], [[INSTR_3]])
; CHECK: [[INSTR_5:%[^ ]+]] = f32[] constant(0)
; CHECK: [[INSTR_6:%[^ ]+]] = f32[] reduce([[INSTR_4]], [[INSTR_5]]), dimensions={0}, to_apply=[[INSTR_7:%[^ ]+]]
; CHECK: ROOT [[INSTR_8:%[^ ]+]] = bf16[] convert([[INSTR_6]])
)");
}
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-4}));
}
TEST_F(GemmRewriteTest, BF16Transpose) {
const char* hlo_text = R"(
HloModule broadcast
ENTRY broadcast {
p = bf16[9] parameter(0)
ROOT out = bf16[1,9] broadcast(p), dimensions={1}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK: bf16[1,9]{1,0} bitcast
; CHECK: bf16[1,9]{1,0} copy
)");
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
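// Runs each test twice: once targeting legacy cuBLAS and once targeting
// cublasLt, selected via the bool test parameter. The
// <<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>> token in FileCheck patterns is
// replaced with the matching custom-call target.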
class ParameterizedGemmRewriteTest
: public GemmRewriteTest,
public ::testing::WithParamInterface<bool> {
public:
ParameterizedGemmRewriteTest() {
const bool kUsingCublasLt = GetParam();
replacements_[kCustomCallTargetPlaceholder] =
kUsingCublasLt ? "__cublas$lt$matmul" : "__cublas$gemm";
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cublaslt(GetParam());
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
void MatchOptimizedHlo(absl::string_view hlo, const absl::string_view pattern,
bool print_operand_shape = false) {
GemmRewriteTest::MatchOptimizedHlo(
hlo, absl::StrReplaceAll(pattern, replacements_), print_operand_shape);
}
absl::string_view CustomCallTarget() {
return replacements_[kCustomCallTargetPlaceholder];
}
protected:
void SetUp() override {
if (SkipGpuBlasLtTest()) {
GTEST_SKIP() << "BlasLt is not supported on this GPU architecture";
}
}
protected:
absl::flat_hash_map<absl::string_view, absl::string_view> replacements_;
private:
static constexpr const char* kCustomCallTargetPlaceholder{
"<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>"};
};
TEST_P(ParameterizedGemmRewriteTest, Simple) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
ROOT dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, SimpleRewrite) {
const char* hlo_text = R"(
HloModule SimpleGemm
ENTRY AddDotsFunc {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
ROOT dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, MultipleContractingDims) {
const char* hlo_text = R"(
HloModule MultipleContractingCheckGemm
ENTRY AddDotsFunc {
x = f32[3,4,2] parameter(0)
y = f32[3,4,5] parameter(1)
ROOT dot_a = f32[2,5] dot(x, y), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, operand_precision={highest,highest}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-NOT: copy
;
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,4,2], {{.*}}: f32[3,4,5]) -> f32[2,5] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,4,2]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4,5]{2,1,0} parameter(1)
; CHECK-DAG: [[BITCAST0:%[^ ]+]] = f32[2,12]{0,1} bitcast([[P0]])
; CHECK-DAG: [[BITCAST1:%[^ ]+]] = f32[12,5]{1,0} bitcast([[P1]])
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[BITCAST0]], [[BITCAST1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, ArgTransposeFoldCheck) {
const char* hlo_text = R"(
HloModule ArgTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[3,2] parameter(0)
y = f32[3,4] parameter(1)
x_transposed = f32[2,3] transpose(x), dimensions={1, 0}
ROOT dot_a = f32[2,4] dot(x_transposed, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,2], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,2]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["0"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, BatchedArgRowColTransposeFoldCheck) {
const char* hlo_text = R"(
HloModule BatchedArgRowColTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[5,3,2] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={0, 2, 1}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[5,3,2], {{.*}}: f32[5,3,4]) -> f32[5,2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[5,3,2]{2,1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, BatchRowTransposeFoldCheck) {
const char* hlo_text = R"(
HloModule BatchRowTransposeFoldCheck
ENTRY AddDotsFunc {
x = f32[2,5,3] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={1, 0, 2}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,5,3], {{.*}}: f32[5,3,4]) -> f32[5,2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,5,3]{2,1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["2"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":["1"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, BatchFromMinorDimTransposeIsNotFolded) {
const char* hlo_text = R"(
HloModule BatchFromMinorDimTransposeDoesntFold
ENTRY AddDotsFunc {
x = f32[3,2,5] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={2, 1, 0}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,2,5], {{.*}}: f32[5,3,4]) -> f32[5,2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,2,5]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1)
; CHECK-DAG: [[FUSION:%[^ ]+]] = f32[5,2,3]{2,1,0} transpose([[P0]])
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[FUSION]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["2"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, LargeBatch) {
const char* hlo_text = R"(
HloModule BatchedArgRowColTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[20000,4,3,2] parameter(0)
y = f32[20000,4,3,4] parameter(1)
ROOT dot_a = f32[20000,4,2,4] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[20000,4,3,2], {{.*}}: f32[20000,4,3,4]) -> f32[20000,4,2,4] {
; CHECK: [[P0:%[^ ]+]] = f32[20000,4,3,2]{3,2,1,0} parameter(0)
; CHECK: [[BC0:%[^ ]+]] = f32[80000,3,2]{2,1,0} bitcast([[P0]])
; CHECK: [[P1:%[^ ]+]] = f32[20000,4,3,4]{3,2,1,0} parameter(1)
; CHECK: [[BC1:%[^ ]+]] = f32[80000,3,4]{2,1,0} bitcast([[P1]])
; CHECK: [[GEMM:%[^ ]+]] = (f32[80000,2,4]{2,1,0}, s8[{{[0-9]+}}]{0}) custom-call([[BC0]], [[BC1]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK: }
; CHECK: [[OUT:%[^ ]+]] = f32[80000,2,4]{2,1,0} get-tuple-element([[GEMM]]), index=0
; CHECK: ROOT {{[^ ]+}} = f32[20000,4,2,4]{3,2,1,0} bitcast([[OUT]])
)");
}
TEST_P(ParameterizedGemmRewriteTest, InstrTransposeFoldCheck) {
const char* hlo_text = R"(
HloModule InstrTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[4,2] transpose(dot_a), dimensions={1, 0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[4,2] {
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P1]], [[P0]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["0"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, BatchedInstrLayoutTransposed) {
const char* hlo_text = R"(
HloModule BatchedInstrLayoutCheck
ENTRY AddDotsFunc {
x = f32[5,2,3] parameter(0)
y = f32[5,3,4] parameter(1)
dot_a = f32[5,2,4] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
ROOT out = f32[2,5,4] transpose(dot_a), dimensions={1, 0, 2}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[5,2,3], {{.*}}: f32[5,3,4]) -> f32[2,5,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[5,2,3]{2,1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["2"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: ROOT [[OUT:%[^ ]+]] = f32[2,5,4]{2,1,0} bitcast
)");
}
TEST_P(ParameterizedGemmRewriteTest, BatchedInstrLayoutBatchNotInMinorDim) {
const char* hlo_text = R"(
HloModule BatchedInstrLayoutBatchNotInMinorDim
ENTRY AddDotsFunc {
x = f32[5,2,3] parameter(0)
y = f32[5,3,4] parameter(1)
dot_a = f32[5,2,4] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
ROOT out = f32[2,4,5] transpose(dot_a), dimensions={1, 2, 0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[5,2,3], {{.*}}: f32[5,3,4]) -> f32[2,4,5] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[5,2,3]{2,1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["2"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: ROOT [[OUT:%[^ ]+]] = f32[2,4,5]{2,1,0} [[OP:[^ ]+]]
)");
}
TEST_P(ParameterizedGemmRewriteTest, AlphaSimpleRewrite) {
const char* hlo_text = R"(
HloModule AlphaSimpleRewrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
ROOT dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, F64C64_CublasLtSupportTest) {
{
const char* hlo_text = R"(
HloModule F64_rewrite
ENTRY AddDotsFunc {
x = f64[2,2] parameter(0)
y = f64[2,2] parameter(1)
k = f64[] constant(3.0)
k_broadcast = f64[2, 2] broadcast(k), dimensions={}
dot_a = f64[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_a_multiplied = f64[2, 2] multiply(dot_a, k_broadcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-5}));
}
{
const char* hlo_text = R"(
HloModule C64_rewrite
ENTRY AddDotsFunc {
x = c64[2,2] parameter(0)
y = c64[2,2] parameter(1)
k = c64[] constant((3.0, 3.0))
k_broadcast = c64[2, 2] broadcast(k), dimensions={}
dot_a = c64[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_a_multiplied = c64[2, 2] multiply(dot_a, k_broadcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-5}));
}
}
TEST_P(ParameterizedGemmRewriteTest, ComplexAlphaSimpleRewrite) {
if (!IsCuda() && GetDebugOptionsForTest().xla_gpu_enable_cublaslt()) {
GTEST_SKIP() << "TODO: Unsupported C64 gpublas-lt datatype on ROCM";
}
const char* hlo_text = R"(
HloModule ComplexAlphaSimpleRewrite
ENTRY AddDotsFunc {
x = c64[2,2] parameter(0)
y = c64[2,2] parameter(1)
k = c64[] constant((3.0, 3.0))
k_broadcast = c64[2, 2] broadcast(k), dimensions={}
dot_a = c64[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_a_multiplied = c64[2, 2] multiply(dot_a, k_broadcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: c64[2,2], {{.*}}: c64[2,2]) -> c64[2,2] {
; CHECK-NEXT: [[P0:%[^ ]+]] = c64[2,2]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = c64[2,2]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":3
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, AlphaMultipleUsersNoRewrite) {
const char* hlo_text = R"(
HloModule AlphaMultipleUsersNoRewrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
ROOT out = f32[2,2] add(dot_a_multiplied, dot_a)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{[^ ]+}} = {{.*}} custom-call({{[^,]+}}, {{[^)]+}}),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, AlphaVectorNoRewrite) {
const char* hlo_text = R"(
HloModule AlphaVectorNoRewrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
alpha = f32[2] constant({1, 2})
alpha_broadcast = f32[2,2] broadcast(alpha), dimensions={1}
dot = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_a_multiplied = f32[2, 2] multiply(dot, alpha_broadcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedGemmRewriteTest, BF16Gemm) {
const char* hlo_text = R"(
HloModule bf16gemm
ENTRY bf16gemm {
%parameter.1 = bf16[12,4]{1,0} parameter(0)
%parameter.2 = bf16[4,8]{1,0} parameter(1)
ROOT %dot.8 = bf16[12,8] dot(bf16[12,4] %parameter.1, bf16[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} custom-call(bf16[16,8]{1,0} {{.*}}, bf16[8,8]{1,0} {{.*}}), custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>"
)",
true);
} else {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
}
TEST_P(ParameterizedGemmRewriteTest, BF16GemmStrided) {
const char* hlo_text = R"(
HloModule bf16gemm
ENTRY bf16gemm {
%parameter.1 = bf16[3,3,4] parameter(0)
%parameter.2 = bf16[3,3,2] parameter(1)
ROOT %dot.3 = bf16[3,4,2]{2,1,0} dot(bf16[3,3,4]{2,1,0} %parameter.1, bf16[3,3,2]{2,1,0} %parameter.2), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}, operand_precision={highest,highest}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} custom-call(bf16[3,8,8]{2,1,0} {{.*}}, bf16[3,8,8]{2,1,0} {{.*}}), custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>"
)",
true);
} else {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
}
TEST_P(ParameterizedGemmRewriteTest, Int8Gemm) {
const char* hlo_text = R"(
HloModule int8gemm
ENTRY int8gemm {
%parameter.1 = s8[12,4]{1,0} parameter(0)
%parameter.2 = s8[4,8]{1,0} parameter(1)
ROOT %dot.8 = s32[12,8] dot(s8[12,4] %parameter.1, s8[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} custom-call(s8[12,4]{1,0} [[A:%[^ ]+]], s8[4,8]{0,1} [[B:%[^ ]+]]), custom_call_target="__cublas$gemm"
)",
true);
} else {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} dot(s32[12,4]{1,0} [[A:%[^ ]+]], s32[4,8]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0}
)",
true);
}
}
TEST_F(GemmRewriteTest, Int8GemmRankGreaterThanTwo) {
if (!IsCuda()) {
GTEST_SKIP() << "DoBlasGemmWithAlgorithm is not yet implemented on ROCm";
}
const char* hlo_text = R"(
HloModule int8gemm
ENTRY main.4 {
Arg_0.1 = s8[1,8,2]{2,1,0} parameter(0)
Arg_1.2 = s8[2,4]{1,0} parameter(1)
ROOT dot.3 = s32[1,8,4]{2,1,0} dot(Arg_0.1, Arg_1.2),
lhs_contracting_dims={2}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[GEMM:%[^ ]+]] = (s32[8,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call(s8[8,4]{1,0} %{{.*}}, s8[4,4]{0,1} %{{.*}}), custom_call_target="__cublas$gemm",
)",
true);
}
}
TEST_P(ParameterizedGemmRewriteTest, Int8GemmNoAlphaRewrite) {
const char* hlo_text = R"(
HloModule int8gemm
ENTRY int8gemm {
%parameter.1 = s8[12,4]{1,0} parameter(0)
%parameter.2 = s8[4,8]{1,0} parameter(1)
k = s32[] constant(2)
k_broadcast = s32[12,8] broadcast(k), dimensions={}
%dot.8 = s32[12,8] dot(s8[12,4] %parameter.1, s8[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_multiplied = s32[12,8] multiply(%dot.8, k_broadcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} custom-call(s8[12,4]{1,0} [[A:%[^ ]+]], s8[4,8]{0,1} [[B:%[^ ]+]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
)",
true);
} else {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} dot(s32[12,4]{1,0} [[A:%[^ ]+]], s32[4,8]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0}
)",
true);
}
}
TEST_P(ParameterizedGemmRewriteTest, Int8GemmNoBetaRewrite) {
const char* hlo_text = R"(
HloModule int8gemm
ENTRY int8gemm {
%parameter.1 = s8[12,4]{1,0} parameter(0)
%parameter.2 = s8[4,8]{1,0} parameter(1)
bias = s32[12,8] parameter(2)
%dot.8 = s32[12,8] dot(s8[12,4] %parameter.1, s8[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = s32[12,8] add(%dot.8, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} custom-call(s8[12,4]{1,0} [[A:%[^ ]+]], s8[4,8]{0,1} [[B:%[^ ]+]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
)",
true);
} else {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} dot(s32[12,4]{1,0} [[A:%[^ ]+]], s32[4,8]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0}
)",
true);
}
}
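// cuBLAS int8 GEMM requires dimensions that are multiples of four, so
// s8[13,4] x s8[4,9] is expected to be padded to s8[16,4] x s8[4,12].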
TEST_P(ParameterizedGemmRewriteTest, Int8GemmNotMultipleOfFour) {
if (!IsCuda()) {
GTEST_SKIP() << "DoBlasGemmWithAlgorithm is not yet implemented on ROCm";
}
const char* hlo_text = R"(
HloModule int8gemm
ENTRY int8gemm {
%parameter.1 = s8[13,4]{1,0} parameter(0)
%parameter.2 = s8[4,9]{1,0} parameter(1)
ROOT %dot.9 = s32[13,9] dot(s8[13,4] %parameter.1, s8[4,9] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} custom-call(s8[16,4]{1,0} [[A:%[^ ]+]], s8[4,12]{0,1} [[B:%[^ ]+]]), custom_call_target="__cublas$gemm"
)",
true);
} else {
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: {{.*}} dot(s32[13,4]{1,0} [[A:%[^ ]+]], s32[4,9]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0}
)",
true);
}
}
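// Enumerates (operand type, result type) combinations and checks which ones
// the rewriter handles; on Ampere and newer, additional unsupported mixes
// are expected to fail RunAndCompare.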
TEST_P(ParameterizedGemmRewriteTest, GemmTypeCombinationCheck) {
if (!IsCuda()) {
GTEST_SKIP() << "DoBlasGemmWithAlgorithm is not yet implemented on ROCm";
}
std::vector<std::tuple<absl::string_view, absl::string_view, bool>>
type_combinations = {{"s8", "s8", true},
{"s32", "s32", true},
{"bf16", "bf16", true},
{"f16", "f16", true},
{"f32", "f32", true},
{"f64", "f64", true},
{"c64", "c64", true},
{"c128", "c128", true},
{"s8", "s32", true},
{"s8", "f32", true},
{"f16", "f32", true},
{"bf16", "f32", true}};
if (!IsCuda() ||
HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
std::vector<std::tuple<absl::string_view, absl::string_view, bool>>
more_type_combinations = {
{"s8", "bf16", false}, {"s8", "f16", false},
{"s8", "f64", false}, {"s8", "c64", false},
{"s8", "c128", false},
{"s32", "f32", false}, {"s32", "f64", false},
{"s32", "c64", false}, {"s32", "c128", false},
{"f16", "bf16", false}, {"f16", "f64", false},
{"f16", "c64", false}, {"f16", "c128", false},
{"bf16", "f16", false}, {"bf16", "f64", false},
{"bf16", "c64", false}, {"bf16", "c128", false},
{"f32", "f64", false}, {"f32", "c64", false},
{"f32", "c128", false},
{"f64", "c64", false}, {"f64", "c128", false},
};
type_combinations.insert(type_combinations.end(),
more_type_combinations.begin(),
more_type_combinations.end());
}
for (const auto& type_combination : type_combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<ABType>>"] = std::get<0>(type_combination);
replacements["<<DType>>"] = std::get<1>(type_combination);
const char* hlo_template = R"(
HloModule type_combo
ENTRY type_combo {
%parameter.1 = <<ABType>>[4,4]{1,0} parameter(0)
%parameter.2 = <<ABType>>[4,4]{1,0} parameter(1)
ROOT %dot = <<DType>>[4,4] dot(%parameter.1, %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const auto hlo_text = absl::StrReplaceAll(hlo_template, replacements);
if (std::get<2>(type_combination)) {
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
} else {
EXPECT_FALSE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
}
}
}
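// The Upcasting* tests below verify that dots whose result type is wider
// than the operand type are still rewritten into a single cuBLAS custom
// call.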
TEST_P(ParameterizedGemmRewriteTest, UpcastingBf16ToF64) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
Arg_0.1 = bf16[4,3]{1,0} parameter(0)
Arg_1.2 = bf16[3,6]{1,0} parameter(1)
ROOT dot.3 = f64[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0)));
}
TEST_P(ParameterizedGemmRewriteTest, UpcastingC64ToC128) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
Arg_0.1 = c64[4,3]{1,0} parameter(0)
Arg_1.2 = c64[3,6]{1,0} parameter(1)
ROOT dot.3 = c128[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0)));
}
TEST_P(ParameterizedGemmRewriteTest, UpcastingF16ToF32) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
Arg_0.1 = f16[4,3]{1,0} parameter(0)
Arg_1.2 = f16[3,6]{1,0} parameter(1)
ROOT dot.3 = f32[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest, highest}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0)));
}
TEST_P(ParameterizedGemmRewriteTest, UpcastingF16ToF64) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
Arg_0.1 = f16[4,3]{1,0} parameter(0)
Arg_1.2 = f16[3,6]{1,0} parameter(1)
ROOT dot.3 = f64[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0)));
}
TEST_P(ParameterizedGemmRewriteTest, UpcastingF32ToF64) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
Arg_0.1 = f32[4,3]{1,0} parameter(0)
Arg_1.2 = f32[3,6]{1,0} parameter(1)
ROOT dot.3 = f64[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0)));
}
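// The convert at the root must stay outside the custom call: the GEMM itself
// is kept in f16 rather than upconverted to the output type.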
TEST_P(ParameterizedGemmRewriteTest, DoNotUpconvertOutput) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
param_0 = f16[240,88]{1,0} parameter(0)
param_1 = f16[88,4]{1,0} parameter(1)
dot = f16[240,4]{1,0} dot(param_0, param_1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
constant_255 = f16[] constant(255)
broadcast = f16[240,4]{1,0} broadcast(constant_255), dimensions={}
multiply = f16[240,4]{1,0} multiply(dot, broadcast)
ROOT result = f32[240,4]{1,0} convert(multiply)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(
m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0))));
}
TEST_P(ParameterizedGemmRewriteTest, UnsupportedMixTypeGemm) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
param_0 = f32[240,88]{1,0} parameter(0)
param_1 = f32[88,4]{1,0} parameter(1)
dot = f32[240,4]{1,0} dot(param_0, param_1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
constant_255 = f32[] constant(255)
broadcast = f32[240,4]{1,0} broadcast(constant_255), dimensions={}
multiply = f32[240,4]{1,0} multiply(dot, broadcast)
ROOT result = u8[240,4]{1,0} convert(multiply)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(
m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0))));
}
TEST_P(ParameterizedGemmRewriteTest, CheckIsGemmAliasedBeforeFusion) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
Arg_0.1 = f16[8,16]{1,0} parameter(0)
Arg_1.2 = f16[16,32]{1,0} parameter(1)
dot.8 = f16[8,32]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
Arg_2.3 = f16[8,32]{1,0} parameter(2)
constant.5 = f16[] constant(1)
broadcast.6 = f16[8,32]{1,0} broadcast(constant.5), dimensions={}
add.7 = f16[8,32]{1,0} add(Arg_2.3, broadcast.6)
add.9 = f16[8,32]{1,0} add(dot.8, add.7)
convert.10 = f32[8,32]{1,0} convert(add.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Convert(
m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0))));
}
INSTANTIATE_TEST_SUITE_P(CublasTestsBothLegacyAndLt,
ParameterizedGemmRewriteTest, ::testing::Bool());
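// Tests for the legacy cuBLAS path: Triton and cuBLASLt are disabled, so the
// rewriter must emit __cublas$gemm custom calls.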
class LegacyCublasGemmRewriteTest : public GemmRewriteTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(false);
debug_options.set_xla_gpu_enable_cublaslt(false);
return debug_options;
}
};
TEST_F(LegacyCublasGemmRewriteTest, MatrixVectorMultiplication) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f32[2048] parameter(0)
p1 = f32[2048, 16384] parameter(1)
ROOT d = f32[16384] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0},
stream_executor::SemanticVersion{12, 4, 0}),
R"(
; CHECK: %[[P0:.+]] = f32[2048]{0} parameter(0)
; CHECK: %[[P1:.+]] = f32[2048,16384]{1,0} parameter(1)
; CHECK: %[[CUSTOM_CALL:.+]] = (f32[16384]{0}, s8[4194304]{0}) custom-call(%[[P0]], %[[P1]]), custom_call_target="__cublas$gemm"
)");
}
TEST_F(LegacyCublasGemmRewriteTest, MatrixVectorMultiplicationWithBatch) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f32[10, 10, 2048] parameter(0)
p1 = f32[10, 10, 2048, 16384] parameter(1)
ROOT d = f32[10, 10, 16384] dot(p0, p1),
lhs_batch_dims={0, 1}, rhs_batch_dims={0, 1},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
})";
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0},
stream_executor::SemanticVersion{12, 4, 0}),
R"(
; CHECK: %[[P0:.+]] = f32[10,10,2048]{2,1,0} parameter(0)
; CHECK: %[[P1:.+]] = f32[10,10,2048,16384]{3,2,1,0} parameter(1)
; CHECK: %[[CUSTOM_CALL:.+]] = (f32[10,10,16384]{2,1,0}, s8[4194304]{0}) custom-call(%[[P0]], %[[P1]]), custom_call_target="__cublas$gemm"
)");
}
TEST_F(LegacyCublasGemmRewriteTest, SparseDotNotSupported) {
const char* hlo_text = R"(
HloModule test
ENTRY main {
lhs = f16[5,16] parameter(0)
rhs = f16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
auto hlo_pass = GemmRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0},
stream_executor::SemanticVersion{12, 4, 0});
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&hlo_pass, module.get()));
EXPECT_FALSE(changed);
}
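// The scaling constant folds into alpha and the non-parameter bias folds
// into beta=1, with the output buffer aliased to the bias operand.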
TEST_F(LegacyCublasGemmRewriteTest, AlphaBetaRewrite) {
const char* hlo_text = R"(
HloModule NonZeroAlphaBeta
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
param_2 = f32[2,2] parameter(2)
bias = f32[2,2] negate(param_2)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
ROOT out = f32[2,2] add(dot_a_multiplied, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-DAG: [[X:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[Y:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK: [[O:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], {{[^,)]+}}),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: ROOT [[OUT:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[O]]), index=0
)");
}
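// A bias with other users cannot be overwritten in place: the rewriter keeps
// beta=0 and leaves the adds outside the custom call.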
TEST_F(LegacyCublasGemmRewriteTest, BiasMultipleUsersNoOverwrite) {
const char* hlo_text = R"(
HloModule BiasMultipleUsersNoOverwrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] parameter(2)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
biased_out = f32[2,2] add(dot_a_multiplied, bias)
ROOT out = f32[2,2] add(biased_out, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-NEXT: [[CUSTOM_CALL:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(LegacyCublasGemmRewriteTest, BiasParameterNoOverwrite) {
const char* hlo_text = R"(
HloModule BiasParameterNoOverwrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] parameter(2)
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,2] add(dot_a, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(LegacyCublasGemmRewriteTest, BiasTupleParameterOverwrite) {
const char* hlo_text = R"(
HloModule BiasTupleParameterOverwrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
param_2 = (f32[2,2], f32[3,3]) parameter(2)
bias = f32[2,2] get-tuple-element(param_2), index=0
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,2] add(dot_a, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: (f32[2,2], f32[3,3])) -> f32[2,2] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = (f32[2,2]{1,0}, f32[3,3]{1,0}) parameter(2)
; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[P2]]), index=0
; CHECK-DAG: [[BIAS_COPY:%[^ ]+]] = f32[2,2]{1,0} copy([[BIAS]])
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[BIAS_COPY]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
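// A bias parameter that is must-aliased with the module output may be
// overwritten, so folding it with beta=1 is legal here.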
TEST_F(LegacyCublasGemmRewriteTest, AliasedBiasOverwrite) {
const char* hlo_text = R"(
HloModule AliasedBiasOverwrite, input_output_alias={ {}: (2, {}, must-alias) }
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] parameter(2)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
ROOT out = f32[2,2] add(dot_a_multiplied, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-DAG: [[X:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[Y:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} parameter(2)
; CHECK: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], [[BIAS]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(LegacyCublasGemmRewriteTest, LargerBiasMultipleUsersNoRewrite) {
const char* hlo_text = R"(
HloModule LargerBiasMultipleUsersNoRewrite
ENTRY AddDotsFunc {
x = f32[1024,1024] parameter(0)
y = f32[1024,1024] parameter(1)
bias = f32[1024,1024] parameter(2)
dot_a = f32[1024,1024] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
biased_out = f32[1024,1024] add(dot_a, bias)
ROOT out = f32[1024,1024] add(biased_out, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024]) -> f32[1024,1024] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[1024,1024]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[1024,1024]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[1024,1024]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(LegacyCublasGemmRewriteTest, BF16GemmWithBias) {
const char* hlo_text = R"(
HloModule BF16GemmWithBias
ENTRY BF16GemmWithBias {
x = bf16[8,8]{1,0} parameter(0)
y = bf16[8,8]{1,0} parameter(1)
dot.5 = bf16[8,8]{1,0} dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
param_2 = bf16[8,8]{1,0} parameter(2)
bias = bf16[8,8]{1,0} negate(param_2)
ROOT add.6 = bf16[8,8]{1,0} add(dot.5, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2e-3, 2e-3}));
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %BF16GemmWithBias ({{.*}}: bf16[8,8], {{.*}}: bf16[8,8], {{.*}}: bf16[8,8]) -> bf16[8,8] {
; CHECK-DAG: [[X:%[^ ]+]] = bf16[8,8]{1,0} parameter(0)
; CHECK-DAG: [[Y:%[^ ]+]] = bf16[8,8]{1,0} parameter(1)
; CHECK: [[GEMM:%[^ ]+]] = (bf16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], {{[^,)]+}}),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(LegacyCublasGemmRewriteTest, MatrixBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
param_2 = f32[2,4] parameter(2)
bias = f32[2,4] negate(param_2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,4] add(dot_a, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,4]) -> f32[2,4] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK: [[GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], {{[^,)]+}}),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasWhereBiasIsNotAParameter) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
w = f32[2,3] parameter(0)
x = f32[3,4] parameter(1)
first_dot = f32[2,4] dot(w, x), lhs_contracting_dims={1}, rhs_contracting_dims={0}
y = f32[2,3] parameter(2)
z = f32[3,4] parameter(3)
second_dot = f32[2,4] dot(y, z), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,4] add(second_dot, first_dot)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = f32[2,3]{1,0} parameter(2)
; CHECK-DAG: [[P3:%[^ ]+]] = f32[3,4]{1,0} parameter(3)
; CHECK-NEXT: [[FIRST_GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[FIRST_GEMM_OUT:%[^ ]+]] = f32[2,4]{1,0} get-tuple-element([[FIRST_GEMM]]), index=0
; CHECK-NEXT: [[SECOND_GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P2]], [[P3]], [[FIRST_GEMM_OUT]]),
; CHECK: custom_call_target="__cublas$gemm",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
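// Mixed-precision matrix bias: the GEMM runs in the narrow type, and the
// negated f32 bias still fuses as the third custom-call operand.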
TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixType) {
std::vector<std::tuple<absl::string_view, absl::string_view>>
type_combinations = {
{"f16", "f32"},
{"bf16", "f32"},
};
const char* hlo_text_template = R"(
HloModule test
ENTRY test {
x = <<ABType>>[16,32] parameter(0)
y = <<ABType>>[32,16] parameter(1)
z = <<DType>>[16,16] parameter(2)
dot_a = <<ABType>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias = <<DType>>[16,16] negate(z)
convert = <<DType>>[16,16] convert(dot_a)
ROOT out = <<DType>>[16,16] add(convert, bias)
}
)";
for (const auto& type_combination : type_combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<ABType>>"] = std::get<0>(type_combination);
replacements["<<DType>>"] = std::get<1>(type_combination);
const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (std::get<0>(type_combination) == "bf16" && IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
continue;
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
EXPECT_THAT(optimized_module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1),
m::Negate(m::Parameter(2))),
0)));
}
}
TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixTypeBatched) {
std::vector<std::tuple<absl::string_view, absl::string_view>>
type_combinations = {
{"f16", "f32"},
{"bf16", "f32"},
};
const char* hlo_text_template = R"(
HloModule test
ENTRY test {
x = <<ABType>>[4,16,32] parameter(0)
y = <<ABType>>[4,32,16] parameter(1)
z = <<DType>>[4,16,16] parameter(2)
dot_a = <<ABType>>[4,16,16] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
bias = <<DType>>[4,16,16] negate(z)
convert = <<DType>>[4,16,16] convert(dot_a)
ROOT out = <<DType>>[4,16,16] add(convert, bias)
})";
for (const auto& type_combination : type_combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<ABType>>"] = std::get<0>(type_combination);
replacements["<<DType>>"] = std::get<1>(type_combination);
const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (std::get<0>(type_combination) == "bf16" && IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
continue;
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
EXPECT_THAT(optimized_module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1),
m::Negate(m::Parameter(2))),
0)));
}
}
TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixTypeNotSupported) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP()
<< "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[16,32] parameter(0)
y = bf16[32,16] parameter(1)
z = f64[16,16] parameter(2)
dot_a = bf16[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias = f64[16,16] negate(z)
convert = f64[16,16] convert(dot_a)
ROOT out = f64[16,16] add(convert, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
MatchOptimizedHlo(hlo_text, R"(
; CHECK: %[[custom_call:.*]] = {{.*}} custom-call{{.*}}__cublas$gemm
; CHECK: %[[gte:.*]] = {{.*}} get-tuple-element{{.*}}%[[custom_call]]
; CHECK: ROOT {{.*}} fusion({{.*}}%[[gte]]
)");
}
TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixTypeAddWithMoreConsumers) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[16,32] parameter(0)
y = bf16[32,16] parameter(1)
z = f32[16,16] parameter(2)
dot_a = bf16[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias = f32[16,16] negate(z)
convert = f32[16,16] convert(dot_a)
add_bias = f32[16,16] add(convert, bias)
ROOT out = f32[16,16] negate(add_bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
MatchOptimizedHlo(hlo_text, R"(
; CHECK: %[[custom_call:.*]] = {{.*}} custom-call{{.*}}__cublas$gemm
; CHECK: %[[gte:.*]] = {{.*}} get-tuple-element{{.*}}%[[custom_call]]
; CHECK: ROOT {{.*}} fusion({{.*}}%[[gte]]
)");
}
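// A bitcast between the dot and the add must not block the rewrite: the bias
// is bitcast to the dot shape and fused, and the bitcast moves past the
// custom call.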
TEST_F(LegacyCublasGemmRewriteTest, MergeBitcastAndAdd) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[4] parameter(2)
dot = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[4] add(f32[4] bitcast(dot), bias)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Bitcast(
m::GetTupleElement(
m::CustomCall(
{"__cublas$gemm"}, m::Parameter(0), m::Parameter(1),
m::Bitcast(m::Parameter(2)).WithShape(F32, {2, 2})),
0))
.WithShape(F32, {4})));
}
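// Reshapes, transposes, and bitcasts of a constant bias are folded away so
// that each dot still receives a fusable constant bias operand.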
TEST_F(LegacyCublasGemmRewriteTest, FoldConstantBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] broadcast(f32[2] constant({0, 0})), dimensions={0}
dot1 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
param_2 = f32[2,2] parameter(2)
bias1 = f32[2,2] negate(param_2)
sum1 = add(dot1, bias1)
dot2 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
sum2 = add(dot2, f32[2,2] reshape(bias))
dot3 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias3 = f32[2,2] transpose(bias), dimensions={1,0}
sum3 = add(dot3, bias3)
dot4 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
sum4 = add(dot4, f32[2,2] bitcast(bias))
ROOT root = tuple(sum1, sum2, sum3, sum4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(m::Parameter(0), m::Parameter(1),
m::Negate(m::Parameter(2))),
0),
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()),
0),
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()),
0),
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()),
0))));
}
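// Tests for the cuBLASLt path (__cublas$lt$matmul), which supports fused
// epilogues such as BIAS in addition to alpha/beta folding.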
class CublasLtGemmRewriteTest : public GemmRewriteTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cublaslt(true);
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
protected:
void SetUp() override {
if (SkipGpuBlasLtTest()) {
GTEST_SKIP() << "BlasLt is not supported on this GPU architecture";
}
}
};
TEST_F(CublasLtGemmRewriteTest, AlphaBetaRewrite) {
const char* hlo_text = R"(
HloModule NonZeroAlphaBeta
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] parameter(2)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
ROOT out = f32[2,2] add(dot_a_multiplied, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-DAG: [[X:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[Y:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} parameter(2)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], [[BIAS]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[GEMM]]), index=0
)");
}
TEST_F(CublasLtGemmRewriteTest, BiasMultipleUsersNoOverwrite) {
const char* hlo_text = R"(
HloModule BiasMultipleUsersNoOverwrite
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] parameter(2)
k = f32[] constant(3.0)
k_broadcast = f32[2, 2] broadcast(k), dimensions={}
dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast)
biased_out = f32[2,2] add(dot_a_multiplied, bias)
ROOT out = f32[2,2] add(biased_out, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1)
; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} parameter(2)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[BIAS]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK-NOT: output_to_operand_aliasing
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(CublasLtGemmRewriteTest, LargerBiasMultipleUsersNoRewrite) {
const char* hlo_text = R"(
HloModule LargerBiasMultipleUsersNoRewrite
ENTRY AddDotsFunc {
x = f32[1024,1024] parameter(0)
y = f32[1024,1024] parameter(1)
bias = f32[1024,1024] parameter(2)
dot_a = f32[1024,1024] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
biased_out = f32[1024,1024] add(dot_a, bias)
ROOT out = f32[1024,1024] add(biased_out, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024]) -> f32[1024,1024] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[1024,1024]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[1024,1024]{1,0} parameter(1)
; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[1024,1024]{1,0} parameter(2)
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[1024,1024]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[BIAS]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: [[GEMM:%[^ ]+]] = f32[1024,1024]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[1024,1024]{1,0} add([[GEMM]], [[BIAS]])
)");
}
TEST_F(CublasLtGemmRewriteTest, BF16GemmWithBias) {
const char* hlo_text = R"(
HloModule test
ENTRY BF16GemmWithBias {
x = bf16[8,8]{1,0} parameter(0)
y = bf16[8,8]{1,0} parameter(1)
dot.5 = bf16[8,8]{1,0} dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias = bf16[8,8]{1,0} parameter(2)
ROOT add.6 = bf16[8,8]{1,0} add(dot.5, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %BF16GemmWithBias ({{.*}}: bf16[8,8], {{.*}}: bf16[8,8], {{.*}}: bf16[8,8]) -> bf16[8,8] {
; CHECK-DAG: [[X:%[^ ]+]] = bf16[8,8]{1,0} parameter(0)
; CHECK-DAG: [[Y:%[^ ]+]] = bf16[8,8]{1,0} parameter(1)
; CHECK-DAG: [[BIAS:%[^ ]+]] = bf16[8,8]{1,0} parameter(2)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (bf16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], [[BIAS]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(CublasLtGemmRewriteTest, MatrixBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[2,4] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,4] add(dot_a, z)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2,4]{1,0} parameter(2)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(CublasLtGemmRewriteTest, MatrixBiasWhereBiasIsNotAParameter) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
w = f32[2,3] parameter(0)
x = f32[3,4] parameter(1)
first_dot = f32[2,4] dot(w, x), lhs_contracting_dims={1}, rhs_contracting_dims={0}
y = f32[2,3] parameter(2)
z = f32[3,4] parameter(3)
second_dot = f32[2,4] dot(y, z), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,4] add(second_dot, first_dot)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = f32[2,3]{1,0} parameter(2)
; CHECK-DAG: [[P3:%[^ ]+]] = f32[3,4]{1,0} parameter(3)
; CHECK-NEXT: [[FIRST_GEMM_TUPLE:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[FIRST_GEMM:%[^ ]+]] = f32[2,4]{1,0} get-tuple-element([[FIRST_GEMM_TUPLE]]), index=0
; CHECK-NEXT: [[SECOND_GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P2]], [[P3]], [[FIRST_GEMM]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: output_to_operand_aliasing={
; CHECK: {0}: (2, {})
; CHECK: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
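// A vector bias broadcast along the last dimension maps onto the cuBLASLt
// BIAS epilogue instead of a beta=1 matrix bias.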
TEST_F(CublasLtGemmRewriteTest, VectorBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[4] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={1}
ROOT out = f32[2,4] add(dot_a, z_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
)");
}
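// When the raw dot result has other users, the vector-bias add cannot become
// an epilogue; it is kept as a separate loop fusion instead.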
TEST_F(CublasLtGemmRewriteTest, VectorBiasMultipleUsers) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[4,4] parameter(0)
y = f32[4,4] parameter(1)
z = f32[4] parameter(2)
c = f32[] constant(5)
dot_a = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
z_bcast = f32[4,4] broadcast(z), dimensions={1}
add_a = f32[4,4] add(dot_a, z_bcast)
c_bcast = f32[4,4] broadcast(c), dimensions={}
dot_b = f32[4,4] dot(dot_a, c_bcast), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
ROOT out = f32[4,4] dot(add_a, dot_b), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[FUSED_COMPUTATION:%[^ ]+]] ([[DUMMY0:[^ ]+]]: f32[4,4], [[DUMMY1:[^ ]+]]: f32[4]) -> f32[4,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,4]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,4]{1,0} broadcast([[P1]]), dimensions={1}
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[4,4]{1,0} add([[P0]], [[P2]])
}
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[4,4], {{.*}}: f32[4,4], {{.*}}: f32[4]) -> f32[4,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,4]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,4]{1,0} parameter(1)
; CHECK-NEXT: [[MATMUL0_TUPLE:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: [[MATMUL0:%[^ ]+]] = f32[4,4]{1,0} get-tuple-element([[MATMUL0_TUPLE]]), index=0
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[4,4]{1,0} fusion([[MATMUL0]], [[P2]]), kind=kLoop, calls=[[FUSED_COMPUTATION]]
; CHECK: [[MATMUL1_TUPLE:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[MATMUL0]]
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: [[MATMUL1:%[^ ]+]] = f32[4,4]{1,0} get-tuple-element([[MATMUL1_TUPLE]]), index=0
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[FUSION]], [[MATMUL1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_F(CublasLtGemmRewriteTest, BatchedVectorBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3,4] parameter(0)
y = f32[4,5,6] parameter(1)
z = f32[3,5,6] parameter(2)
dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest}
z_bcast = f32[2,3,5,6] broadcast(z), dimensions={1,2,3}
ROOT out = f32[2,3,5,6] add(dot_a, z_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6], {{.*}}: f32[3,5,6]) -> f32[2,3,5,6] {
; CHECK: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call(
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: output_to_operand_aliasing={
; CHECK: {0}: (2, {})
; CHECK: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]])
)");
}
TEST_F(CublasLtGemmRewriteTest, BatchedSharedVectorBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3,4] parameter(0)
y = f32[4,5,6] parameter(1)
z = f32[6] parameter(2)
dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest}
z_bcast = f32[2,3,5,6] broadcast(z), dimensions={3}
ROOT out = f32[2,3,5,6] add(dot_a, z_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6], {{.*}}: f32[6]) -> f32[2,3,5,6] {
; CHECK: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call(
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: output_to_operand_aliasing={
; CHECK: {0}: (2, {})
; CHECK: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]])
)");
}
TEST_F(CublasLtGemmRewriteTest, VectorBiasIncorrectAxisFusedAsMatrix) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[2] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={0}
add = f32[2,4] add(dot_a, z_bcast)
ROOT out = f32[4,2] transpose(add), dimensions={1,0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2]) -> f32[4,2] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2]{0} parameter(2)
; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[2,4]{0,1}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[2,4]{0,1} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[4,2]{1,0} bitcast([[MATMUL]])
)");
}
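// The vector bias is applied via the BIAS epilogue on the full GEMM result,
// and the slice then runs on the get-tuple-element of the custom call.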
TEST_F(CublasLtGemmRewriteTest, VectorBiasSliced) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[4,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[3] parameter(2)
dot_a = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
slice_a = f32[2,3] slice(dot_a), slice={[0:2], [0:3]}
z_bcast = f32[2,3] broadcast(z), dimensions={1}
ROOT out = f32[2,3] add(slice_a, z_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[4,3], {{.*}}: f32[3,4], {{.*}}: f32[3]) -> f32[2,3] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[3]{0} parameter(2)
; CHECK-NEXT: [[MATMUL:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
; CHECK-NEXT: [[GETTUPLE:%[^ ]+]] = f32[4,4]{1,0} get-tuple-element([[MATMUL]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3]{1,0} slice([[GETTUPLE]]), slice={[0:2], [0:3]}
)");
}
TEST_F(CublasLtGemmRewriteTest, VectorBiasSlicedMultipleUsers) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[2] parameter(2)
c = f32[] constant(5)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
slice_a = f32[2,2] slice(dot_a), slice={[0:2], [0:2]}
z_bcast = f32[2,2] broadcast(z), dimensions={1}
add_a = f32[2,2] add(slice_a, z_bcast)
c_bcast = f32[2,2] broadcast(c), dimensions={}
dot_b = f32[2,2] dot(slice_a, c_bcast), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,2] dot(add_a, dot_b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2]) -> f32[2,2] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = f32[2]{0} parameter(2)
; CHECK-NEXT: [[MATMUL0_TUPLE:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[MATMUL1_TUPLE:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call(
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[MATMUL1:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[MATMUL1_TUPLE]]), index=0
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call{{.*}}[[MATMUL1]]
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
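
// The addend is already a full [2,4] parameter, so it is folded as a matrix
// bias (beta=1) rather than through the BIAS epilogue.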
TEST_F(CublasLtGemmRewriteTest, VectorBiasTransposed) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[2] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] parameter(3)
ROOT out = f32[2,4] add(dot_a, z_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2_BCAST:%[^ ]+]] = f32[2,4]{1,0} parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2_BCAST]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
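
// A vector bias followed by a matrix bias: the matrix bias becomes the
// beta=1 operand and the vector bias the BIAS epilogue of one custom call.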
TEST_F(CublasLtGemmRewriteTest, VectorBiasThenMatrixBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[4] parameter(2)
z2 = f32[2,4] parameter(3)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={1}
add0 = f32[2,4] add(dot_a, z_bcast)
ROOT add1 = f32[2,4] add(add0, z2)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4], {{.*}}: f32[2,4]) -> f32[2,4] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-DAG: [[VECTOR_BIAS:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-DAG: [[MATRIX_BIAS:%[^ ]+]] = f32[2,4]{1,0} parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[MATRIX_BIAS]], [[VECTOR_BIAS]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
)");
}
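
// Vector bias fusion also applies to bf16 GEMMs (pattern checked on Ampere+).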
TEST_F(CublasLtGemmRewriteTest, BF16VectorBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[16,24] parameter(0)
y = bf16[24,32] parameter(1)
z = bf16[32] parameter(2)
dot_a = bf16[16,32] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = bf16[16,32] broadcast(z), dimensions={1}
ROOT out = bf16[16,32] add(dot_a, z_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{3e-3, 1e-3}));
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: bf16[16,24], {{.*}}: bf16[24,32], {{.*}}: bf16[32]) -> bf16[16,32] {
; CHECK-NEXT: [[P0:%[^ ]+]] = bf16[16,24]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = bf16[24,32]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = bf16[32]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[16,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
)");
}
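
// bf16 operands whose dimensions are not multiples of 8 are padded before
// being handed to the LT matmul.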
TEST_F(CublasLtGemmRewriteTest, BF16VectorBiasPadded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Padding of GEMM bf16 operands only implemented on "
"architectures with bf16 Tensor Cores.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[2,3] parameter(0)
y = bf16[3,4] parameter(1)
z = bf16[4] parameter(2)
dot_a = bf16[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = bf16[2,4] broadcast(z), dimensions={1}
ROOT out = bf16[2,4] add(dot_a, z_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: bf16[2,3], {{.*}}: bf16[3,4], {{.*}}: bf16[4]) -> bf16[2,4] {
; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_6x0_5
; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_5x0_4
)");
}
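
// max(dot, 0) is recognized as a relu and fused as the RELU epilogue.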
TEST_F(CublasLtGemmRewriteTest, ReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = f32[] constant(0)
c_bcast = f32[2,4] broadcast(c), dimensions={}
ROOT out = f32[2,4] maximum(dot_a, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
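
// A higher-rank dot is flattened to a 2D matmul via bitcasts, with the
// relu still fused as the RELU epilogue.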
TEST_F(CublasLtGemmRewriteTest, BatchedReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3,4] parameter(0)
y = f32[4,5,6] parameter(1)
dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest}
c = f32[] constant(0)
c_bcast = f32[2,3,5,6] broadcast(c), dimensions={}
ROOT out = f32[2,3,5,6] maximum(dot_a, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6]) -> f32[2,3,5,6] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3,4]{2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[6,4]{1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,5,6]{2,1,0} parameter(1)
; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[4,30]{1,0}
; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
; CHECK: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]])
)");
}
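
// Relu applied to a slice of the dot commutes with the slice, so the RELU
// epilogue is fused and the slice runs on the custom-call result.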
TEST_F(CublasLtGemmRewriteTest, ReluActivationSliced) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = f32[] constant(0)
c_bcast = f32[2,2] broadcast(c), dimensions={}
slice_a = f32[2,2] slice(dot_a), slice={[0:2], [0:2]}
ROOT out = f32[2,2] maximum(slice_a, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,2] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
; CHECK: [[MATMUL:%[^ ]+]] = f32[2,4]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,2]{1,0} slice([[MATMUL]]), slice={[0:2], [0:2]}
)");
}
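
// Matrix bias plus relu: the beta=1 operand combined with the RELU epilogue.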
TEST_F(CublasLtGemmRewriteTest, MatrixBiasReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[2,4] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
add = f32[2,4] add(dot_a, z)
c = f32[] constant(0)
c_bcast = f32[2,4] broadcast(c), dimensions={}
ROOT out = f32[2,4] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2,4]{1,0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
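
// Same as above with square operands, where the bias buffer has the same
// shape as the output.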
TEST_F(CublasLtGemmRewriteTest, SquareMatrixBiasReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[4,4] parameter(0)
y = f32[4,4] parameter(1)
z = f32[4,4] parameter(2)
dot_a = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
add = f32[4,4] add(dot_a, z)
c = f32[] constant(0)
c_bcast = f32[4,4] broadcast(c), dimensions={}
ROOT out = f32[4,4] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[4,4], {{.*}}: f32[4,4], {{.*}}: f32[4,4]) -> f32[4,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,4]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,4]{1,0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
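
// Broadcast vector bias plus relu fuses into the BIAS_RELU epilogue.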
TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[4] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={1}
add = f32[2,4] add(dot_a, z_bcast)
c = f32[] constant(0)
c_bcast = f32[2,4] broadcast(c), dimensions={}
ROOT out = f32[2,4] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_RELU"
; CHECK: }
)");
}
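
// In the flattened batched case the broadcast bias is passed as a beta=1
// matrix operand, with the relu as the RELU epilogue.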
TEST_F(CublasLtGemmRewriteTest, BatchedVectorBiasReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3,4] parameter(0)
y = f32[4,5,6] parameter(1)
z = f32[3,5,6] parameter(2)
dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest}
z_bcast = f32[2,3,5,6] broadcast(z), dimensions={1,2,3}
add = f32[2,3,5,6] add(dot_a, z_bcast)
c = f32[] constant(0)
c_bcast = f32[2,3,5,6] broadcast(c), dimensions={}
ROOT out = f32[2,3,5,6] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6], {{.*}}: f32[3,5,6]) -> f32[2,3,5,6] {
; CHECK: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call(
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]])
)");
}
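
// The bias is broadcast along dimension 0 and the result transposed; the
// rewriter emits a column-major ({0,1}) output plus a bitcast instead.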
TEST_F(CublasLtGemmRewriteTest, VectorBiasTransposedReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[2] parameter(2)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={0}
add = f32[2,4] add(dot_a, z_bcast)
c = f32[] constant(0)
c_bcast = f32[2,4] broadcast(c), dimensions={}
maximum = f32[2,4] maximum(add, c_bcast)
ROOT out = f32[4,2] transpose(maximum), dimensions={1,0}
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2]) -> f32[4,2] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2]{0} parameter(2)
; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[2,4]{0,1}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_RELU"
; CHECK: }
; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[2,4]{0,1} get-tuple-element([[MATMUL_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[4,2]{1,0} bitcast([[MATMUL]])
)");
}
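
// Vector and matrix biases plus relu all collapse into one custom call
// with a beta=1 operand and the BIAS_RELU epilogue.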
TEST_F(CublasLtGemmRewriteTest, VectorBiasThenMatrixBiasReluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z_vec = f32[4] parameter(2)
z_matrix = f32[2,4] parameter(3)
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z_vec), dimensions={1}
add0 = f32[2,4] add(dot_a, z_bcast)
add1 = f32[2,4] add(add0, z_matrix)
c = f32[] constant(0)
c_bcast = f32[2,4] broadcast(c), dimensions={}
ROOT out = f32[2,4] maximum(add1, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4], {{.*}}: f32[2,4]) -> f32[2,4] {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4]{1,0} parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P3]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_RELU"
; CHECK: }
)");
}
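
// The tanh-based GELU approximation is pattern-matched and fused as the
// GELU epilogue.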
TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
mul.0 = f32[2,4] multiply(dot, dot)
mul.1 = f32[2,4] multiply(dot, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[2,4] broadcast(const.0), dimensions={}
mul.2 = f32[2,4] multiply(mul.1, bcast.0)
add.0 = f32[2,4] add(dot, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[2,4] broadcast(const.1), dimensions={}
mul.3 = f32[2,4] multiply(add.0, bcast.1)
tanh = f32[2,4] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[2,4] broadcast(const.2), dimensions={}
add.2 = f32[2,4] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[2,4] broadcast(const.3), dimensions={}
mul.4 = f32[2,4] multiply(add.2, bcast.3)
ROOT out = f32[2,4] multiply(dot, mul.4)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"GELU"
; CHECK: }
)");
}
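
// With a wrong polynomial coefficient (0.05 instead of 0.044715) the
// pattern must not be treated as GELU.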
TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationWrongConstant) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
mul.0 = f32[2,4] multiply(dot, dot)
mul.1 = f32[2,4] multiply(dot, mul.0)
const.0 = f32[] constant(0.05)
bcast.0 = f32[2,4] broadcast(const.0), dimensions={}
mul.2 = f32[2,4] multiply(mul.1, bcast.0)
add.0 = f32[2,4] add(dot, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[2,4] broadcast(const.1), dimensions={}
mul.3 = f32[2,4] multiply(add.0, bcast.1)
tanh = f32[2,4] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[2,4] broadcast(const.2), dimensions={}
add.2 = f32[2,4] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[2,4] broadcast(const.3), dimensions={}
mul.4 = f32[2,4] multiply(add.2, bcast.3)
ROOT out = f32[2,4] multiply(dot, mul.4)
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-NOT: GELU
)");
}
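
// Vector bias followed by approximate GELU fuses as BIAS_GELU.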
TEST_F(CublasLtGemmRewriteTest, VectorBiasThenApproxGeluActivation) {
  // blas-lt gained this epilogue on ROCm 6.0; skip only on older ROCm.
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60000
  bool rocm_switch = false;
#else
  bool rocm_switch = true;
#endif
if (!IsCuda() && rocm_switch) {
GTEST_SKIP() << "TODO: Unsupported blas-lt epilogue on ROCM";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[4] parameter(2)
dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={1}
add = f32[2,4] add(dot, z_bcast)
mul.0 = f32[2,4] multiply(add, add)
mul.1 = f32[2,4] multiply(add, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[2,4] broadcast(const.0), dimensions={}
mul.2 = f32[2,4] multiply(mul.1, bcast.0)
add.0 = f32[2,4] add(add, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[2,4] broadcast(const.1), dimensions={}
mul.3 = f32[2,4] multiply(add.0, bcast.1)
tanh = f32[2,4] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[2,4] broadcast(const.2), dimensions={}
add.2 = f32[2,4] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[2,4] broadcast(const.3), dimensions={}
mul.4 = f32[2,4] multiply(add.2, bcast.3)
ROOT out = f32[2,4] multiply(add, mul.4)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_GELU"
; CHECK: }
)");
}
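
// When the dot itself is also returned, the rewriter uses the GELU_AUX
// epilogue, which exposes the pre-activation value as an extra output.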
TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationWithAux) {
if (!IsCuda()) {
GTEST_SKIP() << "TODO: Unsupported blas-lt epilogue on ROCM";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
mul.0 = f32[2,4] multiply(dot, dot)
mul.1 = f32[2,4] multiply(dot, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[2,4] broadcast(const.0), dimensions={}
mul.2 = f32[2,4] multiply(mul.1, bcast.0)
add.0 = f32[2,4] add(dot, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[2,4] broadcast(const.1), dimensions={}
mul.3 = f32[2,4] multiply(add.0, bcast.1)
tanh = f32[2,4] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[2,4] broadcast(const.2), dimensions={}
add.2 = f32[2,4] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[2,4] broadcast(const.3), dimensions={}
mul.4 = f32[2,4] multiply(add.2, bcast.3)
mul.5 = f32[2,4] multiply(dot, mul.4)
ROOT out = (f32[2,4], f32[2,4]) tuple(mul.5, dot)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> (f32[2,4], f32[2,4]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"GELU_AUX"
; CHECK: }
)");
}
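
// Bias plus GELU with the pre-activation value returned: BIAS_GELU_AUX.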
TEST_F(CublasLtGemmRewriteTest, VectorBiasThenApproxGeluActivationWithAux) {
if (!IsCuda()) {
GTEST_SKIP() << "TODO: Unsupported blas-lt epilogue on ROCM";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[4] parameter(2)
dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f32[2,4] broadcast(z), dimensions={1}
add = f32[2,4] add(dot, z_bcast)
mul.0 = f32[2,4] multiply(add, add)
mul.1 = f32[2,4] multiply(add, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[2,4] broadcast(const.0), dimensions={}
mul.2 = f32[2,4] multiply(mul.1, bcast.0)
add.0 = f32[2,4] add(add, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[2,4] broadcast(const.1), dimensions={}
mul.3 = f32[2,4] multiply(add.0, bcast.1)
tanh = f32[2,4] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[2,4] broadcast(const.2), dimensions={}
add.2 = f32[2,4] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[2,4] broadcast(const.3), dimensions={}
mul.4 = f32[2,4] multiply(add.2, bcast.3)
mul.5 = f32[2,4] multiply(add, mul.4)
ROOT out = (f32[2,4], f32[2,4]) tuple(mul.5, add)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> (f32[2,4], f32[2,4]) {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_GELU_AUX"
; CHECK: }
)");
}
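
// bf16 GELU: operands are padded to multiples of 8 before the LT matmul.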
TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationBF16) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Padding of GEMM bf16 operands only implemented on "
"architectures with bf16 Tensor Cores.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[2,3] parameter(0)
y = bf16[3,4] parameter(1)
dot = bf16[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
mul.0 = bf16[2,4] multiply(dot, dot)
mul.1 = bf16[2,4] multiply(dot, mul.0)
const.0 = bf16[] constant(0.044715)
bcast.0 = bf16[2,4] broadcast(const.0), dimensions={}
mul.2 = bf16[2,4] multiply(mul.1, bcast.0)
add.0 = bf16[2,4] add(dot, mul.2)
const.1 = bf16[] constant(0.797884583)
bcast.1 = bf16[2,4] broadcast(const.1), dimensions={}
mul.3 = bf16[2,4] multiply(add.0, bcast.1)
tanh = bf16[2,4] tanh(mul.3)
const.2 = bf16[] constant(1)
bcast.2 = bf16[2,4] broadcast(const.2), dimensions={}
add.2 = bf16[2,4] add(tanh, bcast.2)
const.3 = bf16[] constant(0.5)
bcast.3 = bf16[2,4] broadcast(const.3), dimensions={}
mul.4 = bf16[2,4] multiply(add.2, bcast.3)
ROOT out = bf16[2,4] multiply(dot, mul.4)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{5e-5, 1e-5}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: bf16[2,3], {{.*}}: bf16[3,4]) -> bf16[2,4] {
; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_6x0_5
; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_5x0_4
)");
}
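
// The GELU pattern is matched through a bitcast of the dot; the custom
// call keeps the 2D shape and the bitcast is re-applied on top.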
TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationBitcast) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_bitcast = f32[2,2,2] bitcast(dot)
mul.0 = f32[2,2,2] multiply(dot_bitcast, dot_bitcast)
mul.1 = f32[2,2,2] multiply(dot_bitcast, mul.0)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[2,2,2] broadcast(const.0), dimensions={}
mul.2 = f32[2,2,2] multiply(mul.1, bcast.0)
add.0 = f32[2,2,2] add(dot_bitcast, mul.2)
const.1 = f32[] constant(0.797884583)
bcast.1 = f32[2,2,2] broadcast(const.1), dimensions={}
mul.3 = f32[2,2,2] multiply(add.0, bcast.1)
tanh = f32[2,2,2] tanh(mul.3)
const.2 = f32[] constant(1)
bcast.2 = f32[2,2,2] broadcast(const.2), dimensions={}
add.2 = f32[2,2,2] add(tanh, bcast.2)
const.3 = f32[] constant(0.5)
bcast.3 = f32[2,2,2] broadcast(const.3), dimensions={}
mul.4 = f32[2,2,2] multiply(add.2, bcast.3)
ROOT out = f32[2,2,2] multiply(dot_bitcast, mul.4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Bitcast(m::GetTupleElement(
m::CustomCall({"__cublas$lt$matmul"},
m::Parameter(0).WithShape(F32, {2, 3}),
m::Parameter(1).WithShape(F32, {3, 4})),
0))
.WithShape(F32, {2, 2, 2})));
}
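
// f16 matrix bias folds as the beta=1 operand.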
TEST_F(CublasLtGemmRewriteTest, MatrixBiasF16) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[8,16] parameter(0)
y = f16[16,8] parameter(1)
z = f16[8,8] parameter(2)
dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f16[8,8] add(dot_a, z)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f16[8,16], {{.*}}: f16[16,8], {{.*}}: f16[8,8]) -> f16[8,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f16[8,16]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f16[16,8]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[8,8]{1,0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
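
// A vector bias added after a layout-changing bitcast of the dot is still
// fused; the custom call produces the pre-bitcast shape.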
TEST_F(CublasLtGemmRewriteTest, VectorBiasF32UnpaddedWithBitcast) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3]{1,0} parameter(0)
y = f32[3,4]{1,0} parameter(1)
z = f32[2]{0} parameter(2)
dot_a = f32[2,4]{0,1} dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bitc = f32[4,2]{1,0} bitcast(f32[2,4]{0,1} dot_a)
z_bcast = f32[4,2] broadcast(z), dimensions={1}
ROOT add = f32[4,2]{1,0} add(bitc, z_bcast)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Bitcast(m::GetTupleElement(
m::CustomCall({"__cublas$lt$matmul"}, m::Parameter(0),
m::Parameter(1),
m::Parameter(2).WithShape(F32, {2})),
0)
.WithShape(F32, {2, 4}))
.WithShape(F32, {4, 2})));
}
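
// f16 GEMM with dimensions already multiples of 8: no padding is inserted.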
TEST_F(CublasLtGemmRewriteTest, VectorBiasF16Unpadded) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[8,16] parameter(0)
y = f16[16,8] parameter(1)
z = f16[8] parameter(2)
dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f16[8,8] broadcast(z), dimensions={1}
ROOT add = f16[8,8] add(dot_a, z_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{8e-3, 2e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: pad(
; CHECK: custom-call
; CHECK-SAME: custom_call_target="__cublas$lt$matmul"
)");
}
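
// f16 GEMM with odd dimensions: operands are padded to multiples of 8.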
TEST_F(CublasLtGemmRewriteTest, VectorBiasF16Padded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
GTEST_SKIP() << "Padding of GEMM operands only implemented on "
"architectures with Tensor Cores.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[6,12] parameter(0)
y = f16[12,6] parameter(1)
z = f16[6] parameter(2)
dot_a = f16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f16[6,6] broadcast(z), dimensions={1}
ROOT add = f16[6,6] add(dot_a, z_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-DAG: ENTRY %test ({{.*}}: f16[6,12], {{.*}}: f16[12,6], {{.*}}: f16[6]) -> f16[6,6] {
; CHECK-DAG: f16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4
; CHECK-DAG: f16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2
)");
}
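
// Relu fusion for f16 without padding.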
TEST_F(CublasLtGemmRewriteTest, ReluActivationF16Unpadded) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[8,16] parameter(0)
y = f16[16,8] parameter(1)
dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = f16[] constant(0)
c_bcast = f16[8,8] broadcast(c), dimensions={}
ROOT out = f16[8,8] maximum(dot_a, c_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: pad(
; CHECK: custom-call
; CHECK-SAME: custom_call_target="__cublas$lt$matmul"
)");
}
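
// Relu fusion for f16 with padded operands.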
TEST_F(CublasLtGemmRewriteTest, ReluActivationF16Padded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
GTEST_SKIP() << "Padding of GEMM operands only implemented on "
"architectures with Tensor Cores.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[6,12] parameter(0)
y = f16[12,6] parameter(1)
dot_a = f16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = f16[] constant(0)
c_bcast = f16[6,6] broadcast(c), dimensions={}
ROOT out = f16[6,6] maximum(dot_a, c_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: f16[6,12], {{.*}}: f16[12,6]) -> f16[6,6] {
; CHECK-DAG: f16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4
; CHECK-DAG: f16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2
)");
}
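
// f16 matrix bias plus relu: the beta=1 operand with the RELU epilogue.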
TEST_F(CublasLtGemmRewriteTest, MatrixBiasReluActivationF16) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[8,16] parameter(0)
y = f16[16,8] parameter(1)
z = f16[8,8] parameter(2)
dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
add = f16[8,8] add(dot_a, z)
c = f16[] constant(0)
c_bcast = f16[8,8] broadcast(c), dimensions={}
ROOT out = f16[8,8] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f16[8,16], {{.*}}: f16[16,8], {{.*}}: f16[8,8]) -> f16[8,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f16[8,16]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f16[16,8]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[8,8]{1,0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
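
// f16 vector bias plus relu without padding.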
TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationF16Unpadded) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[8,16] parameter(0)
y = f16[16,8] parameter(1)
z = f16[8] parameter(2)
dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f16[8,8] broadcast(z), dimensions={1}
add = f16[8,8] add(dot_a, z_bcast)
c = f16[] constant(0)
c_bcast = f16[8,8] broadcast(c), dimensions={}
ROOT out = f16[8,8] maximum(add, c_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: pad(
; CHECK: custom-call
; CHECK-SAME: custom_call_target="__cublas$lt$matmul"
)");
}
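
// f16 vector bias plus relu with padded operands.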
TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationF16Padded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Volta())) {
GTEST_SKIP() << "Padding of GEMM operands only implemented on "
"architectures with Tensor Cores.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f16[6,12] parameter(0)
y = f16[12,6] parameter(1)
z = f16[6] parameter(2)
dot_a = f16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f16[6,6] broadcast(z), dimensions={1}
add = f16[6,6] add(dot_a, z_bcast)
c = f16[] constant(0)
c_bcast = f16[6,6] broadcast(c), dimensions={}
ROOT out = f16[6,6] maximum(add, c_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: f16[6,12], {{.*}}: f16[12,6], {{.*}}: f16[6]) -> f16[6,6] {
; CHECK-DAG: f16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4
; CHECK-DAG: f16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2
)");
}
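
// bf16 matrix bias folds as the beta=1 operand (pattern checked on Ampere+).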
TEST_F(CublasLtGemmRewriteTest, MatrixBiasBF16) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[8,16] parameter(0)
y = bf16[16,8] parameter(1)
z = bf16[8,8] parameter(2)
dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = bf16[8,8] add(dot_a, z)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: bf16[8,16], {{.*}}: bf16[16,8], {{.*}}: bf16[8,8]) -> bf16[8,8] {
; CHECK-DAG: [[P0:%[^ ]+]] = bf16[8,16]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = bf16[16,8]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = bf16[8,8]{1,0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
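
// A matrix bias supplied through a bitcast is folded by bitcasting the
// bias operand back to the matmul's 2D shape.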
TEST_F(CublasLtGemmRewriteTest, MatrixBiasBitcastBF16) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[8,16] parameter(0)
y = bf16[16,8] parameter(1)
bias = bf16[2,4,8] parameter(2)
dot = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bitcast = bf16[2,4,8] bitcast(dot)
ROOT out = bf16[2,4,8] add(bitcast, bias)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Bitcast(
m::GetTupleElement(
m::CustomCall(
{"__cublas$lt$matmul"},
m::Parameter(0).WithShape(BF16, {8, 16}),
m::Parameter(1).WithShape(BF16, {16, 8}),
m::Bitcast(m::Parameter(2)).WithShape(BF16, {8, 8})),
0))
.WithShape(BF16, {2, 4, 8})));
}
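
// bf16 vector bias without padding.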
TEST_F(CublasLtGemmRewriteTest, VectorBiasBF16Unpadded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP()
<< "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[8,16] parameter(0)
y = bf16[16,8] parameter(1)
z = bf16[8] parameter(2)
dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = bf16[8,8] broadcast(z), dimensions={1}
ROOT add = bf16[8,8] add(dot_a, z_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{8e-3, 2e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: pad(
; CHECK: custom-call
; CHECK-SAME: custom_call_target="__cublas$lt$matmul"
)");
}
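
// bf16 vector bias with padded operands (Ampere and newer only).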
TEST_F(CublasLtGemmRewriteTest, VectorBiasBF16Padded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Padding of GEMM operands in bfloat16 only implemented on "
"Ampere and newer architectures.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[6,12] parameter(0)
y = bf16[12,6] parameter(1)
z = bf16[6] parameter(2)
dot_a = bf16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = bf16[6,6] broadcast(z), dimensions={1}
ROOT add = bf16[6,6] add(dot_a, z_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: bf16[6,12], {{.*}}: bf16[12,6], {{.*}}: bf16[6]) -> bf16[6,6] {
; CHECK-DAG: bf16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4
; CHECK-DAG: bf16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2
)");
}
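
// bf16 relu fusion without padding.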
TEST_F(CublasLtGemmRewriteTest, ReluActivationBF16Unpadded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP()
<< "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[8,16] parameter(0)
y = bf16[16,8] parameter(1)
dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = bf16[] constant(0)
c_bcast = bf16[8,8] broadcast(c), dimensions={}
ROOT out = bf16[8,8] maximum(dot_a, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: pad(
; CHECK: custom-call
; CHECK-SAME: custom_call_target="__cublas$lt$matmul"
)");
}
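
// bf16 relu fusion with padded operands.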
TEST_F(CublasLtGemmRewriteTest, ReluActivationBF16Padded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Padding of GEMM operands in bfloat16 only implemented on "
"Ampere and newer architectures.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[6,12] parameter(0)
y = bf16[12,6] parameter(1)
dot_a = bf16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = bf16[] constant(0)
c_bcast = bf16[6,6] broadcast(c), dimensions={}
ROOT out = bf16[6,6] maximum(dot_a, c_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: bf16[6,12], {{.*}}: bf16[12,6]) -> bf16[6,6] {
; CHECK-DAG: bf16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4
; CHECK-DAG: bf16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2
)");
}
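
// bf16 vector bias plus relu without padding.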
TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationBF16Unpadded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP()
<< "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[8,16] parameter(0)
y = bf16[16,8] parameter(1)
z = bf16[8] parameter(2)
dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = bf16[8,8] broadcast(z), dimensions={1}
add = bf16[8,8] add(dot_a, z_bcast)
c = bf16[] constant(0)
c_bcast = bf16[8,8] broadcast(c), dimensions={}
ROOT out = bf16[8,8] maximum(add, c_bcast)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{8e-3, 2e-3}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-NOT: pad(
; CHECK: custom-call
; CHECK-SAME: custom_call_target="__cublas$lt$matmul"
)");
}
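
// bf16 vector bias plus relu with padded operands.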
TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationBF16Padded) {
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Padding of GEMM operands in bfloat16 only implemented on "
"Ampere and newer architectures.";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[6,12] parameter(0)
y = bf16[12,6] parameter(1)
z = bf16[6] parameter(2)
dot_a = bf16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = bf16[6,6] broadcast(z), dimensions={1}
add = bf16[6,6] add(dot_a, z_bcast)
c = bf16[] constant(0)
c_bcast = bf16[6,6] broadcast(c), dimensions={}
ROOT out = bf16[6,6] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
MatchOptimizedHlo(hlo_text, R"(
; CHECK-DAG: ENTRY %test ({{.*}}: bf16[6,12], {{.*}}: bf16[12,6], {{.*}}: bf16[6]) -> bf16[6,6] {
; CHECK-DAG: bf16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4
; CHECK-DAG: bf16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2
)");
}
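
// f64 GEMMs support the BIAS_RELU epilogue on CUDA; ROCm blas-lt lacks an
// f64 datatype.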
TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationF64) {
if (!IsCuda()) {
GTEST_SKIP() << "TODO: Unsupported blas-lt F64 datatype on ROCM";
}
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f64[2,3] parameter(0)
y = f64[3,4] parameter(1)
z = f64[4] parameter(2)
dot_a = f64[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
z_bcast = f64[2,4] broadcast(z), dimensions={1}
add = f64[2,4] add(dot_a, z_bcast)
c = f64[] constant(0)
c_bcast = f64[2,4] broadcast(c), dimensions={}
ROOT out = f64[2,4] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-10, 1e-10}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f64[2,3], {{.*}}: f64[3,4], {{.*}}: f64[4]) -> f64[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f64[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f64[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f64[4]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f64[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_RELU"
; CHECK: }
)");
}
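
// A scalar multiplier on the dot folds into alpha_real, combining with the
// bias and relu into a single BIAS_RELU custom call.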
TEST_F(CublasLtGemmRewriteTest, AlphaSimpleRewriteBiasAddActivation) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,3] parameter(0)
y = f32[3,4] parameter(1)
z = f32[4] parameter(2)
k = f32[] constant(3.0)
k_bcast = f32[2,4] broadcast(k), dimensions={}
dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest}
dot_a_multiplied = f32[2, 4] multiply(dot_a, k_bcast)
z_bcast = f32[2,4] broadcast(z), dimensions={1}
add = f32[2,4] add(dot_a_multiplied, z_bcast)
c = f32[] constant(0)
c_bcast = f32[2,4] broadcast(c), dimensions={}
ROOT out = f32[2,4] maximum(add, c_bcast)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]),
; CHECK: custom_call_target="__cublas$lt$matmul",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_RELU"
; CHECK: }
)");
}
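
// Constant biases reachable through reshape, transpose, or bitcast are all
// folded to constants and passed as matrix-bias operands of the custom call.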
TEST_F(CublasLtGemmRewriteTest, FoldConstantBias) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] broadcast(f32[2] constant({0, 0})), dimensions={0}
dot1 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias1 = f32[2,2] parameter(2)
sum1 = add(dot1, bias1)
dot2 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
sum2 = add(dot2, f32[2,2] reshape(bias))
dot3 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias3 = f32[2,2] transpose(bias), dimensions={1,0}
sum3 = add(dot3, bias3)
dot4 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
sum4 = add(dot4, f32[2,2] bitcast(bias))
ROOT root = tuple(sum1, sum2, sum3, sum4)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(Capability(), GetToolkitVersion());
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Parameter()),
0),
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()),
0),
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()),
0),
m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()),
0))));
}
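
// A relu expressed as a called computation whose result has several users
// must still rewrite to an LT matmul.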
TEST_F(CublasLtGemmRewriteTest, MultipleMaximumUsers) {
const char* hlo_text = R"(
HloModule multiple_maximum_users
relu {
Arg_0 = f32[3,896,54]{2,1,0} parameter(0)
constant = f32[] constant(0)
broadcast = f32[3,896,54]{2,1,0} broadcast(constant), dimensions={}
ROOT maximum = f32[3,896,54]{2,1,0} maximum(Arg_0, broadcast)
}
ENTRY main {
constant = f32[] constant(1)
broadcast_1 = f32[3,896,1024]{2,1,0} broadcast(constant), dimensions={}
Arg_2 = f32[1024,54]{1,0} parameter(2)
dot = f32[3,896,54]{2,1,0} dot(broadcast_1, Arg_2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_1 = f32[54]{0} parameter(1)
broadcast_2 = f32[3,896,54]{2,1,0} broadcast(Arg_1), dimensions={2}
add = f32[3,896,54]{2,1,0} add(dot, broadcast_2)
call = f32[3,896,54]{2,1,0} call(add), to_apply=relu
Arg_0 = f32[1]{0} parameter(0)
reshape_1 = f32[1,1,1]{2,1,0} reshape(Arg_0)
broadcast_3 = f32[1,1,1]{2,1,0} broadcast(reshape_1), dimensions={0,1,2}
reshape_2 = f32[] reshape(broadcast_3)
broadcast_4 = f32[3,896,54]{2,1,0} broadcast(reshape_2), dimensions={}
multiply = f32[3,896,54]{2,1,0} multiply(call, broadcast_4)
ROOT tuple = (f32[3,896,54]{2,1,0}, f32[3,896,54]{2,1,0}) tuple(multiply, call)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-4}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: custom_call_target="__cublas$lt$matmul",
)");
}
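
// f16/bf16 matmuls whose results are converted to f32 before a matrix-bias
// add fuse both the convert and the add into the custom call.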
TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeOutOfPlace) {
if (!IsCuda()) {
GTEST_SKIP() << "TODO: Unsupported mixed datatypes on ROCM";
}
std::vector<std::tuple<absl::string_view, absl::string_view>>
type_combinations = {
{"f16", "f32"},
{"bf16", "f32"},
};
const char* hlo_text_template = R"(
HloModule test
ENTRY test {
x = <<ABType>>[16,32] parameter(0)
y = <<ABType>>[32,16] parameter(1)
z = <<DType>>[16,16] parameter(2)
dot_a = <<ABType>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert = <<DType>>[16,16] convert(dot_a)
ROOT out = <<DType>>[16,16] add(convert, z)
})";
for (const auto& type_combination : type_combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<ABType>>"] = std::get<0>(type_combination);
replacements["<<DType>>"] = std::get<1>(type_combination);
const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (std::get<0>(type_combination) == "bf16" && IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
continue;
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
EXPECT_THAT(
optimized_module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)));
}
}
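
// Batched variant of the mixed-type matrix-bias fusion above.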
TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeOutOfPlaceBatched) {
if (!IsCuda()) {
GTEST_SKIP() << "TODO: Unsupported mixed datatypes on ROCM";
}
std::vector<std::tuple<absl::string_view, absl::string_view>>
type_combinations = {
{"f16", "f32"},
{"bf16", "f32"},
};
const char* hlo_text_template = R"(
HloModule test
ENTRY test {
x = <<ABType>>[4,16,32] parameter(0)
y = <<ABType>>[4,32,16] parameter(1)
z = <<DType>>[4,16,16] parameter(2)
dot_a = <<ABType>>[4,16,16] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
convert = <<DType>>[4,16,16] convert(dot_a)
ROOT out = <<DType>>[4,16,16] add(convert, z)
})";
for (const auto& type_combination : type_combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<ABType>>"] = std::get<0>(type_combination);
replacements["<<DType>>"] = std::get<1>(type_combination);
const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (std::get<0>(type_combination) == "bf16" && IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
continue;
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
EXPECT_THAT(
optimized_module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)));
}
}
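
// Here the bias is the result of a negate rather than a bare parameter, so
// the rewriter can reuse (alias) the bias buffer for the GEMM output; the
// matcher expects the negate itself as the bias operand of the custom call.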
TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeInPlace) {
if (!IsCuda()) {
    GTEST_SKIP() << "TODO: Unsupported mixed datatypes on ROCm";
}
std::vector<std::tuple<absl::string_view, absl::string_view>>
type_combinations = {
{"f16", "f32"},
{"bf16", "f32"},
};
const char* hlo_text_template = R"(
HloModule test
ENTRY test {
x = <<ABType>>[16,32] parameter(0)
y = <<ABType>>[32,16] parameter(1)
z = <<DType>>[16,16] parameter(2)
dot_a = <<ABType>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias = <<DType>>[16,16] negate(z)
convert = <<DType>>[16,16] convert(dot_a)
ROOT out = <<DType>>[16,16] add(convert, bias)
})";
for (const auto& type_combination : type_combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<ABType>>"] = std::get<0>(type_combination);
replacements["<<DType>>"] = std::get<1>(type_combination);
const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (std::get<0>(type_combination) == "bf16" && IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
continue;
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
EXPECT_THAT(optimized_module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(m::Parameter(0), m::Parameter(1),
m::Negate(m::Parameter(2))),
0)));
}
}
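
// An f64 bias/output is not a supported cublasLt mixed-type combination: the
// dot itself is rewritten, but the convert and add stay outside the custom
// call (lowered here into a fusion consuming the get-tuple-element).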
TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeNotSupported) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = bf16[16,32] parameter(0)
y = bf16[32,16] parameter(1)
z = f64[16,16] parameter(2)
dot_a = bf16[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bias = f64[16,16] negate(z)
convert = f64[16,16] convert(dot_a)
ROOT out = f64[16,16] add(convert, bias)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3}));
if (IsCuda() &&
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32";
}
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo_text));
MatchOptimizedHlo(hlo_text, R"(
; CHECK: %[[custom_call:.*]] = {{.*}} custom-call{{.*}}__cublas$lt$matmul
; CHECK: %[[tuple:.*]] = bf16[16,16]{1,0} get-tuple-element(%[[custom_call]]), index=0
; CHECK: ROOT {{.*}} fusion({{.*}}%[[tuple]]
)");
}
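
// FP8 GEMM rewrite tests. HLO snippets use the placeholders <<F8E4M3>>,
// <<F8E5M2>> and <<F8E4M3_AMAX>>, which the fixture substitutes with the
// platform-specific FP8 types (f8e4m3fn/f8e5m2 on CUDA, f8e4m3fnuz/
// f8e5m2fnuz on ROCm) and the matching finite maximum (448 vs. 240).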
class ParameterizedFp8GemmRewriteTest : public ParameterizedGemmRewriteTest {
public:
ParameterizedFp8GemmRewriteTest() {
replacements_[kF8E4M3DatatypePlaceholder] =
IsCuda() ? "f8e4m3fn" : "f8e4m3fnuz";
replacements_[kF8E5M2DatatypePlaceholder] =
IsCuda() ? "f8e5m2" : "f8e5m2fnuz";
replacements_[kF8E4M3AmaxPlaceholder] = IsCuda() ? "448." : "240.";
}
void SetUp() override {
if (IsCuda() && GetToolkitVersion() < se::SemanticVersion{12, 0, 0}) {
GTEST_SKIP() << "F8 gemm rewrite is only supported in CUDA 12 and above.";
}
if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 0, 0}) {
GTEST_SKIP()
<< "F8 gemm rewrite is only supported in ROCm 6.0 and above.";
}
}
protected:
void CheckFp8IfSupported(absl::string_view hlo_text,
ErrorSpec error_spec = ErrorSpec{1e-2, 1e-2}) {
if (!HasFp8Support()) {
return;
}
    std::string replaced_hlo_text =
        absl::StrReplaceAll(hlo_text, replacements_);
    EXPECT_TRUE(RunAndCompare(replaced_hlo_text, error_spec));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(replaced_hlo_text));
const HloInstruction* call =
FindInstruction(optimized_module.get(), HloOpcode::kCustomCall);
ASSERT_NE(call, nullptr);
EXPECT_EQ(call->custom_call_target(), "__cublas$lt$matmul$f8");
}
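
  // The overrides below apply the placeholder substitutions to the HLO (and,
  // where applicable, the FileCheck pattern) before delegating to the base
  // class helpers.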
void MatchOptimizedHlo(absl::string_view hlo, const absl::string_view pattern,
bool print_operand_shape = false) {
GemmRewriteTest::MatchOptimizedHlo(
absl::StrReplaceAll(hlo, replacements_),
absl::StrReplaceAll(pattern, replacements_), print_operand_shape);
}
void RunAndFilecheckHloRewrite(
absl::string_view hlo, HloPassInterface&& hlo_pass,
std::optional<absl::string_view> expected,
std::function<void(HloModule*)> after_pass_checks = nullptr,
const HloModuleConfig* config = nullptr) {
if (expected.has_value()) {
std::string replaced_pattern =
absl::StrReplaceAll(expected.value(), replacements_);
GemmRewriteTest::RunAndFilecheckHloRewrite(
absl::StrReplaceAll(hlo, replacements_), std::move(hlo_pass),
replaced_pattern, after_pass_checks, config);
}
}
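
  // Note: replica_count and num_partitions are accepted only to match the
  // base-class signature; they are not forwarded, presumably because these
  // tests are single-replica.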
absl::StatusOr<std::unique_ptr<VerifiedHloModule>>
ParseAndReturnVerifiedModule(absl::string_view hlo_text,
int64_t replica_count = 1,
int64_t num_partitions = 1) {
return GemmRewriteTest::ParseAndReturnVerifiedModule(
absl::StrReplaceAll(hlo_text, replacements_));
}
private:
static constexpr const char* kF8E4M3DatatypePlaceholder{"<<F8E4M3>>"};
static constexpr const char* kF8E5M2DatatypePlaceholder{"<<F8E5M2>>"};
static constexpr const char* kF8E4M3AmaxPlaceholder{"<<F8E4M3_AMAX>>"};
};
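
// The batch dimension of the LHS does not have to be the major-most
// dimension for the FP8 rewrite to apply.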
TEST_P(ParameterizedFp8GemmRewriteTest, SupportsF8NonMajorBatchDim) {
const char* hlo_text = R"(
HloModule t
ENTRY main {
%bitcast.73421 = f8e4m3fn[16,8,640]{2,1,0} parameter(0)
%parameter_1.5 = f8e4m3fn[8,640,5120]{2,1,0} parameter(1)
%parameter_2 = f8e4m3fn[8,640,5120]{2,1,0} parameter(2)
%concatenate.2145 = f8e4m3fn[8,640,10240]{2,1,0} concatenate(
f8e4m3fn[8,640,5120]{2,1,0} %parameter_1.5,
f8e4m3fn[8,640,5120]{2,1,0} %parameter_2),
dimensions={2}
%dot.6237 = f32[8,16,10240]{2,1,0} dot(
f8e4m3fn[16,8,640]{2,1,0} %bitcast.73421,
f8e4m3fn[8,640,10240]{2,1,0} %concatenate.2145),
lhs_batch_dims={1},
lhs_contracting_dims={2},
rhs_batch_dims={0},
rhs_contracting_dims={1}
ROOT %convert.20480 = bf16[8,16,10240]{2,1,0} convert(
f32[8,16,10240]{2,1,0} %dot.6237)
})";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: custom-call({{.*}}"lhs_batch_dimensions":["1"],"rhs_batch_dimensions":["0"]
)");
}
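
// On GPUs without FP8 support, an FP8 dot must not be rewritten to the FP8
// custom call; it should fall back to the regular cuBLAS target.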
TEST_P(ParameterizedFp8GemmRewriteTest, DoNotRewriteToF8OnPreAda) {
if (!IsCuda()) {
    GTEST_SKIP() << "FP8 rewrite pattern is different on ROCm 6.2";
}
if (HasFp8Support()) {
GTEST_SKIP() << "Test requires a pre-Ada GPU";
}
const char* hlo_text = R"(
HloModule test
ENTRY PreAdaTest {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
ROOT out = <<F8E4M3>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_),
ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %PreAdaTest ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> <<F8E4M3>>[16,16] {
; CHECK: {{.*}} = {{.*}} custom-call({{.*}}, {{.*}})
; CHECK-DAG: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>"
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, DoNotRewriteOnPreAdaWithF32Output) {
if (HasFp8Support()) {
GTEST_SKIP() << "Test requires a pre-Ada GPU or an AMD GPU prior to MI300.";
}
const char* hlo_text = R"(
HloModule test
ENTRY PreAdaTest {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
ROOT out = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_),
ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %PreAdaTest ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> f32[16,16] {
; CHECK: {{.*}} = {{.*}} custom-call({{.*}}, {{.*}})
; CHECK-DAG: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>"
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, UnsupportedTypesF8) {
const char* hlo_text = R"(
HloModule test
ENTRY unsupported_types {
x = <<F8E5M2>>[16,16] parameter(0)
y = <<F8E5M2>>[16,16] parameter(1)
ROOT out = <<F8E5M2>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_),
ErrorSpec{1e-2, 1e-2}));
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(Capability(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %unsupported_types ({{.*}}: <<F8E5M2>>[16,16], {{.*}}: <<F8E5M2>>[16,16]) -> <<F8E5M2>>[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E5M2>>[16,16]{1,0} parameter(0)
; CHECK-NEXT: [[P0_CONVERT:%[^ ]+]] = f16[16,16]{1,0} convert([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E5M2>>[16,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_CONVERT:%[^ ]+]] = f16[16,16]{1,0} convert([[P1]])
; CHECK-NEXT: [[DOT:%[^ ]+]] = f16[16,16]{1,0} dot([[P0_CONVERT]], [[P1_CONVERT]]), lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = <<F8E5M2>>[16,16]{1,0} convert([[DOT]])
)");
}
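
// Unscaled FP8 GEMM: with no dequantization in the graph, the rewriter
// supplies constant-1 scales as the extra operands of the FP8 custom call.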
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
ROOT out = <<F8E4M3>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
std::string checks = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> <<F8E4M3>>[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C1:[^ ]+]] = f32[] constant(1)
)";
if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) {
checks.append(
R"(; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]),
)");
} else {
checks.append(
R"(; CHECK-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]),
)");
}
checks.append(
R"(; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
checks);
}
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDMatrixBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
dot_a = <<F8E4M3>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
b = <<F8E4M3>>[16,16] parameter(2)
ROOT out = <<F8E4M3>>[16,16] add(dot_a, b)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: <<F8E4M3>>[16,16]) -> <<F8E4M3>>[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C1:[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[DOT_TUPLE:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[DOT:%[^ ]+]] = <<F8E4M3>>[16,16]{1,0} get-tuple-element([[DOT_TUPLE]]), index=0
; CHECK-NEXT: [[P2:%[^ ]+]] = <<F8E4M3>>[16,16]{1,0} parameter(2)
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = <<F8E4M3>>[16,16]{1,0} add([[DOT]], [[P2]])
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDColMajorLhsF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[2,64,32]{1,2,0} parameter(0)
y = <<F8E4M3>>[2,32,16]{2,1,0} parameter(1)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
dq_scale = f32[] multiply(x_scale, y_scale)
dq_scale_bcast = f32[2,64,16] broadcast(dq_scale), dimensions={}
out.0 = f32[2,64,16] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
ROOT out = f32[2,64,16] multiply(out.0, dq_scale_bcast)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[2,64,32], {{.*}}: <<F8E4M3>>[2,32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[2,64,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[2,64,32]{1,2,0} parameter(0)
; CHECK-NEXT: [[P0_BT:%[^ ]+]] = <<F8E4M3>>[2,32,64]{2,1,0} bitcast([[P0]])
; CHECK-NEXT: [[P0_TR:%[^ ]+]] = <<F8E4M3>>[2,64,32]{2,1,0} transpose([[P0_BT]]), dimensions={0,2,1}
; CHECK-NEXT: [[P0_BT1:%[^ ]+]] = <<F8E4M3>>[2,32,64]{1,2,0} bitcast([[P0_TR]])
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[2,32,16]{2,1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[2,16,32]{2,1,0} transpose([[P1]]), dimensions={0,2,1}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[DQ:%[^ ]+]] = f32[] multiply([[P2]], [[P3]])
; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,64,16]{2,1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BT1]], [[P1_TRANSPOSE]], [[DQ]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["2"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
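
// The canonical dequantization pattern the rewriter matches: convert each
// FP8 operand to a wide type, broadcast a scalar scale, and multiply. The
// whole pattern collapses into a single FP8 custom call that takes the
// scales directly as operands.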
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
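
// Dimensions that are not multiples of 16 are padded up to supported sizes
// and the GEMM result is sliced back to the original shape.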
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDPaddedF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[13,17] parameter(0)
y = <<F8E4M3>>[17,31] parameter(1)
x_f32 = f32[13,17] convert(x)
y_f32 = f32[17,31] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[13,17] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[17,31] broadcast(y_scale), dimensions={}
x_unscaled = f32[13,17] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[17,31] multiply(y_f32, y_scale_bcast)
ROOT out = f32[13,31] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[13,17], {{.*}}: <<F8E4M3>>[17,31], {{.*}}: f32[], {{.*}}: f32[]) -> f32[13,31] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[13,17]{1,0} parameter(0)
; CHECK-NEXT: [[C0:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P0_PADDED:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} pad([[P0]], [[C0]]), padding=0_3x0_15
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[17,31]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[31,17]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P1_TRANSPOSE_PADDED:%[^ ]+]] = <<F8E4M3>>[32,32]{1,0} pad([[P1_TRANSPOSE]], [[C1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[DOT_TUPLE:%[^ ]+]] = (f32[16,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PADDED]], [[P1_TRANSPOSE_PADDED]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: [[DOT:%[^ ]+]] = f32[16,32]{1,0} get-tuple-element([[DOT_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[13,31]{1,0} slice([[DOT]]), slice={[0:13], [0:31]}
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDBitcastF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[2,8,16] parameter(0)
y = <<F8E4M3>>[16,16] parameter(1)
x_f32 = f32[2,8,16] convert(x)
y_f32 = f32[16,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[2,8,16] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[2,8,16] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,16] multiply(y_f32, y_scale_bcast)
x_bitcast = f32[16,16] bitcast(x_unscaled)
ROOT out = f32[16,16] dot(x_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$lt$matmul$f8"}), 0)
.WithShape(F32, {16, 16})));
}
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDWithConvertF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
ROOT out = f32[16,16] dot(x_f32, y_f32), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDUnaryOpsF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[3] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[3] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[3] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[3] multiply(x_f32, x_scale_bcast)
zero = f32[] constant(0)
x_unscaled_padded = f32[30] pad(x_unscaled, zero), padding=0_27
x_unscaled_padded_bcast = f32[30,8,5] broadcast(x_unscaled_padded), dimensions={0}
x_unscaled_padded_bcast_sliced = f32[16,8,4] slice(x_unscaled_padded_bcast), slice={[2:18], [0:8], [0:4]}
x_unscaled_padded_bcast_sliced_reshaped = f32[16,32] reshape(x_unscaled_padded_bcast_sliced)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
ROOT out = f32[16,16] dot(x_unscaled_padded_bcast_sliced_reshaped, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[3], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[3]{0} parameter(0)
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[C0_CONVERT:%[^ ]+]] = <<F8E4M3>>[] convert([[C0]])
; CHECK-NEXT: [[P0_U0:%[^ ]+]] = <<F8E4M3>>[30]{0} pad([[P0]], [[C0_CONVERT]]), padding=0_27
; CHECK-NEXT: [[P0_U1:%[^ ]+]] = <<F8E4M3>>[30,8,5]{2,1,0} broadcast([[P0_U0]]), dimensions={0}
; CHECK-NEXT: [[P0_U2:%[^ ]+]] = <<F8E4M3>>[16,8,4]{2,1,0} slice([[P0_U1]]), slice={[2:18], [0:8], [0:4]}
; CHECK-NEXT: [[P0_U3:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} reshape([[P0_U2]])
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_U3]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest,
UnscaledABUnscaledDUnaryOpsWithConvertF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[3] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[3] convert(x)
y_f32 = f32[32,16] convert(y)
zero = f32[] constant(0)
x_padded = f32[30] pad(x_f32, zero), padding=0_27
x_padded_bcast = f32[30,8,5] broadcast(x_padded), dimensions={0}
x_padded_bcast_sliced = f32[16,8,4] slice(x_padded_bcast), slice={[2:18], [0:8], [0:4]}
x_padded_bcast_sliced_reshaped = f32[16,32] reshape(x_padded_bcast_sliced)
ROOT out = f32[16,16] dot(x_padded_bcast_sliced_reshaped, y_f32), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[3], {{.*}}: <<F8E4M3>>[32,16]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[3]{0} parameter(0)
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[C0_CONVERT:%[^ ]+]] = <<F8E4M3>>[] convert([[C0]])
; CHECK-NEXT: [[P0_U0:%[^ ]+]] = <<F8E4M3>>[30]{0} pad([[P0]], [[C0_CONVERT]]), padding=0_27
; CHECK-NEXT: [[P0_U1:%[^ ]+]] = <<F8E4M3>>[30,8,5]{2,1,0} broadcast([[P0_U0]]), dimensions={0}
; CHECK-NEXT: [[P0_U2:%[^ ]+]] = <<F8E4M3>>[16,8,4]{2,1,0} slice([[P0_U1]]), slice={[2:18], [0:8], [0:4]}
; CHECK-NEXT: [[P0_U3:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} reshape([[P0_U2]])
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C2:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_U3]], [[P1_TRANSPOSE]], [[C2]], [[C2]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
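
// A dynamic-slice between dequantization and dot does not block the rewrite:
// the slice is moved onto the FP8 operand itself.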
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDDynamicSliceF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[32,32] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
zero = s32[] constant(0)
x_f32 = f32[32,32] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[32,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[32,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
dyn_slice = f32[16,32]{1,0} dynamic-slice(x_unscaled, zero, zero), dynamic_slice_sizes={16,32}
ROOT dot_a = f32[16,16] dot(dyn_slice, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[32,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[32,32]{1,0} parameter(0)
; CHECK-NEXT: [[C0:%[^ ]+]] = s32[] constant(0)
; CHECK-NEXT: [[DYN_SLICE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} dynamic-slice([[P0]], [[C0]], [[C0]]), dynamic_slice_sizes={16,32}
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[DYN_SLICE]], [[P1]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
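
// A select against a broadcast zero can be pushed into the FP8 domain, since
// zero is exactly representable in FP8; contrast with the nonzero-constant
// test below, where no rewrite happens.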
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDSelectF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
k = pred[16,32] parameter(4)
c = f32[] constant(0)
c_bcast = f32[16,32] broadcast(c), dimensions={}
select_a = f32[16,32] select(k, y_unscaled, c_bcast)
ROOT dot_a = f32[16,16] dot(x_unscaled, select_a), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: pred[16,32]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P4:%[^ ]+]] = pred[16,32]{1,0} parameter(4)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[C0_BCAST:%[^ ]+]] = f32[16,32]{1,0} broadcast([[C0]]), dimensions={}
; CHECK-NEXT: [[C0_CONVERT:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} convert([[C0_BCAST]])
; CHECK-NEXT: [[SELECT:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} select([[P4]], [[P1]], [[C0_CONVERT]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[SELECT]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDSelectNonzeroConstantF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
k = pred[16,32] parameter(4)
c = f32[] constant(1)
c_bcast = f32[16,32] broadcast(c), dimensions={}
select_a = f32[16,32] select(k, y_unscaled, c_bcast)
ROOT dot_a = f32[16,16] dot(x_unscaled, select_a), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
}
TEST_P(ParameterizedFp8GemmRewriteTest, BatchedScaledABUnscaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[10,16,32] parameter(0)
y = <<F8E4M3>>[10,32,16] parameter(1)
x_f32 = f32[10,16,32] convert(x)
y_f32 = f32[10,32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[10,16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[10,32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[10,16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[10,32,16] multiply(y_f32, y_scale_bcast)
ROOT out = f32[10,16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[10,16,32], {{.*}}: <<F8E4M3>>[10,32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[10,16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[10,16,32]{2,1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[10,32,16]{2,1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[10,16,32]{2,1,0} transpose([[P1]]), dimensions={0,2,1}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[10,16,16]{2,1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["2"]
; CHECK-DAG: "rhs_contracting_dimensions":["2"]
; CHECK-DAG: "lhs_batch_dimensions":["0"]
; CHECK-DAG: "rhs_batch_dimensions":["0"]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
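
// A scalar multiply of the dot output folds into the GEMM's alpha attribute
// (alpha_real:3 below) instead of remaining a separate instruction.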
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABAlphaDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
k = f32[] constant(3.0)
k_bcast = f32[16,16] broadcast(k), dimensions={}
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[16,16] multiply(dot_a, k_bcast)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":3
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDReluActivationF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = f32[] constant(0)
c_bcast = f32[16,16] broadcast(c), dimensions={}
ROOT out = f32[16,16] maximum(dot_a, c_bcast)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDVectorBiasThenApproxGeluActivationF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_bf16 = bf16[16,32] convert(x)
y_bf16 = bf16[32,16] convert(y)
x_scale = bf16[] parameter(2)
y_scale = bf16[] parameter(3)
bias = bf16[16] parameter(4)
x_scale_bcast = bf16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = bf16[32,16] broadcast(y_scale), dimensions={}
x_unscaled = bf16[16,32] multiply(x_bf16, x_scale_bcast)
y_unscaled = bf16[32,16] multiply(y_bf16, y_scale_bcast)
dot1 = bf16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
b_bcast = bf16[16,16] broadcast(bias), dimensions={1}
dot = bf16[16,16] add(dot1, b_bcast)
mul.0 = bf16[16,16] multiply(dot, dot)
mul.1 = bf16[16,16] multiply(dot, mul.0)
const.0 = bf16[] constant(0.044715)
bcast.0 = bf16[16,16] broadcast(const.0), dimensions={}
mul.2 = bf16[16,16] multiply(mul.1, bcast.0)
add.0 = bf16[16,16] add(dot, mul.2)
const.1 = bf16[] constant(0.797884583)
bcast.1 = bf16[16,16] broadcast(const.1), dimensions={}
mul.3 = bf16[16,16] multiply(add.0, bcast.1)
tanh = bf16[16,16] tanh(mul.3)
const.2 = bf16[] constant(1)
bcast.2 = bf16[16,16] broadcast(const.2), dimensions={}
add.2 = bf16[16,16] add(tanh, bcast.2)
const.3 = bf16[] constant(0.5)
bcast.3 = bf16[16,16] broadcast(const.3), dimensions={}
mul.4 = bf16[16,16] multiply(add.2, bcast.3)
ROOT out = bf16[16,16] multiply(dot, mul.4)
}
)";
CheckFp8IfSupported(hlo_text);
if ((IsCuda() && GetToolkitVersion() >= se::SemanticVersion{12, 4, 0}) ||
IsRocm()) {
std::string checks = R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: bf16[], {{.*}}: bf16[], {{.*}}: bf16[16]) -> bf16[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = bf16[] parameter(2)
; CHECK-NEXT: [[XS:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = bf16[] parameter(3)
; CHECK-NEXT: [[XS1:%[^ ]+]] = f32[] convert([[P3]])
)";
if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) {
checks +=
R"(; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]]),
)";
} else {
checks += R"(; CHECK-NEXT: [[B:%[^ ]+]] = bf16[16]{0} parameter(4)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]], [[B]]),
)";
}
checks += R"(; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
)";
if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) {
checks +=
R"(; CHECK-GCN-DAG: "epilogue":"DEFAULT"
)";
} else {
checks +=
R"(; CHECK-DAG: "epilogue":"BIAS_GELU"
)";
}
checks += R"(; CHECK: }
)";
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
checks);
}
}
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDApproxGeluActivationF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_bf16 = bf16[16,32] convert(x)
y_bf16 = bf16[32,16] convert(y)
x_scale = bf16[] parameter(2)
y_scale = bf16[] parameter(3)
x_scale_bcast = bf16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = bf16[32,16] broadcast(y_scale), dimensions={}
x_unscaled = bf16[16,32] multiply(x_bf16, x_scale_bcast)
y_unscaled = bf16[32,16] multiply(y_bf16, y_scale_bcast)
dot = bf16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
mul.0 = bf16[16,16] multiply(dot, dot)
mul.1 = bf16[16,16] multiply(dot, mul.0)
const.0 = bf16[] constant(0.044715)
bcast.0 = bf16[16,16] broadcast(const.0), dimensions={}
mul.2 = bf16[16,16] multiply(mul.1, bcast.0)
add.0 = bf16[16,16] add(dot, mul.2)
const.1 = bf16[] constant(0.797884583)
bcast.1 = bf16[16,16] broadcast(const.1), dimensions={}
mul.3 = bf16[16,16] multiply(add.0, bcast.1)
tanh = bf16[16,16] tanh(mul.3)
const.2 = bf16[] constant(1)
bcast.2 = bf16[16,16] broadcast(const.2), dimensions={}
add.2 = bf16[16,16] add(tanh, bcast.2)
const.3 = bf16[] constant(0.5)
bcast.3 = bf16[16,16] broadcast(const.3), dimensions={}
mul.4 = bf16[16,16] multiply(add.2, bcast.3)
ROOT out = bf16[16,16] multiply(dot, mul.4)
}
)";
CheckFp8IfSupported(hlo_text);
if ((IsCuda() && GetToolkitVersion() >= se::SemanticVersion{12, 4, 0}) ||
IsRocm()) {
std::string checks =
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: bf16[], {{.*}}: bf16[]) -> bf16[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = bf16[] parameter(2)
; CHECK-NEXT: [[XS:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = bf16[] parameter(3)
; CHECK-NEXT: [[XS1:%[^ ]+]] = f32[] convert([[P3]])
)";
if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) {
checks +=
R"(; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]]),
)";
} else {
checks +=
R"(; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]]),
)";
}
checks += R"(; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
)";
if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) {
checks += R"(; CHECK-GCN-DAG: "epilogue":"DEFAULT"
)";
} else {
checks += R"(; CHECK-DAG: "epilogue":"GELU"
)";
}
checks += R"(; CHECK: }
)";
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
checks);
}
}
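
// Dequantization expressed as division by the scale (inverse scaling) is
// recognized just like the multiply form.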
TEST_P(ParameterizedFp8GemmRewriteTest, InvScaledABUnscaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] divide(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] divide(y_f32, y_scale_bcast)
ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDMatrixBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
b = f32[16,16] parameter(2)
one = f32[] constant(1)
ones = f32[16,16] broadcast(one), dimensions={}
b_ones = f32[16,16] add(b, ones)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(3)
y_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = add(dot_a, b_ones)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[16,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK: [[C0:%[^ ]+]] = f32[16,16]{1,0} add({{.*}})
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C0]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: output_to_operand_aliasing={
; CHECK-SAME: {0}: (2, {})
; CHECK-SAME: }
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDMatrixBiasPaddedF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[14,31] parameter(0)
y = <<F8E4M3>>[31,14] parameter(1)
b = f32[14,14] parameter(2)
x_f32 = f32[14,31] convert(x)
y_f32 = f32[31,14] convert(y)
x_scale = f32[] parameter(3)
y_scale = f32[] parameter(4)
x_scale_bcast = f32[14,31] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[31,14] broadcast(y_scale), dimensions={}
x_unscaled = f32[14,31] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[31,14] multiply(y_f32, y_scale_bcast)
dot_a = f32[14,14] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = add(dot_a, b)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[14,31], {{.*}}: <<F8E4M3>>[31,14], {{.*}}: f32[14,14], {{.*}}: f32[], {{.*}}: f32[]) -> f32[14,14] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[14,31]{1,0} parameter(0)
; CHECK-NEXT: [[C0:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P0_PADDED:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} pad([[P0]], [[C0]]), padding=0_2x0_1
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[31,14]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[14,31]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P1_TRANSPOSE_PADDED:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} pad([[P1_TRANSPOSE]], [[C1]]), padding=0_2x0_1
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[14,14]{1,0} parameter(2)
; CHECK-NEXT: [[C2:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[P2_PADDED:%[^ ]+]] = f32[16,16]{1,0} pad([[P2]], [[C2]]), padding=0_2x0_2
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4)
; CHECK-NEXT: [[DOT_TUPLE:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PADDED]], [[P1_TRANSPOSE_PADDED]], [[P2_PADDED]], [[P3]], [[P4]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[DOT:%[^ ]+]] = f32[16,16]{1,0} get-tuple-element([[DOT_TUPLE]]), index=0
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[14,14]{1,0} slice([[DOT]]), slice={[0:14], [0:14]}
)");
}
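
// Output quantization: divide by z_scale, clamp to the finite FP8 range, and
// convert back to FP8. The rewriter passes 1/z_scale as the D scale of the
// custom call.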
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABScaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[16,16] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> <<F8E4M3>>[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P2_INV:%[^ ]+]] = f32[] divide([[C0]], [[P2]])
; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[C2:%[^ ]+]] = f32[] constant(1)
; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_INV]], [[C1]], [[C2]]),
; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_INV]], [[C1]], [[C2]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABScaledF32DF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> f32[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P2_INV:%[^ ]+]] = f32[] divide([[C0]], [[P2]])
; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_INV]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
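// Multiplying the F32 output by z_scale (inverse scaling): z_scale is passed
// to the custom call directly, with no reciprocal computed.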
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABInvScaledF32DF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT dot_a_scaled = f32[16,16] multiply(dot_a, z_scale_bcast)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> f32[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[C0]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
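// A matrix bias added before the output scaling: the bias is fused into the
// custom call (beta=1), while on CUDA the divide by z_scale remains outside
// as a separate instruction.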
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABScaledF32DMatrixBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
b = f32[16,16] parameter(2)
z_scale = f32[] parameter(3)
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bias = f32[16,16] add(dot_a, b)
ROOT dot_a_scaled = f32[16,16] divide(dot_a_bias, z_scale_bcast)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> f32[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[16,16]{1,0} parameter(2)
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1)
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[C0]], [[C0]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK-PTX-NEXT: [[GEMM:%[^ ]+]] = f32[16,16]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK-PTX-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-PTX-NEXT: [[P3_BCAST:%[^ ]+]] = f32[16,16]{1,0} broadcast([[P3]]), dimensions={}
; CHECK-PTX-NEXT: ROOT [[OUT:%[^ ]+]] = f32[16,16]{1,0} divide([[GEMM]], [[P3_BCAST]])
; CHECK: }
)");
}
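// Scaled FP8 operands with a scaled, clamped F8 output: on CUDA the D scale
// is supplied as 1/z_scale; on ROCm the custom call keeps an F32 output.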
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
z_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast)
c1 = f32[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f32[16,16] broadcast(c1), dimensions={}
c2 = f32[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f32[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> <<F8E4M3>>[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1)
; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4)
; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]])
; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]),
; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
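// Inverse output scaling via multiply: the rewrite must not introduce a
// divide.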
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABInvScaledDF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
z_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_scaled = f32[16,16] multiply(dot_a, z_scale_bcast)
c1 = f32[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f32[16,16] broadcast(c1), dimensions={}
c2 = f32[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f32[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-NOT: divide
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
)");
}
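// A ReLU applied before the output scaling is fused as the RELU epilogue.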
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDReluActivationF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
z_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
c = f32[] constant(0)
c_bcast = f32[16,16] broadcast(c), dimensions={}
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
relu_a = f32[16,16] maximum(dot_a, c_bcast)
relu_a_scaled = f32[16,16] divide(relu_a, z_scale_bcast)
c1 = f32[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f32[16,16] broadcast(c1), dimensions={}
c2 = f32[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f32[16,16] broadcast(c2), dimensions={}
relu_a_clamped = f32[16,16] clamp(c1_bcast, relu_a_scaled, c2_bcast)
ROOT out = <<F8E4M3>>[16,16] convert(relu_a_clamped)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> <<F8E4M3>>[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1)
; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4)
; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]])
; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]),
; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
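// A matrix bias together with an amax reduction of the biased result: on
// CUDA the custom call also returns the amax, and no output-to-operand
// aliasing is expected.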
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDMatrixBiasWithDAmaxF8) {
const char* hlo_text = R"(
HloModule test
apply {
a = f16[] parameter(0)
b = f16[] parameter(1)
ROOT c = f16[] maximum(a, b)
}
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f16 = f16[16,32] convert(x)
y_f16 = f16[32,16] convert(y)
b = f16[16,16] parameter(2)
one = f16[] constant(1)
ones = f16[16,16] broadcast(one), dimensions={}
b_ones = f16[16,16] add(b, ones)
x_scale = f16[] parameter(3)
y_scale = f16[] parameter(4)
z_scale = f16[] parameter(5)
x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f16[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast)
y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast)
dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bias = f16[16,16] add(dot_a, b_ones)
abs_dot_a = f16[16,16] abs(dot_a_bias)
c0 = f16[] constant(-inf)
amax = f16[] reduce(abs_dot_a, c0), dimensions={0,1}, to_apply=apply
dot_a_scaled = f16[16,16] divide(dot_a_bias, z_scale_bcast)
c1 = f16[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f16[16,16] broadcast(c1), dimensions={}
c2 = f16[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f16[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f16[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
ROOT result = (<<F8E4M3>>[16,16], f16[]) tuple(dot_a_f8, amax)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{0.1, 0.1});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16,16], {{.*}}: f16[], {{.*}}: f16[], {{.*}}: f16[]) -> (<<F8E4M3>>[16,16], f16[]) {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK: [[C0:%[^ ]+]] = f16[16,16]{1,0} add({{.*}})
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3)
; CHECK: [[P3:%[^ ]+]] = f16[] parameter(4)
; CHECK-PTX: [[P4:%[^ ]+]] = f16[] parameter(5)
; CHECK-PTX: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C0]], [[DUMMY0:%[^ ]+]], [[DUMMY1:%[^ ]+]], [[DUMMY2:%[^ ]+]]),
; CHECK-NOT: output_to_operand_aliasing
; CHECK-GCN: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C0]], [[DUMMY0:%[^ ]+]], [[DUMMY1:%[^ ]+]], [[DUMMY2:%[^ ]+]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
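// A vector bias with f16 scales: the scales are converted to f32 and the
// bias is fused via the BIAS epilogue.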
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDVectorBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f16 = f16[16,32] convert(x)
y_f16 = f16[32,16] convert(y)
b = f16[16] parameter(2)
b_bcast = f16[16,16] broadcast(b), dimensions={1}
x_scale = f16[] parameter(3)
y_scale = f16[] parameter(4)
z_scale = f16[] parameter(5)
x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f16[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast)
y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast)
dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bias = f16[16,16] add(dot_a, b_bcast)
dot_a_scaled = f16[16,16] divide(dot_a_bias, z_scale_bcast)
c1 = f16[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f16[16,16] broadcast(c1), dimensions={}
c2 = f16[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f16[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f16[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{0.1, 0.1});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16], {{.*}}: f16[], {{.*}}: f16[], {{.*}}: f16[]) -> <<F8E4M3>>[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3)
; CHECK-NEXT: [[CV:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4)
; CHECK-NEXT: [[CV1:%[^ ]+]] = f32[] convert([[P3]])
; CHECK-NEXT: [[VB:%[^ ]+]] = f16[16]{0} parameter(2)
; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f16[] constant(1)
; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f16[] parameter(5)
; CHECK-PTX-NEXT: [[DV:%[^ ]+]] = f16[] divide([[C2]], [[P4]])
; CHECK-PTX-NEXT: [[CV2:%[^ ]+]] = f32[] convert([[DV]])
; CHECK-PTX: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[CV]], [[CV1]], [[VB]], [[CV2]]),
; CHECK-GCN: [[C:%[^ ]+]] = f32[] constant(1)
; CHECK-GCN: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[CV]], [[CV1]], [[C]], [[C]], [[VB]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
)");
}
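// An f32 vector bias round-tripped through bf16 is fused as a bf16 bias
// using the BIAS epilogue.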
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF32VectorBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
b = f32[16] parameter(2)
b_bf16 = bf16[16] convert(b)
b_f32 = f32[16] convert(b_bf16)
b_bcast = f32[16,16] broadcast(b_f32), dimensions={1}
x_scale = f32[] parameter(3)
y_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[16,16] add(dot_a, b_bcast)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4)
; CHECK-NEXT: [[VB:%[^ ]+]] = f32[16]{0} parameter(2)
; CHECK-NEXT: [[VBC:%[^ ]+]] = bf16[16]{0} convert([[VB]])
; CHECK: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[VBC]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
)");
}
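// A vector bias followed by a ReLU is fused via the BIAS_RELU epilogue.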
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDVectorBiasThenReluActivationF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
b = f16[16] parameter(2)
b_bcast = f16[16,16] broadcast(b), dimensions={1}
x_f16 = f16[16,32] convert(x)
y_f16 = f16[32,16] convert(y)
x_scale = f16[] parameter(3)
y_scale = f16[] parameter(4)
x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast)
y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast)
c = f16[] constant(0)
c_bcast = f16[16,16] broadcast(c), dimensions={}
dot_a0 = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a = f16[16,16] add(dot_a0, b_bcast)
ROOT out = f16[16,16] maximum(dot_a, c_bcast)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{2e-3, 0.});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16], {{.*}}: f16[], {{.*}}: f16[]) -> f16[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3)
; CHECK-NEXT: [[CV:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4)
; CHECK-NEXT: [[CV1:%[^ ]+]] = f32[] convert([[P3]])
; CHECK-NEXT: [[VB:%[^ ]+]] = f16[16]{0} parameter(2)
; CHECK: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[CV]], [[CV1]], [[VB]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS_RELU"
; CHECK: }
)");
}
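// A rank-3 LHS bitcast to rank 2 around the dot: the GEMM is rewritten on
// the 2D shapes and the result is bitcast back, with the vector bias fused.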
TEST_P(ParameterizedFp8GemmRewriteTest, Rank3ScaledABUnscaledDVectorBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[4,16,16] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
b = f32[32] parameter(2)
b_f16 = f16[32] convert(b)
b_bcast = f16[4,16,32] broadcast(b_f16), dimensions={2}
x_f16 = f16[4,16,16] convert(x)
y_f16 = f16[16,32] convert(y)
x_scale = f16[] parameter(3)
y_scale = f16[] parameter(4)
x_scale_bcast = f16[4,16,16] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f16[4,16,16] multiply(x_f16, x_scale_bcast)
x_unscaled_bitcast = f16[64,16] bitcast(x_unscaled)
y_unscaled = f16[16,32] multiply(y_f16, y_scale_bcast)
dot_a = f16[64,32] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bitcast = f16[4,16,32]{2,1,0} bitcast(dot_a)
ROOT out = f16[4,16,32] add(dot_a_bitcast, b_bcast)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(m::GetTupleElement(
m::CustomCall({"__cublas$lt$matmul$f8"}), 0)
.WithShape(F16, {64, 32}))
.WithShape(F16, {4, 16, 32})));
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[4,16,16], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[32], {{.*}}: f16[], {{.*}}: f16[]) -> f16[4,16,32] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[4,16,16]{2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[64,16]{1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3)
; CHECK-NEXT: [[P2_CV:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4)
; CHECK-NEXT: [[P3_CV:%[^ ]+]] = f32[] convert([[P3]])
; CHECK-NEXT: [[B:%[^ ]+]] = f32[32]{0} parameter(2)
; CHECK-NEXT: [[B_F16:%[^ ]+]] = f16[32]{0} convert([[B]])
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f16[64,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BITCAST]], [[P1_TRANSPOSE]], [[P2_CV]], [[P3_CV]], [[B_F16]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
; CHECK: [[GEMM:%[^ ]+]] = f16[64,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK: ROOT [[OUT:%[^ ]+]] = f16[4,16,32]{2,1,0} bitcast([[GEMM]])
)");
}
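// The same rank-3 vector-bias case with dimensions that are not multiples of
// 16: operands and bias are padded and the result is sliced back.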
TEST_P(ParameterizedFp8GemmRewriteTest,
Rank3ScaledABUnscaledDVectorBiasPaddedF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[4,15,15] parameter(0)
y = <<F8E4M3>>[15,31] parameter(1)
b = f32[31] parameter(2)
b_f16 = f16[31] convert(b)
b_bcast = f16[4,15,31] broadcast(b_f16), dimensions={2}
x_f16 = f16[4,15,15] convert(x)
y_f16 = f16[15,31] convert(y)
x_scale = f16[] parameter(3)
y_scale = f16[] parameter(4)
x_scale_bcast = f16[4,15,15] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[15,31] broadcast(y_scale), dimensions={}
x_unscaled = f16[4,15,15] multiply(x_f16, x_scale_bcast)
x_unscaled_bitcast = f16[60,15] bitcast(x_unscaled)
y_unscaled = f16[15,31] multiply(y_f16, y_scale_bcast)
dot_a = f16[60,31] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bitcast = f16[4,15,31]{2,1,0} bitcast(dot_a)
ROOT out = f16[4,15,31] add(dot_a_bitcast, b_bcast)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Bitcast(m::Slice(m::GetTupleElement(
m::CustomCall({"__cublas$lt$matmul$f8"}), 0)
.WithShape(F16, {64, 32}))
.WithShape(F16, {60, 31}))
.WithShape(F16, {4, 15, 31})));
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[4,15,15], {{.*}}: <<F8E4M3>>[15,31], {{.*}}: f32[31], {{.*}}: f16[], {{.*}}: f16[]) -> f16[4,15,31] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[4,15,15]{2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[60,15]{1,0} bitcast([[P0]])
; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P0_PAD:%[^ ]+]] = <<F8E4M3>>[64,16]{1,0} pad([[P0_BITCAST]], [[C1]]), padding=0_4x0_1
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[15,31]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[31,15]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C2:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P1_PAD:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} pad([[P1_TRANSPOSE]], [[C2]]), padding=0_1x0_1
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3)
; CHECK-NEXT: [[P2_CV:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4)
; CHECK-NEXT: [[P3_CV:%[^ ]+]] = f32[] convert([[P3]])
; CHECK-NEXT: [[B:%[^ ]+]] = f32[31]{0} parameter(2)
; CHECK-NEXT: [[B_F16:%[^ ]+]] = f16[31]{0} convert([[B]])
; CHECK-NEXT: [[C3:%[^ ]+]] = f16[] constant(0)
; CHECK-NEXT: [[P2_PAD:%[^ ]+]] = f16[32]{0} pad([[B_F16]], [[C3]]), padding=0_1
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f16[64,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PAD]], [[P1_PAD]], [[P2_CV]], [[P3_CV]], [[P2_PAD]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"BIAS"
; CHECK: }
; CHECK: [[GEMM:%[^ ]+]] = f16[64,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK-NEXT: [[SLICE:%[^ ]+]] = f16[60,31]{1,0} slice([[GEMM]]), slice={[0:60], [0:31]}
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f16[4,15,31]{2,1,0} bitcast([[SLICE]])
)");
}
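// A rank-3 matrix bias is bitcast to 2D and fused into the custom call with
// beta=1.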
TEST_P(ParameterizedFp8GemmRewriteTest, Rank3ScaledABUnscaledDMatrixBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[4,16,16] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
b = f32[4,16,32] parameter(2)
x_f32 = f32[4,16,16] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(3)
y_scale = f32[] parameter(4)
x_scale_bcast = f32[4,16,16] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[4,16,16] multiply(x_f32, x_scale_bcast)
x_unscaled_bitcast = f32[64,16] bitcast(x_unscaled)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
dot_a = f32[64,32] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bitcast = f32[4,16,32]{2,1,0} bitcast(dot_a)
ROOT out = f32[4,16,32] add(dot_a_bitcast, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(m::GetTupleElement(
m::CustomCall({"__cublas$lt$matmul$f8"}), 0)
.WithShape(F32, {64, 32}))
.WithShape(F32, {4, 16, 32})));
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[4,16,16], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[4,16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[4,16,32] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[4,16,16]{2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[64,16]{1,0} bitcast([[P0]])
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[B:%[^ ]+]] = f32[4,16,32]{2,1,0} parameter(2)
; CHECK-NEXT: [[B_BITCAST:%[^ ]+]] = f32[64,32]{1,0} bitcast([[B]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4)
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[64,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BITCAST]], [[P1_TRANSPOSE]], [[B_BITCAST]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[GEMM:%[^ ]+]] = f32[64,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK: ROOT [[OUT:%[^ ]+]] = f32[4,16,32]{2,1,0} bitcast([[GEMM]])
)");
}
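// The padded variant of the rank-3 matrix-bias case: operands and bias are
// padded to multiples of 16 and the result is sliced back to shape.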
TEST_P(ParameterizedFp8GemmRewriteTest,
Rank3ScaledABUnscaledDMatrixBiasPaddedF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[3,15,15] parameter(0)
y = <<F8E4M3>>[15,31] parameter(1)
b = f32[3,15,31] parameter(2)
x_f32 = f32[3,15,15] convert(x)
y_f32 = f32[15,31] convert(y)
x_scale = f32[] parameter(3)
y_scale = f32[] parameter(4)
x_scale_bcast = f32[3,15,15] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[15,31] broadcast(y_scale), dimensions={}
x_unscaled = f32[3,15,15] multiply(x_f32, x_scale_bcast)
x_unscaled_bitcast = f32[45,15] bitcast(x_unscaled)
y_unscaled = f32[15,31] multiply(y_f32, y_scale_bcast)
dot_a = f32[45,31] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bitcast = f32[3,15,31]{2,1,0} bitcast(dot_a)
ROOT out = f32[3,15,31] add(dot_a_bitcast, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Bitcast(m::Slice(m::GetTupleElement(
m::CustomCall({"__cublas$lt$matmul$f8"}), 0)
.WithShape(F32, {48, 32}))
.WithShape(F32, {45, 31}))
.WithShape(F32, {3, 15, 31})));
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[3,15,15], {{.*}}: <<F8E4M3>>[15,31], {{.*}}: f32[3,15,31], {{.*}}: f32[], {{.*}}: f32[]) -> f32[3,15,31] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[3,15,15]{2,1,0} parameter(0)
; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[45,15]{1,0} bitcast([[P0]])
; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P0_PADDED:%[^ ]+]] = <<F8E4M3>>[48,16]{1,0} pad([[P0_BITCAST]], [[C1]]), padding=0_3x0_1
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[15,31]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[31,15]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[C2:%[^ ]+]] = <<F8E4M3>>[] constant(0)
; CHECK-NEXT: [[P1_PADDED:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} pad([[P1_TRANSPOSE]], [[C2]]), padding=0_1x0_1
; CHECK-NEXT: [[B:%[^ ]+]] = f32[3,15,31]{2,1,0} parameter(2)
; CHECK-NEXT: [[B_BITCAST:%[^ ]+]] = f32[45,31]{1,0} bitcast([[B]])
; CHECK-NEXT: [[C3:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[P2_PADDED:%[^ ]+]] = f32[48,32]{1,0} pad([[B_BITCAST]], [[C3]]), padding=0_3x0_1
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4)
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[48,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PADDED]], [[P1_PADDED]], [[P2_PADDED]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK-NEXT: [[GEMM:%[^ ]+]] = f32[48,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK-NEXT: [[SLICE:%[^ ]+]] = f32[45,31]{1,0} slice([[GEMM]]), slice={[0:45], [0:31]}
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[3,15,31]{2,1,0} bitcast([[SLICE]])
)");
}
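// A slice between the dot and the bias add: the dot is rewritten, but the
// slice and the bias add stay outside the custom call.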
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDMatrixBiasWithSliceF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[48,16] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
b = f32[32,16] parameter(2)
x_f32 = f32[48,16] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(3)
y_scale = f32[] parameter(4)
x_scale_bcast = f32[48,16] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[48,16] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
dot_a = f32[48,32] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_sliced = f32[32,16] slice(dot_a), slice={[16:48], [16:32]}
ROOT out = f32[32,16] add(dot_a_sliced, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[48,16], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[32,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[48,16]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4)
; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[48,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[GEMM:%[^_]+]] = f32[48,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
; CHECK-NEXT: [[SLICE:%[^ ]+]] = f32[32,16]{1,0} slice([[GEMM]]), slice={[16:48], [16:32]}
; CHECK-NEXT: [[B:%[^ ]+]] = f32[32,16]{1,0} parameter(2)
; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[32,16]{1,0} add([[SLICE]], [[B]])
)");
}
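// An all-gather between dequantization and dot is moved to operate on the
// FP8 operands directly.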
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDWithAllGatherF8) {
absl::string_view hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
all_gather = f32[16,64]{1,0} all-gather(x_unscaled), channel_id=1, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true
all_gather1 = f32[64,32]{1,0} all-gather(y_unscaled), channel_id=2, replica_groups={{0,2,4,6},{1,3,5,7}}, dimensions={0}, use_global_device_ids=true
ROOT dot_a = f32[16,32] dot(all_gather, all_gather1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(8);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,32] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK: [[AG:%[^ ]+]] = <<F8E4M3>>[16,64]{1,0} all-gather([[P0]]), {{[^ ]+}}
; CHECK: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK: [[AG1:%[^ ]+]] = <<F8E4M3>>[64,32]{1,0} all-gather([[P1]]), {{[^ ]+}}
; CHECK: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,64]{1,0} transpose([[AG1]]), dimensions={1,0}
; CHECK: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK: [[GEMM_TUPLE:%[^ ]+]] = (f32[16,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[AG]], [[P1_TRANSPOSE]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: ROOT [[GEMM:%[^_]+]] = f32[16,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0
)",
nullptr, &config);
}
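// Likewise for an all-to-all on the LHS.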
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDWithAllToAllF8) {
absl::string_view hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
all_to_all = f32[16,32]{1,0} all-to-all(x_unscaled), channel_id=1, replica_groups={{0,1,2,3},{4,5,6,7}}, dimensions={0}
ROOT dot_a = f32[16,16] dot(all_to_all, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(8);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK: [[AA:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} all-to-all([[P0]]), {{[^ ]+}}
; CHECK: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK: [[GEMM:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[AA]], [[P1]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)",
nullptr, &config);
}
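// Likewise for a collective-permute on the LHS.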
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDWithCollectivePermuteF8) {
absl::string_view hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[16,32] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[16,32] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast)
collective_permute = f32[16,32]{1,0} collective-permute(x_unscaled), source_target_pairs={{0,0}, {1,1}, {2,4}, {3,5}, {4,2}, {5,3}, {6,6}, {7,7}}
ROOT dot_a = f32[16,16] dot(collective_permute, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
)";
HloModuleConfig config = GetModuleConfigForTest();
config.set_use_spmd_partitioning(true);
config.set_num_partitions(8);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK: [[AA:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} collective-permute([[P0]]), {{[^ ]+}}
; CHECK: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1)
; CHECK: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK: [[GEMM:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[AA]], [[P1]], [[P2]], [[P3]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)",
nullptr, &config);
}
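// A matrix bias followed by a vector bias: the matrix bias is fused with
// beta=1 while the vector-bias add remains outside the custom call.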
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDMatrixBiasThenVectorBiasF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f16 = f16[16,32] convert(x)
y_f16 = f16[32,16] convert(y)
b = f16[16] parameter(2)
b_bcast = f16[16,16] broadcast(b), dimensions={1}
b2 = f16[16,16] parameter(3)
x_scale = f16[] parameter(4)
y_scale = f16[] parameter(5)
x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast)
y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast)
dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot_a_bias1 = f16[16,16] add(dot_a, b2)
ROOT dot_a_bias = f16[16,16] add(dot_a_bias1, b_bcast)
}
)";
CheckFp8IfSupported(hlo_text, ErrorSpec{2e-3, 0.});
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16], {{.*}}: f16[16,16], {{.*}}: f16[], {{.*}}: f16[]) -> f16[16,16] {
; CHECK-DAG: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0}
; CHECK-NEXT: [[MB:%[^ ]+]] = f16[16,16]{1,0} parameter(3)
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(4)
; CHECK-NEXT: [[CV0:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(5)
; CHECK-NEXT: [[CV1:%[^ ]+]] = f32[] convert([[P3]])
; CHECK: [[GEMMOUT_TUPLE:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[MB]], [[CV0]], [[CV1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":1
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
; CHECK: [[GEMMOUT:%[^ ]+]] = f16[16,16]{1,0} get-tuple-element([[GEMMOUT_TUPLE]]), index=0
; CHECK: [[VB:%[^ ]+]] = f16[16]{0} parameter(2)
; CHECK: [[VBC:%[^ ]+]] = f16[16,16]{1,0} broadcast([[VB]]), dimensions={1}
; CHECK: ROOT [[OUT:%[^ ]+]] = f16[16,16]{1,0} add([[GEMMOUT]], [[VBC]])
)");
}
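// A scaled F8 output together with an amax reduction: on CUDA the custom
// call returns the amax as an additional f32 result.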
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDWithDAmaxF8) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
z_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
abs_dot_a = f32[16,16] abs(dot_a)
c0 = f32[] constant(-inf)
amax = f32[] reduce(abs_dot_a, c0), dimensions={0,1}, to_apply=apply
dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast)
c1 = f32[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f32[16,16] broadcast(c1), dimensions={}
c2 = f32[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f32[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
ROOT out = (<<F8E4M3>>[16,16], f32[]) tuple(dot_a_f8, amax)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> (<<F8E4M3>>[16,16], f32[]) {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1)
; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4)
; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]])
; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]),
; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
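// The amax case with f16 intermediates: all f16 scales are converted to f32
// before being passed to the custom call.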
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABScaledDWithDAmaxF8WithF16Intermediates) {
const char* hlo_text = R"(
HloModule test
apply {
a = f16[] parameter(0)
b = f16[] parameter(1)
ROOT c = f16[] maximum(a, b)
}
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f16 = f16[16,32] convert(x)
y_f16 = f16[32,16] convert(y)
x_scale = f16[] parameter(2)
y_scale = f16[] parameter(3)
z_scale = f16[] parameter(4)
x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f16[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast)
y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast)
dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
abs_dot_a = f16[16,16] abs(dot_a)
c0 = f16[] constant(-inf)
amax = f16[] reduce(abs_dot_a, c0), dimensions={0,1}, to_apply=apply
dot_a_scaled = f16[16,16] divide(dot_a, z_scale_bcast)
c1 = f16[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f16[16,16] broadcast(c1), dimensions={}
c2 = f16[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f16[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f16[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
ROOT out = (<<F8E4M3>>[16,16], f16[]) tuple(dot_a_f8, amax)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[], {{.*}}: f16[], {{.*}}: f16[]) -> (<<F8E4M3>>[16,16], f16[]) {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(2)
; CHECK-NEXT: [[P2_CONVERT:%[^ ]+]] = f32[] convert([[P2]])
; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(3)
; CHECK-NEXT: [[P3_CONVERT:%[^ ]+]] = f32[] convert([[P3]])
; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f16[] constant(1)
; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f16[] parameter(4)
; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f16[] divide([[C2]], [[P4]])
; CHECK-PTX-NEXT: [[P4_INV_CONVERT:%[^ ]+]] = f32[] convert([[P4_INV]])
; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_CONVERT]], [[P3_CONVERT]], [[P4_INV_CONVERT]]),
; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_CONVERT]], [[P3_CONVERT]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
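// A ReLU followed by an amax reduction: fused via the RELU epilogue, with
// the amax returned by the custom call on CUDA.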
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABScaledDReluActivationWithDAmaxF8) {
const char* hlo_text = R"(
HloModule test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E4M3>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
z_scale = f32[] parameter(4)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
czero = f32[] constant(0)
czero_bcast = f32[16,16] broadcast(czero), dimensions={}
dot_a_relu = f32[16,16] maximum(dot_a, czero_bcast)
c0 = f32[] constant(-inf)
amax = f32[] reduce(dot_a_relu, c0), dimensions={0,1}, to_apply=apply
dot_a_scaled = f32[16,16] divide(dot_a_relu, z_scale_bcast)
c1 = f32[] constant(-<<F8E4M3_AMAX>>)
c1_bcast = f32[16,16] broadcast(c1), dimensions={}
c2 = f32[] constant(<<F8E4M3_AMAX>>)
c2_bcast = f32[16,16] broadcast(c2), dimensions={}
dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast)
dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped)
ROOT out = (<<F8E4M3>>[16,16], f32[]) tuple(dot_a_f8, amax)
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> (<<F8E4M3>>[16,16], f32[]) {
; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1)
; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]])
; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1)
; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4)
; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]])
; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]),
; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]),
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"RELU"
; CHECK: }
)");
}
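// Checks numerics of the FP8 rewrite at both operand precisions: "default"
// within 1e-3 and "highest" within the tighter 1e-4 bound.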
TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDPrecisionF8) {
const char* raw_hlo_template = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[1600,3200] parameter(0)
y = <<F8E4M3>>[3200,1600] parameter(1)
x_f32 = f32[1600,3200] convert(x)
y_f32 = f32[3200,1600] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[1600,3200] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[3200,1600] broadcast(y_scale), dimensions={}
x_unscaled = f32[1600,3200] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[3200,1600] multiply(y_f32, y_scale_bcast)
ROOT out = f32[1600,1600] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={<<precision>>,<<precision>>}
}
)";
std::string hlo_template =
absl::StrReplaceAll(raw_hlo_template, replacements_);
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<precision>>"] = "default";
const auto hlo_text_default = absl::StrReplaceAll(hlo_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text_default, ErrorSpec{1e-3, 1e-3}));
replacements["<<precision>>"] = "highest";
const auto hlo_text_highest = absl::StrReplaceAll(hlo_template, replacements);
EXPECT_TRUE(RunAndCompare(hlo_text_highest, ErrorSpec{1e-4, 1e-4}));
}
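// Sweeps all 32 combinations of output/A/B storage order and LHS/RHS
// contracting dimensions, verifying each variant still lowers to the
// cuBLASLt FP8 custom call.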
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8Parameterized) {
std::array<std::array<absl::string_view, 7>, 32> combinations;
int i = 0;
for (bool d_is_col : {false, true}) {
for (bool a_is_col : {false, true}) {
for (bool b_is_col : {false, true}) {
for (int lhs_contracting_dim : {0, 1}) {
for (int rhs_contracting_dim : {0, 1}) {
const absl::string_view lcd =
lhs_contracting_dim == 1 ? "{1}" : "{0}";
const absl::string_view rcd =
rhs_contracting_dim == 1 ? "{1}" : "{0}";
const absl::string_view a_shape =
lhs_contracting_dim == 1 ? "[64,32]" : "[32,64]";
const absl::string_view b_shape =
rhs_contracting_dim == 0 ? "[32,16]" : "[16,32]";
const absl::string_view a_layout = a_is_col ? "{0,1}" : "{1,0}";
const absl::string_view b_layout = b_is_col ? "{0,1}" : "{1,0}";
const absl::string_view output_layout =
d_is_col ? "{0,1}" : "{1,0}";
combinations[i++] = std::array{
lcd, rcd, a_shape, b_shape, a_layout, b_layout, output_layout};
}
}
}
}
}
const char* hlo_template = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>><<Ashape>><<Alayout>> parameter(0)
x_f32 = f32<<Ashape>><<Alayout>> convert(x)
x_scale = f32[] parameter(2)
x_scale_bcast = f32<<Ashape>> broadcast(x_scale), dimensions={}
x_unscaled = f32<<Ashape>> multiply(x_f32, x_scale_bcast)
y = <<F8E4M3>><<Bshape>><<Blayout>> parameter(1)
y_f32 = f32<<Bshape>><<Blayout>> convert(y)
y_scale = f32[] parameter(3)
y_scale_bcast = f32<<Bshape>> broadcast(y_scale), dimensions={}
y_unscaled = f32<<Bshape>> multiply(y_f32, y_scale_bcast)
ROOT out = f32[64,16]<<Olayout>> dot(x_unscaled, y_unscaled), lhs_contracting_dims=<<Lcd>>, rhs_contracting_dims=<<Rcd>>
}
)";
for (const auto& combination : combinations) {
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<Lcd>>"] = std::get<0>(combination);
replacements["<<Rcd>>"] = std::get<1>(combination);
replacements["<<Ashape>>"] = std::get<2>(combination);
replacements["<<Bshape>>"] = std::get<3>(combination);
replacements["<<Alayout>>"] = std::get<4>(combination);
replacements["<<Blayout>>"] = std::get<5>(combination);
replacements["<<Olayout>>"] = std::get<6>(combination);
const auto hlo_text = absl::StrReplaceAll(hlo_template, replacements);
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
)");
}
}
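// The same sweep for batched GEMMs: 32 combinations of contracting
// dimensions and operand/output layouts with an explicit batch dimension.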
TEST_P(ParameterizedFp8GemmRewriteTest,
ScaledABUnscaledDF8ParameterizedBatched) {
std::array<std::array<std::string, 7>, 32> combinations;
  std::string lcd, rcd, a_shape, b_shape, o_layout;
int i = 0;
for (bool o_is_col : {false, true}) {
for (int lhs_contracting_dim : {2, 1}) {
for (int rhs_contracting_dim : {2, 1}) {
lcd = lhs_contracting_dim == 2 ? "{2}" : "{1}";
rcd = rhs_contracting_dim == 2 ? "{2}" : "{1}";
a_shape = lhs_contracting_dim == 2 ? "[2,64,32]" : "[2,32,64]";
b_shape = rhs_contracting_dim == 1 ? "[2,32,16]" : "[2,16,32]";
        o_layout = o_is_col ? "{2,0,1}" : "{2,1,0}";
for (std::string a_layout : {"{2,1,0}", "{1,2,0}"}) {
for (std::string b_layout : {"{2,1,0}", "{1,2,0}"}) {
combinations[i++] = std::array{lcd, rcd, a_shape, b_shape,
a_layout, b_layout, o_layout};
}
}
}
}
}
const char* hlo_template = R"(
HloModule m
ENTRY f {
x_q = <<F8E4M3>><<Ashape>><<Alayout>> parameter(0)
x_scale = f32[] parameter(2)
x_scale_broadcast = f32<<Ashape>><<Alayout>> broadcast(x_scale), dimensions={}
x_q_convert = f32<<Ashape>><<Alayout>> convert(x_q)
x_qdq = f32<<Ashape>><<Alayout>> multiply(x_q_convert, x_scale_broadcast)
y_q = <<F8E4M3>><<Bshape>><<Blayout>> parameter(1)
y_scale = f32[] parameter(3)
y_scale_broadcast = f32<<Bshape>><<Blayout>> broadcast(y_scale), dimensions={}
y_q_convert = f32<<Bshape>><<Blayout>> convert(y_q)
y_qdq = f32<<Bshape>><<Blayout>> multiply(y_q_convert, y_scale_broadcast)
ROOT out = f32[2,64,16]<<Olayout>> dot(x_qdq, y_qdq), lhs_batch_dims={0}, lhs_contracting_dims=<<Lcd>>, rhs_batch_dims={0}, rhs_contracting_dims=<<Rcd>>
}
)";
for (const auto& combination : combinations) {
absl::flat_hash_map<std::string, std::string> replacements;
replacements["<<Lcd>>"] = std::get<0>(combination);
replacements["<<Rcd>>"] = std::get<1>(combination);
replacements["<<Ashape>>"] = std::get<2>(combination);
replacements["<<Bshape>>"] = std::get<3>(combination);
replacements["<<Alayout>>"] = std::get<4>(combination);
replacements["<<Blayout>>"] = std::get<5>(combination);
replacements["<<Olayout>>"] = std::get<6>(combination);
const auto hlo_text = absl::StrReplaceAll(hlo_template, replacements);
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
)");
}
}
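// An e4m3 LHS combined with an e5m2 RHS must still lower to the cuBLASLt FP8
// custom call.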
TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8TF32E5M2) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = <<F8E4M3>>[16,32] parameter(0)
y = <<F8E5M2>>[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckFp8IfSupported(hlo_text);
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK: custom_call_target="__cublas$lt$matmul$f8",
)");
}
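// f8e4m3fnuz is the ROCm FP8 flavor: on CUDA the rewriter must leave the
// graph unchanged, while on ROCm it rewrites to the FP8 custom call.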
TEST_P(ParameterizedFp8GemmRewriteTest, FnuzTypeF8) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
x = f8e4m3fnuz[16,32] parameter(0)
y = f8e4m3fnuz[32,16] parameter(1)
x_f32 = f32[16,32] convert(x)
y_f32 = f32[32,16] convert(y)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={}
y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={}
x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast)
y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast)
ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
if (IsCuda()) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GemmRewriter pass(
CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only});
TF_ASSERT_OK_AND_ASSIGN(bool changed,
this->RunHloPass(&pass, module.get()));
EXPECT_FALSE(changed);
return;
}
if (IsRocm()) {
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-2, 1e-2}));
RunAndFilecheckHloRewrite(
hlo_text,
GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(),
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}),
R"(
; CHECK-LABEL: ENTRY %test ({{.*}}: f8e4m3fnuz[16,32], {{.*}}: f8e4m3fnuz[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f8e4m3fnuz[16,32]{1,0} parameter(0)
; CHECK-PTX-NEXT: [[P0_CV:%[^ ]+]] = f32[16,32]{1,0} convert([[P0]])
; CHECK-PTX-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-PTX-NEXT: [[P2_B:%[^ ]+]] = f32[16,32]{1,0} broadcast([[P2]]), dimensions={}
; CHECK-PTX-NEXT: [[P0_UNSCALED:%[^ ]+]] = f32[16,32]{1,0} multiply([[P0_CV]], [[P2_B]])
; CHECK-PTX-NEXT: [[P1:%[^ ]+]] = f8e4m3fnuz[32,16]{1,0} parameter(1)
; CHECK-PTX-NEXT: [[P1_CV:%[^ ]+]] = f32[32,16]{1,0} convert([[P1]])
; CHECK-PTX-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-PTX-NEXT: [[P3_B:%[^ ]+]] = f32[32,16]{1,0} broadcast([[P3]]), dimensions={}
; CHECK-PTX-NEXT: [[P1_UNSCALED:%[^ ]+]] = f32[32,16]{1,0} multiply([[P1_CV]], [[P3_B]])
; CHECK-PTX-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0_UNSCALED]], [[P1_UNSCALED]]),
; CHECK-GCN-NEXT: [[P1:%[^ ]+]] = f8e4m3fnuz[32,16]{1,0} parameter(1)
; CHECK-GCN-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]])
; CHECK-GCN-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2)
; CHECK-GCN-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3)
; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1)
; CHECK-PTX: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>",
; CHECK-GCN: custom_call_target="__cublas$lt$matmul$f8",
; CHECK: backend_config={
; CHECK-DAG: "alpha_real":1
; CHECK-DAG: "alpha_imag":0
; CHECK-DAG: "beta":0
; CHECK-DAG: "dot_dimension_numbers":{
; CHECK-DAG: "lhs_contracting_dimensions":["1"]
; CHECK-PTX-DAG: "rhs_contracting_dimensions":["0"]
; CHECK-GCN-DAG: "rhs_contracting_dimensions":["1"]
; CHECK-DAG: "lhs_batch_dimensions":[]
; CHECK-DAG: "rhs_batch_dimensions":[]
; CHECK-DAG: }
; CHECK-DAG: "precision_config":{
; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"]
; CHECK-DAG: }
; CHECK-DAG: "epilogue":"DEFAULT"
; CHECK: }
)");
}
}
INSTANTIATE_TEST_SUITE_P(Fp8CublasTestsBothLegacyAndLt,
ParameterizedFp8GemmRewriteTest, ::testing::Bool());
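// A bias that is only reachable through reshape/broadcast round-trips must
// not be fused into the GEMM.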
TEST_F(GemmRewriteTest, NoFuseBiasBroadcast) {
const char* hlo = R"(
HloModule module
ENTRY main.10 {
Arg_0.1 = f16[384,128]{1,0} parameter(0)
Arg_1.2 = f16[128,256]{1,0} parameter(1)
dot.4 = f16[384,256]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
Arg_2.3 = f16[256]{0} parameter(2)
reshape.5 = f16[1,256]{1,0} reshape(Arg_2.3)
broadcast.6 = f16[1,256]{1,0} broadcast(reshape.5), dimensions={0,1}
reshape.7 = f16[256]{0} reshape(broadcast.6)
broadcast.8 = f16[384,256]{1,0} broadcast(reshape.7), dimensions={1}
ROOT add.9 = f16[384,256]{1,0} add(dot.4, broadcast.8)
})";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(GemmRewriteTest, ReduceOfBatchDot) {
absl::string_view hlo_string =
R"(
HloModule test
region_5.50 {
Arg_0.51 = f32[] parameter(0)
Arg_1.52 = f32[] parameter(1)
ROOT add.53 = f32[] add(Arg_0.51, Arg_1.52)
}
ENTRY main {
p0 = bf16[3,32,3,13]{3,2,1,0} parameter(0)
p1 = bf16[3,32,3,64]{3,2,1,0} parameter(1)
dot.95 = bf16[3,3,13,64]{3,2,1,0} dot(p0, p1), lhs_batch_dims={0,2}, lhs_contracting_dims={1}, rhs_batch_dims={0,2}, rhs_contracting_dims={1}, operand_precision={highest,highest}
transpose.96 = bf16[3,64,3,13]{1,3,2,0} transpose(dot.95), dimensions={0,3,1,2}
convert.101 = f32[3,64,3,13]{1,3,2,0} convert(transpose.96)
constant.66 = f32[] constant(0.0)
ROOT reduce.102 = f32[3,64,13]{2,1,0} reduce(convert.101, constant.66), dimensions={2}, to_apply=region_5.50
}
)";
MatchOptimizedHlo(hlo_string, R"(
)");
}
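// Identical HLO to DotWithoutBias below; the two tests differ only in the
// GemmRewriterOptions::BiasMode handed to the rewriter.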
TEST_F(GemmRewriteTest, DotWithBias) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
p2 = f32[1024,1024] parameter(2)
p3 = f32[1024,1024] parameter(3)
dot0 = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[1024,1024] dot(p2, p3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT root = f32[1024,1024] add(dot0, dot1)
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(
hlo,
GemmRewriter(
se::CudaComputeCapability{},
stream_executor::SemanticVersion{0, 0, 0},
GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only}),
expected);
}
TEST_F(GemmRewriteTest, DotWithoutBias) {
const char* hlo = R"(
HloModule m
ENTRY main {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
p2 = f32[1024,1024] parameter(2)
p3 = f32[1024,1024] parameter(3)
dot0 = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot1 = f32[1024,1024] dot(p2, p3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT root = f32[1024,1024] add(dot0, dot1)
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(
hlo,
GemmRewriter(
se::CudaComputeCapability{},
stream_executor::SemanticVersion{0, 0, 0},
GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only,
GemmRewriterOptions::BiasMode::kNoBias}),
expected);
}
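// cuBLASLt can handle a large c64 LHS on CUDA; on ROCm the rewrite falls
// back to the legacy __cublas$gemm target.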
TEST_F(CublasLtGemmRewriteTest, CublasLtSuccessfullyMatchesLargeC64Lhs) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
p0 = c64[2000,3000,3]{2,1,0} parameter(0)
p1 = c64[3,6]{1,0} parameter(1)
ROOT dot = c64[2000,3000,6]{2,1,0} dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
}
)";
if (IsCuda()) {
MatchOptimizedHlo(hlo_text,
R"(; CHECK: custom_call_target="__cublas$lt$matmul")");
} else {
MatchOptimizedHlo(hlo_text,
R"(; CHECK: custom_call_target="__cublas$gemm")");
}
}
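// A large c64 RHS is only matched by cuBLASLt on Ampere and newer compute
// capabilities.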
TEST_F(CublasLtGemmRewriteTest, CublasLtOnlyMatchesLargeC64RhsPostAmpere) {
const char* hlo_text = R"(
HloModule test
ENTRY test {
p0 = c64[6,3]{1,0} parameter(0)
p1 = c64[3,2000,3000]{2,1,0} parameter(1)
ROOT dot = c64[6,2000,3000]{2,1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
if (HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) {
MatchOptimizedHlo(hlo_text,
R"(; CHECK: custom_call_target="__cublas$lt$matmul")");
} else {
MatchOptimizedHlo(
hlo_text, R"(; CHECK-NOT: custom_call_target="__cublas$lt$matmul")");
}
}
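// Compiles the optimized module end-to-end and asserts on the number of
// buffer allocations, catching regressions in GEMM output buffer sharing.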
class GemmRewriteAllocationTest : public GpuCodegenTest {
public:
void CheckNumberOfAllocations(const std::string& hlo,
int expected_number_of_allocations) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(hlo));
if (allocator_ == nullptr) {
allocator_ = std::make_unique<se::StreamExecutorMemoryAllocator>(
backend().default_stream_executor());
}
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Executable> executable,
backend().compiler()->RunBackend(std::move(optimized_module),
backend().default_stream_executor(),
allocator_.get()));
GpuExecutable* gpu_executable =
static_cast<GpuExecutable*>(executable.get());
absl::Span<const BufferAllocation> allocations =
gpu_executable->GetAllocations();
ASSERT_EQ(allocations.size(), expected_number_of_allocations);
}
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0);
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
private:
std::unique_ptr<se::DeviceMemoryAllocator> allocator_;
};
TEST_F(GemmRewriteAllocationTest, SharedBufferAssignment) {
const char* hlo_text = R"(
HloModule SharedBufferAssignment
ENTRY AddDotsFunc {
x = f32[2,2] parameter(0)
y = f32[2,2] parameter(1)
bias = f32[2,2] add(x, y)
dot = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT out = f32[2,2] add(dot, bias)
}
)";
CheckNumberOfAllocations(hlo_text, 4);
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
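// With xla_gpu_gemm_rewrite_size_threshold set to 100, dots below the
// threshold must stay as plain HLO dots while larger ones are rewritten to
// custom calls.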
class SmallDotGemmRewriteTest : public GemmRewriteTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_gemm_rewrite_size_threshold(100);
return debug_options;
}
};
TEST_F(SmallDotGemmRewriteTest, SkipSmallMatrixMultiplicationRewrite) {
const char* hlo_text = R"(
HloModule SkipSmallMatrixRewrite
ENTRY DotFunc {
x = f32[3,3] parameter(0)
y = f32[3,3] parameter(1)
ROOT out = f32[3,3] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %DotFunc ({{.*}}: f32[3,3], {{.*}}: f32[3,3]) -> f32[3,3] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,3]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,3]{1,0} parameter(1)
; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} dot([[P0]], [[P1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
)");
}
TEST_F(SmallDotGemmRewriteTest, LargeMatrixMultiplicationIsRewritten) {
const char* hlo_text = R"(
HloModule SkipSmallMatrixRewrite
ENTRY DotFunc {
x = f32[8,8] parameter(0)
y = f32[8,8] parameter(1)
ROOT out = f32[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
MatchOptimizedHlo(hlo_text,
R"(
; CHECK-LABEL: ENTRY %DotFunc ({{.*}}: f32[8,8], {{.*}}: f32[8,8]) -> f32[8,8] {
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[8,8]{1,0} parameter(0)
; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8,8]{1,0} parameter(1)
; CHECK: {{[^ ]+}} = {{.*}} custom-call([[P0]], [[P1]])
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
61bb902e-4e11-4c95-ab9f-f2ec2d85005e | cpp | tensorflow/tensorflow | graph_view_internal | tensorflow/core/grappler/utils/graph_view_internal.h | tensorflow/core/grappler/utils/graph_view_internal_test.cc | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_GRAPH_VIEW_INTERNAL_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_GRAPH_VIEW_INTERNAL_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace internal {
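// Sentinel values: kMissingSlot marks an absent tensor port, kMissingIndex an
// absent node or fanin index, and kNodeNamePresent tags an entry in the
// updated-node-names map as a node that (still) exists in the graph.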
constexpr int kMissingSlot = -2;
constexpr int kMissingIndex = -1;
constexpr int kNodeNamePresent = -1;
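// Handle identifying a node within a graph view together with one of its
// ports; serves as the base for the fanin/fanout view types.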
template <typename NodeViewT, typename GraphViewT>
class NodeIndexAndPortIndex {
public:
NodeIndexAndPortIndex()
: graph_view_(nullptr),
node_index_(kMissingIndex),
port_index_(kMissingSlot) {}
NodeIndexAndPortIndex(GraphViewT* graph_view, int node_index, int port_index)
: graph_view_(graph_view),
node_index_(node_index),
port_index_(port_index) {}
bool operator==(const NodeIndexAndPortIndex& other) const {
return port_index_ == other.port_index_ &&
node_index_ == other.node_index_ && graph_view_ == other.graph_view_;
}
template <typename Hash>
friend Hash AbslHashValue(Hash h, const NodeIndexAndPortIndex& n) {
return Hash::combine(std::move(h), n.node_index_, n.port_index_);
}
NodeViewT* node_view() const {
if (graph_view_ == nullptr) {
return nullptr;
}
return graph_view_->GetNode(node_index_);
}
int node_index() const { return node_index_; }
int index() const { return port_index_; }
protected:
GraphViewT* graph_view_;
int node_index_;
int port_index_;
};
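// Hashable pair of a NodeDef pointer and a port index, for keying fanin and
// fanout lookups by the underlying NodeDef.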
class NodeDefAndPortIndex {
public:
NodeDefAndPortIndex(const NodeDef* node_def, int port_index)
: node_def_(node_def), port_index_(port_index) {}
bool operator==(const NodeDefAndPortIndex& other) const {
return node_def_ == other.node_def_ && port_index_ == other.port_index_;
}
template <typename Hash>
friend Hash AbslHashValue(Hash h, const NodeDefAndPortIndex& n) {
return Hash::combine(std::move(h), n.node_def_, n.port_index_);
}
private:
const NodeDef* node_def_;
int port_index_;
};
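// Base class template for node views over a NodeDef. FaninViewT/FanoutViewT
// are the handle types for this node's inputs and outputs, and IsConst
// selects between const and mutable NodeDef access.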
template <typename FaninViewT, typename FanoutViewT, typename GraphViewT,
bool IsConst>
class NodeViewInternal {
private:
using NodeDefT =
typename std::conditional<IsConst, const NodeDef, NodeDef>::type;
public:
explicit NodeViewInternal(GraphViewT* graph_view, int node_index)
: graph_view_(graph_view),
node_index_(node_index),
attrs_(AttrSlice(graph_view->graph()->node(node_index))) {}
NodeViewInternal()
: graph_view_(nullptr), node_index_(kMissingIndex), attrs_(AttrSlice()) {}
virtual ~NodeViewInternal() {}
NodeViewInternal(NodeViewInternal&&) = default;
NodeViewInternal& operator=(NodeViewInternal&&) = default;
bool operator==(const NodeViewInternal& other) const {
return node_index_ == other.node_index_ && graph_view_ == other.graph_view_;
}
template <typename Hash>
friend Hash AbslHashValue(Hash h, const NodeViewInternal& n) {
return Hash::combine(std::move(h), n.node_index_);
}
virtual NodeDefT* node() const = 0;
int node_index() const { return node_index_; }
const string& GetName() const { return node()->name(); }
const string& GetOp() const { return node()->op(); }
const string& GetDevice() const { return node()->device(); }
const std::vector<FanoutViewT>& GetRegularFanins() const {
return regular_fanins_;
}
const FanoutViewT& GetRegularFanin(int i) const {
int regular_fanins_size = regular_fanins_.size();
if (i < 0 || i >= regular_fanins_size) {
return GetMissingFanin();
}
return regular_fanins_[i];
}
const std::vector<FanoutViewT>& GetControllingFanins() const {
return controlling_fanins_;
}
const std::vector<std::vector<FaninViewT>>& GetRegularFanouts() const {
return regular_fanouts_by_port_;
}
const std::vector<FaninViewT>& GetRegularFanout(int i) const {
int regular_fanouts_by_port_size = regular_fanouts_by_port_.size();
if (i < 0 || i >= regular_fanouts_by_port_size) {
return GetMissingFanout();
}
return regular_fanouts_by_port_[i];
}
const std::vector<FaninViewT>& GetControlledFanouts() const {
return controlled_fanouts_;
}
int NumRegularFanins() const { return regular_fanins_.size(); }
int NumControllingFanins() const { return controlling_fanins_.size(); }
int NumRegularFanouts() const { return num_regular_fanouts_; }
int NumControlledFanouts() const { return controlled_fanouts_.size(); }
virtual bool HasFanin(const FanoutViewT& fanin) const = 0;
virtual bool HasFanout(const FaninViewT& fanout) const = 0;
const AttrValue* GetAttr(absl::string_view attr_name) const {
return attrs_.Find(attr_name);
}
const AttrSlice& GetAttrs() const { return attrs_; }
int NumAttrs() const { return attrs_.size(); }
bool HasAttr(absl::string_view attr_name) const {
return attrs_.Find(attr_name) != nullptr;
}
protected:
virtual inline const FanoutViewT& GetMissingFanin() const = 0;
virtual inline const std::vector<FaninViewT>& GetMissingFanout() const = 0;
std::vector<FanoutViewT> regular_fanins_;
std::vector<FanoutViewT> controlling_fanins_;
std::vector<std::vector<FaninViewT>> regular_fanouts_by_port_;
int num_regular_fanouts_ = 0;
std::vector<FaninViewT> controlled_fanouts_;
GraphViewT* graph_view_;
int node_index_;
AttrSlice attrs_;
};
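// Base class template for graph views over a GraphDef: owns the node views
// and a node-name-to-index map, with lookup by index or by name.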
template <typename NodeViewT, typename FaninViewT, typename FanoutViewT,
bool IsConst>
class GraphViewInternal {
private:
using GraphDefT =
typename std::conditional<IsConst, const GraphDef, GraphDef>::type;
public:
explicit GraphViewInternal(GraphDefT* graph) : graph_(graph) {}
virtual ~GraphViewInternal() {}
bool operator==(const GraphViewInternal& other) const {
return graph_ == other.graph_;
}
GraphDefT* graph() const { return graph_; }
const NodeViewT* GetNode(int node_index) const {
int nodes_size = nodes_.size();
if (node_index < 0 || node_index >= nodes_size) {
return nullptr;
}
return &nodes_[node_index];
}
NodeViewT* GetNode(int node_index) {
int nodes_size = nodes_.size();
if (node_index < 0 || node_index >= nodes_size) {
return nullptr;
}
return &nodes_[node_index];
}
const NodeViewT* GetNode(absl::string_view node_name) const {
auto it = node_index_by_name_.find(node_name);
if (it == node_index_by_name_.end()) {
return nullptr;
}
return &nodes_[it->second];
}
NodeViewT* GetNode(absl::string_view node_name) {
auto it = node_index_by_name_.find(node_name);
if (it == node_index_by_name_.end()) {
return nullptr;
}
return &nodes_[it->second];
}
const std::vector<NodeViewT>& GetNodes() const { return nodes_; }
bool HasNode(absl::string_view node_name) const {
return node_index_by_name_.contains(node_name);
}
int NumNodes() const { return nodes_.size(); }
protected:
void Reset() {
std::vector<NodeViewT>().swap(nodes_);
absl::flat_hash_map<absl::string_view, int>().swap(node_index_by_name_);
}
std::vector<NodeViewT> nodes_;
absl::flat_hash_map<absl::string_view, int> node_index_by_name_;
GraphDefT* graph_;
const FanoutViewT missing_fanin_;
const std::vector<FaninViewT> missing_fanout_;
};
inline SafeTensorId EmptyTensorId() {
return SafeTensorId("", internal::kMissingSlot);
}
inline bool IsEmptyTensorId(const TensorId tensor_id) {
return tensor_id.node().empty() &&
tensor_id.index() == internal::kMissingSlot;
}
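// Accumulated mutation for one existing node. Regular-input removals are
// recorded back-to-front relative to the current fanin list, additions are
// recorded relative to the current fanin count, and control/attribute
// changes are tracked as add/remove sets.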
template <typename GraphViewT>
struct NodeViewDiff {
explicit NodeViewDiff(GraphViewT* graph_view, int node_index)
: graph_view(graph_view), node_index(node_index) {}
GraphViewT* graph_view;
int node_index;
string name;
bool update_name = false;
string op;
bool update_op = false;
string device;
bool update_device = false;
std::vector<SafeTensorId> regular_inputs_to_add;
int num_regular_inputs_to_add = 0;
std::map<int, SafeTensorId> regular_inputs_to_update;
std::vector<bool> regular_inputs_to_remove;
int num_regular_inputs_to_remove = 0;
absl::flat_hash_set<string> controlling_inputs_to_add;
std::set<int> controlling_inputs_to_remove;
absl::flat_hash_map<string, AttrValue> attrs_to_add;
absl::flat_hash_set<string> attrs_to_remove;
absl::optional<AttrValueMap> processed_attrs;
};
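// Illustrative usage, mirroring the unit tests (the graph and node names are
// examples, not part of this header):
//
//   Status s;
//   MutableGraphView graph_view(&graph, &s);
//   MutableNodeView* d = graph_view.GetNode("d");
//   NodeViewDiff<MutableGraphView> diff(&graph_view, d->node_index());
//   AddOrUpdateRegularFanin(&diff, 0, {"a", 3});  // Update regular fanin 0.
//   RemoveControllingFanin(&diff, 0, "c");        // Drop control dep ^c.
//   if (IsWellFormed(&diff, updated_node_names)) { /* safe to apply */ }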
template <typename GraphViewT>
inline bool UpdateName(NodeViewDiff<GraphViewT>* diff, absl::string_view name) {
if (diff->graph_view->GetNode(diff->node_index)->GetName() == name) {
diff->name.clear();
diff->update_name = false;
} else {
diff->name = string(name);
diff->update_name = true;
}
return true;
}
template <typename GraphViewT>
inline bool UpdateOp(NodeViewDiff<GraphViewT>* diff, absl::string_view op) {
if (diff->graph_view->GetNode(diff->node_index)->GetOp() == op) {
diff->op.clear();
diff->update_op = false;
} else {
diff->op = string(op);
diff->update_op = true;
}
return true;
}
template <typename GraphViewT>
inline bool UpdateDevice(NodeViewDiff<GraphViewT>* diff,
absl::string_view device) {
if (diff->graph_view->GetNode(diff->node_index)->GetDevice() == device) {
diff->device.clear();
diff->update_device = false;
} else {
diff->device = string(device);
diff->update_device = true;
}
return true;
}
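// Writes `value` at index `i`, growing the vector with `default_value`
// padding if needed. Returns true when this introduces a new (previously
// default or absent) entry, false when it merely overwrites a non-default
// one.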
template <typename T, typename U>
inline bool AddOrUpdateAtIndex(std::vector<T>* v, int i, const U& value,
const T& default_value) {
int v_size = v->size();
if (i > v_size) {
v->reserve(i + 1);
v->resize(i, default_value);
v->push_back({value});
} else if (i == v_size) {
v->push_back({value});
} else {
bool updated = (*v)[i] == default_value;
(*v)[i] = {value};
return updated;
}
return true;
}
template <typename GraphViewT>
inline bool CheckNodeNameExists(
absl::string_view node_name,
const absl::flat_hash_map<absl::string_view, int>& updated_node_names,
const GraphViewT* graph_view) {
auto it = updated_node_names.find(node_name);
if (it != updated_node_names.end()) {
return it->second == kNodeNamePresent;
}
return graph_view->HasNode(node_name);
}
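// Indices below the node's current fanin count become in-place updates (and
// cancel any pending removal at that index); indices at or past the end are
// recorded as appended fanins at a relative offset.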
template <typename GraphViewT>
inline bool AddOrUpdateRegularFanin(NodeViewDiff<GraphViewT>* diff, int index,
const TensorId& fanin) {
if (index < 0) {
return false;
}
auto* node_view = diff->graph_view->GetNode(diff->node_index);
const int num_regular_fanins = node_view->NumRegularFanins();
if (index < num_regular_fanins) {
const int relative_removal_index = num_regular_fanins - index - 1;
int diff_regular_inputs_to_remove_size =
diff->regular_inputs_to_remove.size();
if (relative_removal_index < diff_regular_inputs_to_remove_size &&
diff->regular_inputs_to_remove[relative_removal_index]) {
diff->regular_inputs_to_remove[relative_removal_index] = false;
--diff->num_regular_inputs_to_remove;
}
const auto& existing_fanin = node_view->GetRegularFanin(index);
if (existing_fanin.index() != fanin.index() ||
existing_fanin.node_view()->GetName() != fanin.node()) {
gtl::InsertOrUpdate(&diff->regular_inputs_to_update, index,
SafeTensorId(fanin));
}
} else {
const int relative_add_index = index - num_regular_fanins;
if (AddOrUpdateAtIndex(&diff->regular_inputs_to_add, relative_add_index,
fanin, EmptyTensorId())) {
++diff->num_regular_inputs_to_add;
}
}
return true;
}
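// Removals of existing fanins are tracked back-to-front so that trailing
// removals can later be trimmed; removing a pending addition simply resets
// it to the empty tensor id.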
template <typename GraphViewT>
inline bool RemoveRegularFanin(NodeViewDiff<GraphViewT>* diff, int index) {
if (index < 0) {
return false;
}
auto* node_view = diff->graph_view->GetNode(diff->node_index);
const int num_regular_fanins = node_view->NumRegularFanins();
if (index < num_regular_fanins) {
diff->regular_inputs_to_update.erase(index);
const int relative_removal_index = num_regular_fanins - index - 1;
if (AddOrUpdateAtIndex(&diff->regular_inputs_to_remove,
relative_removal_index,
true, false)) {
++diff->num_regular_inputs_to_remove;
}
} else {
const int relative_add_index = index - num_regular_fanins;
int diff_regular_inputs_to_add_size = diff->regular_inputs_to_add.size();
if (relative_add_index >= diff_regular_inputs_to_add_size ||
IsEmptyTensorId(diff->regular_inputs_to_add[relative_add_index])) {
return false;
}
diff->regular_inputs_to_add[relative_add_index] = EmptyTensorId();
--diff->num_regular_inputs_to_add;
}
return true;
}
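// control_index == kMissingIndex means the node is not currently a
// controlling fanin, so it is staged for addition; otherwise any pending
// removal of the existing control dependency is cancelled.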
template <typename GraphViewT>
inline bool AddControllingFanin(NodeViewDiff<GraphViewT>* diff,
int control_index,
absl::string_view fanin_node_name) {
if (control_index == kMissingIndex) {
diff->controlling_inputs_to_add.emplace(fanin_node_name);
} else {
diff->controlling_inputs_to_remove.erase(control_index);
}
return true;
}
template <typename GraphViewT>
inline bool RemoveControllingFanin(NodeViewDiff<GraphViewT>* diff,
int control_index,
absl::string_view fanin_node_name) {
if (control_index == kMissingIndex) {
diff->controlling_inputs_to_add.erase(fanin_node_name);
} else {
diff->controlling_inputs_to_remove.emplace(control_index);
}
return true;
}
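// Adding an attribute cancels any pending removal of the same name; removing
// one cancels a pending addition and only records a removal if the attribute
// actually exists on the node.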
template <typename GraphViewT>
inline bool AddOrUpdateAttribute(NodeViewDiff<GraphViewT>* diff,
absl::string_view attr_name,
const AttrValue& attr_value) {
  if (!diff->attrs_to_remove.empty()) {
    diff->attrs_to_remove.erase(attr_name);
  }
gtl::InsertOrUpdate(&diff->attrs_to_add, string(attr_name), attr_value);
return true;
}
template <typename GraphViewT>
inline bool RemoveAttribute(NodeViewDiff<GraphViewT>* diff,
absl::string_view attr_name) {
const size_t num_erased =
diff->attrs_to_add.empty() ? 0 : diff->attrs_to_add.erase(attr_name);
auto* node_view = diff->graph_view->GetNode(diff->node_index);
if (node_view->HasAttr(attr_name)) {
diff->attrs_to_remove.emplace(attr_name);
return true;
}
return num_erased > 0;
}
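// Shrinks `v` by dropping the longest suffix whose elements all equal
// `value`.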
template <typename T>
inline void ResizeByTrimmingEndForValue(std::vector<T>* v, const T& value) {
int curr_index = v->size();
const int last_index = v->size() - 1;
for (int i = last_index; i >= 0; --i) {
if ((*v)[i] == value) {
curr_index = i;
} else {
break;
}
}
if (curr_index <= last_index) {
v->resize(curr_index);
}
}
template <typename GraphViewT>
inline bool IsEmpty(NodeViewDiff<GraphViewT>* diff) {
ResizeByTrimmingEndForValue(&diff->regular_inputs_to_remove, false);
ResizeByTrimmingEndForValue(&diff->regular_inputs_to_add, EmptyTensorId());
return !diff->update_name && !diff->update_op && !diff->update_device &&
diff->regular_inputs_to_add.empty() &&
diff->regular_inputs_to_update.empty() &&
diff->regular_inputs_to_remove.empty() &&
diff->controlling_inputs_to_add.empty() &&
diff->controlling_inputs_to_remove.empty() &&
diff->attrs_to_add.empty() && diff->attrs_to_remove.empty();
}
template <typename GraphViewT>
inline void Reset(NodeViewDiff<GraphViewT>* diff) {
diff->name.clear();
diff->update_name = false;
diff->op.clear();
diff->update_op = false;
diff->device.clear();
diff->update_device = false;
std::vector<SafeTensorId>().swap(diff->regular_inputs_to_add);
  diff->num_regular_inputs_to_add = 0;
std::map<int, SafeTensorId>().swap(diff->regular_inputs_to_update);
std::vector<bool>().swap(diff->regular_inputs_to_remove);
diff->num_regular_inputs_to_remove = 0;
absl::flat_hash_set<string>().swap(diff->controlling_inputs_to_add);
std::set<int>().swap(diff->controlling_inputs_to_remove);
absl::flat_hash_map<string, AttrValue>().swap(diff->attrs_to_add);
absl::flat_hash_set<string>().swap(diff->attrs_to_remove);
}
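// A diff is well formed iff pending additions form a contiguous appended
// suffix, additions and removals are not mixed, and every resulting fanin
// name resolves to an existing node other than the (possibly renamed) node
// itself.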
template <typename GraphViewT>
inline bool IsWellFormed(
NodeViewDiff<GraphViewT>* diff,
const absl::flat_hash_map<absl::string_view, int>& updated_node_names) {
ResizeByTrimmingEndForValue(&diff->regular_inputs_to_remove, false);
ResizeByTrimmingEndForValue(&diff->regular_inputs_to_add, EmptyTensorId());
int diff_regular_inputs_to_add_size = diff->regular_inputs_to_add.size();
if (diff_regular_inputs_to_add_size != diff->num_regular_inputs_to_add) {
return false;
} else if (diff->num_regular_inputs_to_add > 0 &&
!diff->regular_inputs_to_remove.empty()) {
return false;
} else if (static_cast<int>(diff->regular_inputs_to_remove.size()) !=
diff->num_regular_inputs_to_remove) {
return false;
}
auto* node_view = diff->graph_view->GetNode(diff->node_index);
const string& node_name =
diff->update_name ? diff->name : node_view->GetName();
auto invalid_node_name = [&](absl::string_view fanin_node_name) -> bool {
return fanin_node_name == node_name ||
!CheckNodeNameExists(fanin_node_name, updated_node_names,
diff->graph_view);
};
if (diff->update_name) {
const int last_index =
node_view->NumRegularFanins() - diff->num_regular_inputs_to_remove - 1;
auto regular_to_update_it = diff->regular_inputs_to_update.begin();
for (int i = 0; i <= last_index; ++i) {
if (regular_to_update_it != diff->regular_inputs_to_update.end() &&
regular_to_update_it->first < i) {
++regular_to_update_it;
}
if (regular_to_update_it != diff->regular_inputs_to_update.end() &&
regular_to_update_it->first == i) {
if (invalid_node_name(regular_to_update_it->second.node())) {
return false;
}
} else {
const string& regular_name =
node_view->GetRegularFanin(i).node_view()->GetName();
if (regular_name == node_name) {
return false;
}
}
}
auto& controls = node_view->GetControllingFanins();
const int num_controls = controls.size();
auto control_to_remove_it = diff->controlling_inputs_to_remove.begin();
for (int i = 0; i < num_controls; ++i) {
if (control_to_remove_it != diff->controlling_inputs_to_remove.end() &&
*control_to_remove_it < i) {
++control_to_remove_it;
}
if (control_to_remove_it != diff->controlling_inputs_to_remove.end() &&
*control_to_remove_it == i) {
continue;
} else if (controls[i].node_view()->GetName() == node_name) {
return false;
}
}
} else {
for (const auto& updated : diff->regular_inputs_to_update) {
const string& fanin_name = updated.second.node();
if (invalid_node_name(fanin_name)) {
return false;
}
}
}
for (const auto& regular : diff->regular_inputs_to_add) {
if (invalid_node_name(regular.node())) {
return false;
}
}
for (const auto& control : diff->controlling_inputs_to_add) {
if (invalid_node_name(control)) {
return false;
}
}
return true;
}
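// Mutation state for a node that does not yet exist in the graph; fanins are
// staged here and validated against the graph before insertion.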
template <typename GraphViewT>
struct NewNode {
explicit NewNode(GraphViewT* graph_view, NodeDef&& node)
: graph_view(graph_view), node(std::move(node)) {}
GraphViewT* graph_view;
NodeDef node;
std::vector<SafeTensorId> regular_fanins;
int num_regular_fanins = 0;
absl::flat_hash_set<string> controlling_fanins;
};
template <typename GraphViewT>
inline void UpdateName(NewNode<GraphViewT>* new_node, absl::string_view name) {
if (name.empty()) {
new_node->node.clear_name();
} else {
new_node->node.set_name(string(name));
}
}
template <typename GraphViewT>
inline void UpdateOp(NewNode<GraphViewT>* new_node, absl::string_view op) {
if (op.empty()) {
new_node->node.clear_op();
} else {
new_node->node.set_op(string(op));
}
}
template <typename GraphViewT>
inline void UpdateDevice(NewNode<GraphViewT>* new_node,
absl::string_view device) {
if (device.empty()) {
new_node->node.clear_device();
} else {
new_node->node.set_device(string(device));
}
}
template <typename GraphViewT>
inline void AddOrUpdateRegularFanin(NewNode<GraphViewT>* new_node, int index,
const TensorId& fanin) {
if (index < 0) {
return;
} else if (AddOrUpdateAtIndex(&new_node->regular_fanins, index, fanin,
EmptyTensorId())) {
++new_node->num_regular_fanins;
}
}
template <typename GraphViewT>
inline void RemoveRegularFanin(NewNode<GraphViewT>* new_node, int index) {
int new_node_regular_fanins_size = new_node->regular_fanins.size();
if (index < 0 || index >= new_node_regular_fanins_size ||
IsEmptyTensorId(new_node->regular_fanins[index])) {
return;
}
new_node->regular_fanins[index] = EmptyTensorId();
--new_node->num_regular_fanins;
}
template <typename GraphViewT>
inline void AddControllingFanin(NewNode<GraphViewT>* new_node,
absl::string_view fanin_node_name) {
new_node->controlling_fanins.emplace(fanin_node_name);
}
template <typename GraphViewT>
inline void RemoveControllingFanin(NewNode<GraphViewT>* new_node,
absl::string_view fanin_node_name) {
new_node->controlling_fanins.erase(fanin_node_name);
}
template <typename GraphViewT>
inline void AddOrUpdateAttribute(NewNode<GraphViewT>* new_node,
absl::string_view attr_name,
const AttrValue& attr_value) {
gtl::InsertOrUpdate(new_node->node.mutable_attr(), string(attr_name),
attr_value);
}
template <typename GraphViewT>
inline void RemoveAttribute(NewNode<GraphViewT>* new_node,
absl::string_view attr_name) {
new_node->node.mutable_attr()->erase(string(attr_name));
}
template <typename GraphViewT>
inline bool IsWellFormed(
NewNode<GraphViewT>* new_node,
const absl::flat_hash_map<absl::string_view, int>& updated_node_names) {
ResizeByTrimmingEndForValue(&new_node->regular_fanins, EmptyTensorId());
int new_node_regular_fanins_size = new_node->regular_fanins.size();
if (new_node_regular_fanins_size != new_node->num_regular_fanins) {
return false;
}
const string& node_name = new_node->node.name();
  auto invalid_node_name = [new_node, &updated_node_names,
                            &node_name](absl::string_view fanin_node_name) {
return fanin_node_name == node_name ||
!CheckNodeNameExists(fanin_node_name, updated_node_names,
new_node->graph_view);
};
for (const auto& regular : new_node->regular_fanins) {
if (invalid_node_name(regular.node())) {
return false;
}
}
for (const auto& control : new_node->controlling_fanins) {
if (invalid_node_name(control)) {
return false;
}
}
return true;
}
}
}
}
}
#endif | #include "tensorflow/core/grappler/utils/graph_view_internal.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace utils {
namespace internal {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
constexpr char kNodeOp[] = "NotImportant";
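// Four-node graph in which "d" consumes regular fanins a:2, b:3, a:4 and
// controlling fanins ^c, ^b, and carries two attributes plus a device.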
GraphDef SimpleTestGraphForMutation() {
return GDef(
{NDef("a", kNodeOp, {}), NDef("b", kNodeOp, {}), NDef("c", kNodeOp, {}),
NDef("d", kNodeOp, {"a:2", "b:3", "a:4", "^c", "^b"},
{{"attr_1", "a"}, {"attr_2", 2.0f}}, "device_d")},
{});
}
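// Marks every existing node name as present (kNodeNamePresent) for use with
// the IsWellFormed checks below.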
absl::flat_hash_map<absl::string_view, int> GetUpdatedNodeNames(
const MutableGraphView* graph_view) {
absl::flat_hash_map<absl::string_view, int> updated_node_names;
updated_node_names.reserve(graph_view->NumNodes());
for (const auto& node_view : graph_view->GetNodes()) {
updated_node_names.emplace(node_view.GetName(), -1);
}
return updated_node_names;
}
using MutableNodeViewDiff = NodeViewDiff<MutableGraphView>;
TEST(MutableNodeViewDiffTest, UpdateName) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
UpdateName(&diff, "d");
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, UpdateOp) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
UpdateOp(&diff, "RandomOp");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
UpdateOp(&diff, kNodeOp);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, UpdateDevice) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
UpdateDevice(&diff, "random_device");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
UpdateDevice(&diff, "device_d");
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddOrUpdateRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, -1, {"a", 0});
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"a", 2});
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"a", 3});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"b", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 5, {"c", 5});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddOrUpdateRegularFaninBetweenRemovedFanins) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 0);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 2);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 1, {"c", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"c", 0});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 0);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 2, {"c", 2});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, -1);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 3);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"b", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"b", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 3);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 5, {"b", 6});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 4});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 3);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 5);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 1, {"a", 3});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 1, {"b", 3});
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveRegularFaninResize) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"c", 5});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 4, {"c", 6});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 5, {"c", 7});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 4);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 5);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, 0, "c");
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveControllingFanin(&diff, 0, "c");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, 0, "c");
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, AddOrUpdateAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_1;
attr_1.set_b(true);
AddOrUpdateAttribute(&diff, "attr_1", attr_1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_3;
attr_3.set_i(4);
AddOrUpdateAttribute(&diff, "attr_1", attr_3);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RemoveAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_1;
attr_1.set_b(true);
AddOrUpdateAttribute(&diff, "attr_1", attr_1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveAttribute(&diff, "attr_1");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveAttribute(&diff, "attr_3");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, Reset) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 2);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "a");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AttrValue attr_1;
attr_1.set_b(true);
AddOrUpdateAttribute(&diff, "attr_1", attr_1);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
Reset(&diff);
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedWithRemovedAndAppendedFanins) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
RemoveRegularFanin(&diff, 2);
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"a", 8});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedSelfLoopRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"d", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedSelfLoopRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"d", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedSelfLoopControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "d");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedMissingFaninRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedMissingFaninRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedMissingControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedSelfLoopRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedSelfLoopRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"e", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedSelfLoopControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedMissingFaninRegularUpdate) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 0, {"f", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedMissingFaninRegularNew) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddOrUpdateRegularFanin(&diff, 3, {"f", 1});
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, IsWellFormedRenamedMissingFaninControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
AddControllingFanin(&diff, kMissingIndex, "f");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RenamedAndRemovedFanins) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
string old_node_name = "d";
string new_node_name = "e";
updated_node_names.erase(old_node_name);
updated_node_names.emplace(old_node_name, 3);
updated_node_names.emplace(new_node_name, -1);
UpdateName(&diff, "e");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
for (int i = 0; i < 3; ++i) {
RemoveRegularFanin(&diff, i);
}
RemoveControllingFanin(&diff, 0, "c");
RemoveControllingFanin(&diff, 0, "b");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
}
TEST(MutableNodeViewDiffTest, RenamedWithSelfLoopControl) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutableNodeView* d_node = graph_view.GetNode("d");
ASSERT_NE(d_node, nullptr);
MutableNodeViewDiff diff(&graph_view, d_node->node_index());
EXPECT_TRUE(IsEmpty(&diff));
EXPECT_TRUE(IsWellFormed(&diff, updated_node_names));
updated_node_names.erase("d");
UpdateName(&diff, "c");
EXPECT_FALSE(IsEmpty(&diff));
EXPECT_FALSE(IsWellFormed(&diff, updated_node_names));
}
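// The tests below stage mutations on a new node that is not yet part of the
// graph, checking well-formedness against the same updated-name map.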
using MutationNewNodeForTest = NewNode<MutableGraphView>;
TEST(MutationNewNodeTest, UpdateName) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, UpdateOp) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateOp(&new_node, "Identity");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateOp(&new_node, "");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, UpdateDevice) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateDevice(&new_node, "foo_device");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateDevice(&new_node, "");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, AddOrUpdateRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, -1, {"a", 1});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"a", 1});
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 0, {"b", 2});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 2, {"c", 3});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"d", 4});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"e", 5});
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"new", 6});
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"d", 4});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, RemoveRegularFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 0, {"a", 1});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 1, {"b", 2});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddOrUpdateRegularFanin(&new_node, 2, {"c", 3});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 3);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 2);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 0);
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
RemoveRegularFanin(&new_node, 1);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, AddControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "a");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "e");
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "new");
EXPECT_FALSE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "e");
RemoveControllingFanin(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, RemoveControllingFanin) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
UpdateName(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AddControllingFanin(&new_node, "a");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "e");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "new");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveControllingFanin(&new_node, "a");
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, AddOrUpdateAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
string attr_name = "attr_name";
AttrValue attr_1;
attr_1.set_i(8);
AddOrUpdateAttribute(&new_node, attr_name, attr_1);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
AttrValue attr_2;
attr_2.set_f(2.0f);
AddOrUpdateAttribute(&new_node, attr_name, attr_2);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
TEST(MutationNewNodeTest, RemoveAttribute) {
GraphDef graph = SimpleTestGraphForMutation();
Status s;
MutableGraphView graph_view(&graph, &s);
TF_ASSERT_OK(s);
auto updated_node_names = GetUpdatedNodeNames(&graph_view);
MutationNewNodeForTest new_node(&graph_view, {});
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
string attr_name = "attr_name";
AttrValue attr_1;
attr_1.set_i(8);
AddOrUpdateAttribute(&new_node, attr_name, attr_1);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveAttribute(&new_node, attr_name);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
RemoveAttribute(&new_node, attr_name);
EXPECT_TRUE(IsWellFormed(&new_node, updated_node_names));
}
}  // namespace
}  // namespace internal
}  // namespace utils
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/graph_view_internal.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/graph_view_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13af8cfa-a80f-4abd-b903-d4e5aa058635 | cpp | tensorflow/tensorflow | while_loop_trip_count_annotator | third_party/xla/xla/service/while_loop_trip_count_annotator.cc | third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc | #include "xla/service/while_loop_trip_count_annotator.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
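// Walks every computation and, for each while op whose trip count can be
// determined statically, records that count in the op's backend config.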
absl::StatusOr<bool> WhileLoopTripCountAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (const HloComputation* comp : module->computations(execution_threads)) {
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() != HloOpcode::kWhile) {
continue;
}
if (auto trip_count = ComputeWhileLoopTripCount(instr)) {
WhileLoopBackendConfig config;
config.mutable_known_trip_count()->set_n(*trip_count);
TF_RETURN_IF_ERROR(instr->set_backend_config(config));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/while_loop_trip_count_annotator.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class TripCountAnnotatorTest : public HloTestBase {};
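// Each test builds a small counting loop in HLO text and inspects the trip
// count (if any) that the pass records in the while op's backend config.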
TEST_F(TripCountAnnotatorTest, KnownSmallTripCount) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(10)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(0)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(10, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, KnownLargeTripCount) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(0)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(1000000, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, NonzeroStart) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LT
}
ENTRY test {
i_start = s32[] constant(10)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(999990, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, LessThanOrEqualTo) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
one = s32[] constant(1)
i_plus_one = s32[] add(i, one)
ROOT tuple = (s32[]) tuple(i_plus_one)
}
Cond {
param = (s32[]) parameter(0)
i = s32[] get-tuple-element(param), index=0
trip_count = s32[] constant(1000000)
ROOT done = pred[] compare(i, trip_count), direction=LE
}
ENTRY test {
i_start = s32[] constant(10)
initial_tuple = (s32[]) tuple(i_start)
ROOT while = (s32[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(auto config,
m->entry_computation()
->root_instruction()
->backend_config<WhileLoopBackendConfig>());
EXPECT_EQ(999991, config.known_trip_count().n());
}
TEST_F(TripCountAnnotatorTest, Int64Overflow) {
const char* kModuleStr = R"(
HloModule test
Body {
param = (s64[]) parameter(0)
i = s64[] get-tuple-element(param), index=0
one = s64[] constant(1)
i_plus_one = s64[] add(i, one)
ROOT tuple = (s64[]) tuple(i_plus_one)
}
Cond {
param = (s64[]) parameter(0)
i = s64[] get-tuple-element(param), index=0
trip_count = s64[] constant(9223372036854775807)
ROOT done = pred[] compare(i, trip_count), direction=LE
}
ENTRY test {
i_start = s64[] constant(-9223372036854775808)
initial_tuple = (s64[]) tuple(i_start)
ROOT while = (s64[]) while(initial_tuple), condition=Cond, body=Body
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kModuleStr));
WhileLoopTripCountAnnotator pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, m.get()));
EXPECT_FALSE(changed);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_trip_count_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a10386a-666e-4dfa-b3e2-222f710c08c7 | cpp | tensorflow/tensorflow | random_dataset_op | tensorflow/core/kernels/data/experimental/random_dataset_op.cc | tensorflow/core/kernels/data/experimental/random_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/random_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const RandomDatasetOp::kDatasetType;
constexpr const char* const RandomDatasetOp::kSeed;
constexpr const char* const RandomDatasetOp::kSeed2;
constexpr const char* const RandomDatasetOp::kOutputTypes;
constexpr const char* const RandomDatasetOp::kOutputShapes;
constexpr const char* const
RandomDatasetOp::kRerandomizeEachIteration;
namespace {
constexpr char kRandomDatasetV1[] = "RandomDataset";
constexpr char kRandomDatasetV2[] = "RandomDatasetV2";
constexpr char kSeedGenerator[] = "SeedGenerator";
constexpr char kEpochNumRandomSamples[] = "epoch_num_random_samples";
constexpr char kNumRandomSamples[] = "num_random_samples";
}
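// An infinite source dataset of pseudorandom int64 scalars. Seeds come from
// a shared SeedGeneratorManager resource; when this dataset owns the
// resource, it also deletes it on destruction.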
class RandomDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, RandomSeeds&& seeds,
SeedGeneratorManager* manager, ResourceHandle&& resource_handle,
bool owns_resource, int op_version)
: DatasetBase(DatasetContext(ctx)),
seeds_(std::move(seeds)),
op_version_(op_version),
manager_(manager),
resource_handle_(resource_handle),
resource_mgr_(ctx->resource_manager()),
owns_resource_(owns_resource) {}
~Dataset() override {
manager_->Unref();
if (owns_resource_) {
Status s = resource_mgr_->Delete<SeedGeneratorManager>(
resource_handle_.container(), resource_handle_.name());
if (!s.ok()) {
LOG(WARNING) << "Failed to delete RNG resource: " << s;
}
}
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
split_providers->push_back(std::make_unique<IndexSplitProvider>(kint64max));
return absl::OkStatus();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(
Iterator::Params{this, strings::StrCat(prefix, "::Random")},
manager_->get().get());
}
const DataTypeVector& output_dtypes() const override {
static DataTypeVector* dtypes = new DataTypeVector({DT_INT64});
return *dtypes;
}
const std::vector<PartialTensorShape>& output_shapes() const override {
static std::vector<PartialTensorShape>* shapes =
new std::vector<PartialTensorShape>({{}});
return *shapes;
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.op_version = op_version_;
params.set_args(seeds_.input_seed(), seeds_.input_seed2());
return name_utils::DatasetDebugString(RandomDatasetOp::kDatasetType,
params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return kInfiniteCardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* seed_node = nullptr;
Node* seed2_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed(), &seed_node));
TF_RETURN_IF_ERROR(b->AddScalar(seeds_.input_seed2(), &seed2_node));
if (op_version_ == 1) {
return b->AddDataset(this, {seed_node, seed2_node}, output);
}
Node* resource_handle_node = nullptr;
Tensor handle(DT_RESOURCE, TensorShape({}));
handle.scalar<ResourceHandle>()() = resource_handle_;
TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
AttrValue rerandomize_each_iteration;
b->BuildAttrValue(manager_->get()->reshuffle_each_iteration(),
&rerandomize_each_iteration);
return b->AddDataset(
this, {seed_node, seed2_node, resource_handle_node},
{std::make_pair(kRerandomizeEachIteration, rerandomize_each_iteration)},
output);
}
private:
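  // Iterator state is just RNG bookkeeping: the generated seeds and the
  // number of samples drawn, which is enough to rebuild and fast-forward
  // the Philox generator on restore.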
class Iterator : public DatasetIterator<Dataset> {
public:
Iterator(const Params& params, SeedGenerator* seed_generator)
: DatasetIterator<Dataset>(params),
seed_generator_(seed_generator),
parent_generator_(seed_generator_->seed(), seed_generator_->seed2()),
generator_(&parent_generator_) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
seed_generator_->GenerateSeeds(&seed_, &seed2_);
ResetRngs();
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
out_tensors->reserve(1);
mutex_lock l(mu_);
out_tensors->emplace_back(ctx->allocator({}), DT_INT64, TensorShape({}));
out_tensors->back().scalar<int64_t>()() = Random();
*end_of_sequence = false;
return absl::OkStatus();
}
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kEpochNumRandomSamples),
seed_generator_->num_random_samples()));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kNumRandomSamples),
num_random_samples_));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kSeed), seed_));
TF_RETURN_IF_ERROR(writer->WriteScalar(this->full_name(kSeed2), seed2_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t num_random_samples;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kEpochNumRandomSamples),
&num_random_samples));
seed_generator_->set_num_random_samples(num_random_samples);
seed_generator_->Reset();
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kNumRandomSamples),
&num_random_samples_));
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kSeed), &seed_));
TF_RETURN_IF_ERROR(reader->ReadScalar(this->full_name(kSeed2), &seed2_));
ResetRngs();
return absl::OkStatus();
}
protected:
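    // Rebuilds the generator from the current seeds and skips the samples
    // already consumed, so a restored iterator resumes the same stream.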
void ResetRngs() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
parent_generator_ = random::PhiloxRandom(seed_, seed2_);
generator_ =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator_);
generator_.Skip(num_random_samples_);
}
private:
random::SingleSampleAdapter<random::PhiloxRandom>::ResultType Random()
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
num_random_samples_++;
auto out = generator_();
return out;
}
mutex mu_;
SeedGenerator* const seed_generator_ TF_GUARDED_BY(mu_);
random::PhiloxRandom parent_generator_ TF_GUARDED_BY(mu_);
random::SingleSampleAdapter<random::PhiloxRandom> generator_
TF_GUARDED_BY(mu_);
int64_t num_random_samples_ TF_GUARDED_BY(mu_) = 0;
int64_t seed_ TF_GUARDED_BY(mu_) = 0;
int64_t seed2_ TF_GUARDED_BY(mu_) = 0;
};
private:
const RandomSeeds seeds_;
const int op_version_;
SeedGeneratorManager* const manager_;
const ResourceHandle resource_handle_;
ResourceMgr* const resource_mgr_;
const bool owns_resource_;
};
RandomDatasetOp::RandomDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
auto& op_name = ctx->def().op();
if (op_name == kRandomDatasetV2) {
op_version_ = 2;
} else if (op_name == kRandomDatasetV1) {
op_version_ = 1;
}
if (ctx->HasAttr(kRerandomizeEachIteration)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kRerandomizeEachIteration,
&rerandomize_each_iteration_));
}
}
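// For RandomDatasetV2 the kernel receives a seed-generator resource handle;
// if lookup fails with NotFound it creates (and owns) the resource, picking
// a fixed or re-randomizing seed generator based on the attr.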
void RandomDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
int64_t seed;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, "seed", &seed));
int64_t seed2;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, "seed2", &seed2));
RandomSeeds seeds(seed, seed2);
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kSeedGenerator, "_",
resource_id_counter.fetch_add(1));
SeedGeneratorManager* manager = nullptr;
ResourceHandle handle;
bool owns_resource = true;
if (op_version_ == 2) {
OP_REQUIRES_OK(ctx, HandleFromInput(ctx, 2, &handle));
Status s = ctx->resource_manager()->Lookup<SeedGeneratorManager>(
handle.container(), handle.name(), &manager);
owns_resource = false;
if (errors::IsNotFound(s)) {
owns_resource = true;
} else {
OP_REQUIRES_OK(ctx, s);
}
}
if (owns_resource) {
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<SeedGeneratorManager>(
container, name, &manager,
[rerandomize = rerandomize_each_iteration_,
&seeds](SeedGeneratorManager** manager) {
if (rerandomize) {
*manager =
new SeedGeneratorManager(new RandomSeedGenerator(seeds));
} else {
*manager =
new SeedGeneratorManager(new FixedSeedGenerator(seeds));
}
return absl::OkStatus();
}));
handle = MakeResourceHandle<SeedGenerator>(ctx, container, name);
}
*output = new RandomDatasetOp::Dataset(ctx, std::move(seeds), manager,
std::move(handle), owns_resource,
op_version_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name(kRandomDatasetV1).Device(DEVICE_CPU),
RandomDatasetOp);
REGISTER_KERNEL_BUILDER(Name(kRandomDatasetV2).Device(DEVICE_CPU),
RandomDatasetOp);
REGISTER_KERNEL_BUILDER(Name("ExperimentalRandomDataset").Device(DEVICE_CPU),
RandomDatasetOp);
}  // namespace
}  // namespace experimental
}  // namespace data
} | #include "tensorflow/core/kernels/data/experimental/random_dataset_op.h"
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/random_seed_ops.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/random_distributions.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "random_dataset";
constexpr char kIteratorPrefix[] = "Iterator";
constexpr int kCount = 10;
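// Replays the same Philox stream the op uses, so tests can compare iterator
// output against independently generated reference values.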
void GenerateExpectedEpochData(int64_t seed, int64_t seed2, int count,
std::vector<Tensor>* epoch_data) {
auto parent_generator = random::PhiloxRandom(seed, seed2);
auto generator =
random::SingleSampleAdapter<random::PhiloxRandom>(&parent_generator);
for (int i = 0; i < count; ++i) {
epoch_data->push_back(
CreateTensor<int64_t>(TensorShape({}), {generator()}));
}
}
std::vector<Tensor> GenerateExpectedData(int64_t seed, int64_t seed2, int count,
bool rerandomize_each_iteration,
int iterations) {
RandomSeedGenerator parent_seed_generator(RandomSeeds(seed, seed2));
std::vector<Tensor> ret;
for (int j = 0; j < iterations; ++j) {
if (rerandomize_each_iteration) {
parent_seed_generator.GenerateSeeds(&seed, &seed2);
}
GenerateExpectedEpochData(seed, seed2, count, &ret);
}
return ret;
}
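// Expected output after a save/restore cycle. With rerandomization enabled
// the parent seed generator is advanced twice, which presumably mirrors the
// initialize-then-restore sequence the iterator under test goes through.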
std::vector<Tensor> GenerateExpectedSaveAndRestoreData(
int64_t seed, int64_t seed2, int count, bool rerandomize_each_iteration) {
RandomSeedGenerator parent_seed_generator(RandomSeeds(seed, seed2));
if (rerandomize_each_iteration) {
parent_seed_generator.GenerateSeeds(&seed, &seed2);
parent_seed_generator.GenerateSeeds(&seed, &seed2);
}
std::vector<Tensor> ret;
GenerateExpectedEpochData(seed, seed2, count, &ret);
return ret;
}
class RandomDatasetParams : public DatasetParams {
public:
RandomDatasetParams(int64_t seed, int64_t seed2, int32_t op_version,
bool rerandomize_each_iteration,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
seed_(CreateTensor<int64_t>(TensorShape({}), {seed})),
seed2_(CreateTensor<int64_t>(TensorShape({}), {seed2})),
dummy_resource_handle_(CreateDummyResourceHandle()),
seed_generator_resource_(CreateTensor<ResourceHandle>(
TensorShape({}), {dummy_resource_handle_})),
rerandomize_each_iteration_(rerandomize_each_iteration) {
op_version_ = op_version;
}
ResourceHandle CreateDummyResourceHandle() { return ResourceHandle(); }
virtual std::vector<Tensor> GetInputTensors() const override {
return {seed_, seed2_, seed_generator_resource_};
}
virtual Status GetInputNames(
std::vector<string>* input_names) const override {
*input_names = {RandomDatasetOp::kSeed, RandomDatasetOp::kSeed2};
if (op_version_ == 2) {
input_names->emplace_back("seed_generator");
}
return absl::OkStatus();
}
virtual Status GetAttributes(AttributeVector* attributes) const override {
*attributes = {{"output_types", output_dtypes_},
{"output_shapes", output_shapes_},
{"metadata", ""}};
if (op_version_ == 2) {
attributes->emplace_back("rerandomize_each_iteration",
rerandomize_each_iteration_);
}
return absl::OkStatus();
}
virtual string dataset_type() const override {
return RandomDatasetOp::kDatasetType;
}
private:
Tensor seed_;
Tensor seed2_;
ResourceHandle dummy_resource_handle_;
Tensor seed_generator_resource_;
bool rerandomize_each_iteration_;
};
class RandomDatasetOpTest : public DatasetOpsTestBase {};
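// Canonical parameter sets; each name says which field differs from the
// 42/42, v1, no-rerandomize baseline.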
RandomDatasetParams FortyTwo() {
  return {/*seed=*/42,
          /*seed2=*/42,
          /*op_version=*/1,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeed() {
  return {/*seed=*/1000,
          /*seed2=*/42,
          /*op_version=*/1,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeed2() {
  return {/*seed=*/42,
          /*seed2=*/1000,
          /*op_version=*/1,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams FortyTwoV2RerandomizeEachIterationFalse() {
  return {/*seed=*/42,
          /*seed2=*/42,
          /*op_version=*/2,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeedV2RerandomizeEachIterationFalse() {
  return {/*seed=*/1000,
          /*seed2=*/42,
          /*op_version=*/2,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeed2V2RerandomizeEachIterationFalse() {
  return {/*seed=*/42,
          /*seed2=*/1000,
          /*op_version=*/2,
          /*rerandomize_each_iteration=*/false,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams FortyTwoV2RerandomizeEachIterationTrue() {
  return {/*seed=*/42,
          /*seed2=*/42,
          /*op_version=*/2,
          /*rerandomize_each_iteration=*/true,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeedV2RerandomizeEachIterationTrue() {
  return {/*seed=*/1000,
          /*seed2=*/42,
          /*op_version=*/2,
          /*rerandomize_each_iteration=*/true,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
RandomDatasetParams ChangeSeed2V2RerandomizeEachIterationTrue() {
  return {/*seed=*/42,
          /*seed2=*/1000,
          /*op_version=*/2,
          /*rerandomize_each_iteration=*/true,
          /*output_dtypes=*/{DT_INT64},
          /*output_shapes=*/{PartialTensorShape({})},
          /*node_name=*/kNodeName};
}
class ParameterizedGetNextTest : public RandomDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<RandomDatasetParams>> {};
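// Each case pairs dataset params with two epochs of expected data: the test
// drains kCount values, rebuilds the iterator, then drains kCount more.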
std::vector<GetNextTestCase<RandomDatasetParams>> GetNextTestCases() {
  return {{FortyTwo(),
           GenerateExpectedData(42, 42, kCount,
                                /*rerandomize_each_iteration=*/false,
                                /*iterations=*/2)},
          {ChangeSeed(),
           GenerateExpectedData(1000, 42, kCount,
                                /*rerandomize_each_iteration=*/false,
                                /*iterations=*/2)},
          {ChangeSeed2(),
           GenerateExpectedData(42, 1000, kCount,
                                /*rerandomize_each_iteration=*/false,
                                /*iterations=*/2)},
          {FortyTwoV2RerandomizeEachIterationFalse(),
           GenerateExpectedData(42, 42, kCount,
                                /*rerandomize_each_iteration=*/false,
                                /*iterations=*/2)},
          {ChangeSeedV2RerandomizeEachIterationFalse(),
           GenerateExpectedData(1000, 42, kCount,
                                /*rerandomize_each_iteration=*/false,
                                /*iterations=*/2)},
          {ChangeSeed2V2RerandomizeEachIterationFalse(),
           GenerateExpectedData(42, 1000, kCount,
                                /*rerandomize_each_iteration=*/false,
                                /*iterations=*/2)},
          {FortyTwoV2RerandomizeEachIterationTrue(),
           GenerateExpectedData(42, 42, kCount,
                                /*rerandomize_each_iteration=*/true,
                                /*iterations=*/2)},
          {ChangeSeedV2RerandomizeEachIterationTrue(),
           GenerateExpectedData(1000, 42, kCount,
                                /*rerandomize_each_iteration=*/true,
                                /*iterations=*/2)},
          {ChangeSeed2V2RerandomizeEachIterationTrue(),
           GenerateExpectedData(42, 1000, kCount,
                                /*rerandomize_each_iteration=*/true,
                                /*iterations=*/2)}};
}
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (out_tensors.size() < kCount) {
std::vector<Tensor> next;
TF_ASSERT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
while (out_tensors.size() < 2 * kCount) {
std::vector<Tensor> next;
TF_ASSERT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
ASSERT_FALSE(end_of_sequence);
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
  TF_ASSERT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           /*compare_order=*/true));
}
INSTANTIATE_TEST_SUITE_P(
RandomDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(
std::vector<GetNextTestCase<RandomDatasetParams>>(GetNextTestCases())));
std::vector<DatasetNodeNameTestCase<RandomDatasetParams>>
DatasetNodeNameTestCases() {
return {{FortyTwo(), kNodeName}};
}
DATASET_NODE_NAME_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetNodeNameTestCases());
std::vector<DatasetTypeStringTestCase<RandomDatasetParams>>
DatasetTypeStringTestCases() {
return {{FortyTwo(),
name_utils::OpName(
RandomDatasetOp::kDatasetType)}};
}
DATASET_TYPE_STRING_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetTypeStringTestCases());
std::vector<DatasetOutputDtypesTestCase<RandomDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{FortyTwo(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetOutputDtypesTestCases());
std::vector<DatasetOutputShapesTestCase<RandomDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FortyTwo(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
DatasetOutputShapesTestCases());
std::vector<CardinalityTestCase<RandomDatasetParams>> CardinalityTestCases() {
return {{FortyTwo(),
kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
CardinalityTestCases());
std::vector<IteratorOutputDtypesTestCase<RandomDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{FortyTwo(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputDtypesTestCases());
std::vector<IteratorOutputShapesTestCase<RandomDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FortyTwo(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputShapesTestCases());
std::vector<IteratorPrefixTestCase<RandomDatasetParams>>
IteratorOutputPrefixTestCases() {
return {{FortyTwo(),
name_utils::IteratorPrefix(
RandomDatasetOp::kDatasetType, kIteratorPrefix)}};
}
ITERATOR_PREFIX_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorOutputPrefixTestCases());
std::vector<IteratorSaveAndRestoreTestCase<RandomDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{FortyTwo(), /*breakpoints=*/{2, 5, 8},
           GenerateExpectedSaveAndRestoreData(
               42, 42, /*count=*/9,
               /*rerandomize_each_iteration=*/false)},
          {FortyTwoV2RerandomizeEachIterationFalse(),
           /*breakpoints=*/{2, 5, 8},
           GenerateExpectedSaveAndRestoreData(
               42, 42, /*count=*/9,
               /*rerandomize_each_iteration=*/false)},
          {FortyTwoV2RerandomizeEachIterationTrue(),
           /*breakpoints=*/{2, 5, 8},
           GenerateExpectedSaveAndRestoreData(
               42, 42, /*count=*/9,
               /*rerandomize_each_iteration=*/true)}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(RandomDatasetOpTest, RandomDatasetParams,
IteratorSaveAndRestoreTestCases());
}  // namespace
}  // namespace experimental
}  // namespace data
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/random_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/random_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8a9d037e-3b07-40e2-a833-78cd5a534886 | cpp | google/quiche | quic_error_codes | quiche/quic/core/quic_error_codes.cc | quiche/quic/core/quic_error_codes_test.cc | #include "quiche/quic/core/quic_error_codes.h"
#include <cstdint>
#include <cstring>
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
#include "openssl/ssl.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
#define RETURN_STRING_LITERAL(x) \
case x: \
return #x;
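// Returns the literal enumerator name for known values of each error enum.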
const char* QuicRstStreamErrorCodeToString(QuicRstStreamErrorCode error) {
switch (error) {
RETURN_STRING_LITERAL(QUIC_STREAM_NO_ERROR);
RETURN_STRING_LITERAL(QUIC_ERROR_PROCESSING_STREAM);
RETURN_STRING_LITERAL(QUIC_MULTIPLE_TERMINATION_OFFSETS);
RETURN_STRING_LITERAL(QUIC_BAD_APPLICATION_PAYLOAD);
RETURN_STRING_LITERAL(QUIC_STREAM_CONNECTION_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_PEER_GOING_AWAY);
RETURN_STRING_LITERAL(QUIC_STREAM_CANCELLED);
RETURN_STRING_LITERAL(QUIC_RST_ACKNOWLEDGEMENT);
RETURN_STRING_LITERAL(QUIC_REFUSED_STREAM);
RETURN_STRING_LITERAL(QUIC_INVALID_PROMISE_URL);
RETURN_STRING_LITERAL(QUIC_UNAUTHORIZED_PROMISE_URL);
RETURN_STRING_LITERAL(QUIC_DUPLICATE_PROMISE_URL);
RETURN_STRING_LITERAL(QUIC_PROMISE_VARY_MISMATCH);
RETURN_STRING_LITERAL(QUIC_INVALID_PROMISE_METHOD);
RETURN_STRING_LITERAL(QUIC_PUSH_STREAM_TIMED_OUT);
RETURN_STRING_LITERAL(QUIC_HEADERS_TOO_LARGE);
RETURN_STRING_LITERAL(QUIC_STREAM_TTL_EXPIRED);
RETURN_STRING_LITERAL(QUIC_DATA_AFTER_CLOSE_OFFSET);
RETURN_STRING_LITERAL(QUIC_STREAM_GENERAL_PROTOCOL_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_INTERNAL_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_STREAM_CREATION_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_CLOSED_CRITICAL_STREAM);
RETURN_STRING_LITERAL(QUIC_STREAM_FRAME_UNEXPECTED);
RETURN_STRING_LITERAL(QUIC_STREAM_FRAME_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_EXCESSIVE_LOAD);
RETURN_STRING_LITERAL(QUIC_STREAM_ID_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_SETTINGS_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_MISSING_SETTINGS);
RETURN_STRING_LITERAL(QUIC_STREAM_REQUEST_REJECTED);
RETURN_STRING_LITERAL(QUIC_STREAM_REQUEST_INCOMPLETE);
RETURN_STRING_LITERAL(QUIC_STREAM_CONNECT_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_VERSION_FALLBACK);
RETURN_STRING_LITERAL(QUIC_STREAM_DECOMPRESSION_FAILED);
RETURN_STRING_LITERAL(QUIC_STREAM_ENCODER_STREAM_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_DECODER_STREAM_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_UNKNOWN_APPLICATION_ERROR_CODE);
RETURN_STRING_LITERAL(QUIC_STREAM_WEBTRANSPORT_SESSION_GONE);
RETURN_STRING_LITERAL(
QUIC_STREAM_WEBTRANSPORT_BUFFERED_STREAMS_LIMIT_EXCEEDED);
RETURN_STRING_LITERAL(QUIC_APPLICATION_DONE_WITH_STREAM);
RETURN_STRING_LITERAL(QUIC_STREAM_LAST_ERROR);
}
return "INVALID_RST_STREAM_ERROR_CODE";
}
const char* QuicErrorCodeToString(QuicErrorCode error) {
switch (error) {
RETURN_STRING_LITERAL(QUIC_NO_ERROR);
RETURN_STRING_LITERAL(QUIC_INTERNAL_ERROR);
RETURN_STRING_LITERAL(QUIC_STREAM_DATA_AFTER_TERMINATION);
RETURN_STRING_LITERAL(QUIC_INVALID_PACKET_HEADER);
RETURN_STRING_LITERAL(QUIC_INVALID_FRAME_DATA);
RETURN_STRING_LITERAL(QUIC_MISSING_PAYLOAD);
RETURN_STRING_LITERAL(QUIC_INVALID_FEC_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_STREAM_DATA);
RETURN_STRING_LITERAL(QUIC_OVERLAPPING_STREAM_DATA);
RETURN_STRING_LITERAL(QUIC_UNENCRYPTED_STREAM_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_RST_STREAM_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_CONNECTION_CLOSE_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_GOAWAY_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_WINDOW_UPDATE_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_BLOCKED_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_STOP_WAITING_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_PATH_CLOSE_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_ACK_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_VERSION_NEGOTIATION_PACKET);
RETURN_STRING_LITERAL(QUIC_INVALID_PUBLIC_RST_PACKET);
RETURN_STRING_LITERAL(QUIC_DECRYPTION_FAILURE);
RETURN_STRING_LITERAL(QUIC_ENCRYPTION_FAILURE);
RETURN_STRING_LITERAL(QUIC_PACKET_TOO_LARGE);
RETURN_STRING_LITERAL(QUIC_PEER_GOING_AWAY);
RETURN_STRING_LITERAL(QUIC_HANDSHAKE_FAILED);
RETURN_STRING_LITERAL(QUIC_HANDSHAKE_FAILED_PACKETS_BUFFERED_TOO_LONG);
RETURN_STRING_LITERAL(QUIC_HANDSHAKE_FAILED_INVALID_HOSTNAME);
RETURN_STRING_LITERAL(QUIC_CRYPTO_TAGS_OUT_OF_ORDER);
RETURN_STRING_LITERAL(QUIC_CRYPTO_TOO_MANY_ENTRIES);
RETURN_STRING_LITERAL(QUIC_CRYPTO_TOO_MANY_REJECTS);
    RETURN_STRING_LITERAL(QUIC_CRYPTO_INVALID_VALUE_LENGTH);
RETURN_STRING_LITERAL(QUIC_CRYPTO_MESSAGE_AFTER_HANDSHAKE_COMPLETE);
RETURN_STRING_LITERAL(QUIC_CRYPTO_INTERNAL_ERROR);
RETURN_STRING_LITERAL(QUIC_CRYPTO_VERSION_NOT_SUPPORTED);
RETURN_STRING_LITERAL(QUIC_CRYPTO_NO_SUPPORT);
RETURN_STRING_LITERAL(QUIC_INVALID_CRYPTO_MESSAGE_TYPE);
RETURN_STRING_LITERAL(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER);
RETURN_STRING_LITERAL(QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND);
RETURN_STRING_LITERAL(QUIC_CRYPTO_MESSAGE_PARAMETER_NO_OVERLAP);
RETURN_STRING_LITERAL(QUIC_CRYPTO_MESSAGE_INDEX_NOT_FOUND);
RETURN_STRING_LITERAL(QUIC_UNSUPPORTED_PROOF_DEMAND);
RETURN_STRING_LITERAL(QUIC_INVALID_STREAM_ID);
RETURN_STRING_LITERAL(QUIC_INVALID_PRIORITY);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_OPEN_STREAMS);
RETURN_STRING_LITERAL(QUIC_PUBLIC_RESET);
RETURN_STRING_LITERAL(QUIC_INVALID_VERSION);
RETURN_STRING_LITERAL(QUIC_PACKET_WRONG_VERSION);
RETURN_STRING_LITERAL(QUIC_INVALID_0RTT_PACKET_NUMBER_OUT_OF_ORDER);
RETURN_STRING_LITERAL(QUIC_INVALID_HEADER_ID);
RETURN_STRING_LITERAL(QUIC_INVALID_NEGOTIATED_VALUE);
RETURN_STRING_LITERAL(QUIC_DECOMPRESSION_FAILURE);
RETURN_STRING_LITERAL(QUIC_NETWORK_IDLE_TIMEOUT);
RETURN_STRING_LITERAL(QUIC_HANDSHAKE_TIMEOUT);
RETURN_STRING_LITERAL(QUIC_ERROR_MIGRATING_ADDRESS);
RETURN_STRING_LITERAL(QUIC_ERROR_MIGRATING_PORT);
RETURN_STRING_LITERAL(QUIC_PACKET_WRITE_ERROR);
RETURN_STRING_LITERAL(QUIC_PACKET_READ_ERROR);
RETURN_STRING_LITERAL(QUIC_EMPTY_STREAM_FRAME_NO_FIN);
RETURN_STRING_LITERAL(QUIC_INVALID_HEADERS_STREAM_DATA);
RETURN_STRING_LITERAL(QUIC_HEADERS_STREAM_DATA_DECOMPRESS_FAILURE);
RETURN_STRING_LITERAL(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA);
RETURN_STRING_LITERAL(QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA);
RETURN_STRING_LITERAL(QUIC_FLOW_CONTROL_INVALID_WINDOW);
RETURN_STRING_LITERAL(QUIC_CONNECTION_IP_POOLED);
RETURN_STRING_LITERAL(QUIC_PROOF_INVALID);
RETURN_STRING_LITERAL(QUIC_CRYPTO_DUPLICATE_TAG);
RETURN_STRING_LITERAL(QUIC_CRYPTO_ENCRYPTION_LEVEL_INCORRECT);
RETURN_STRING_LITERAL(QUIC_CRYPTO_SERVER_CONFIG_EXPIRED);
RETURN_STRING_LITERAL(QUIC_INVALID_CHANNEL_ID_SIGNATURE);
RETURN_STRING_LITERAL(QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED);
RETURN_STRING_LITERAL(QUIC_CRYPTO_MESSAGE_WHILE_VALIDATING_CLIENT_HELLO);
RETURN_STRING_LITERAL(QUIC_CRYPTO_UPDATE_BEFORE_HANDSHAKE_COMPLETE);
RETURN_STRING_LITERAL(QUIC_VERSION_NEGOTIATION_MISMATCH);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_OUTSTANDING_RECEIVED_PACKETS);
RETURN_STRING_LITERAL(QUIC_CONNECTION_CANCELLED);
RETURN_STRING_LITERAL(QUIC_BAD_PACKET_LOSS_RATE);
RETURN_STRING_LITERAL(QUIC_PUBLIC_RESETS_POST_HANDSHAKE);
RETURN_STRING_LITERAL(QUIC_FAILED_TO_SERIALIZE_PACKET);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_AVAILABLE_STREAMS);
RETURN_STRING_LITERAL(QUIC_UNENCRYPTED_FEC_DATA);
RETURN_STRING_LITERAL(QUIC_BAD_MULTIPATH_FLAG);
RETURN_STRING_LITERAL(QUIC_IP_ADDRESS_CHANGED);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_TOO_MANY_CHANGES);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_NO_NEW_NETWORK);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_NON_MIGRATABLE_STREAM);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_RTOS);
RETURN_STRING_LITERAL(QUIC_ATTEMPT_TO_SEND_UNENCRYPTED_STREAM_DATA);
RETURN_STRING_LITERAL(QUIC_MAYBE_CORRUPTED_MEMORY);
RETURN_STRING_LITERAL(QUIC_CRYPTO_CHLO_TOO_LARGE);
RETURN_STRING_LITERAL(QUIC_MULTIPATH_PATH_DOES_NOT_EXIST);
RETURN_STRING_LITERAL(QUIC_MULTIPATH_PATH_NOT_ACTIVE);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_STREAM_DATA_INTERVALS);
RETURN_STRING_LITERAL(QUIC_STREAM_SEQUENCER_INVALID_STATE);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_SESSIONS_ON_SERVER);
RETURN_STRING_LITERAL(QUIC_STREAM_LENGTH_OVERFLOW);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_DISABLED_BY_CONFIG);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_INTERNAL_ERROR);
RETURN_STRING_LITERAL(QUIC_INVALID_MAX_DATA_FRAME_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_MAX_STREAM_DATA_FRAME_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_STREAM_BLOCKED_DATA);
RETURN_STRING_LITERAL(QUIC_MAX_STREAMS_DATA);
RETURN_STRING_LITERAL(QUIC_STREAMS_BLOCKED_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_NEW_CONNECTION_ID_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_RETIRE_CONNECTION_ID_DATA);
RETURN_STRING_LITERAL(QUIC_CONNECTION_ID_LIMIT_ERROR);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_CONNECTION_ID_WAITING_TO_RETIRE);
RETURN_STRING_LITERAL(QUIC_INVALID_STOP_SENDING_FRAME_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_PATH_CHALLENGE_DATA);
RETURN_STRING_LITERAL(QUIC_INVALID_PATH_RESPONSE_DATA);
RETURN_STRING_LITERAL(QUIC_CONNECTION_MIGRATION_HANDSHAKE_UNCONFIRMED);
RETURN_STRING_LITERAL(QUIC_PEER_PORT_CHANGE_HANDSHAKE_UNCONFIRMED);
RETURN_STRING_LITERAL(QUIC_INVALID_MESSAGE_DATA);
RETURN_STRING_LITERAL(IETF_QUIC_PROTOCOL_VIOLATION);
RETURN_STRING_LITERAL(QUIC_INVALID_NEW_TOKEN);
RETURN_STRING_LITERAL(QUIC_DATA_RECEIVED_ON_WRITE_UNIDIRECTIONAL_STREAM);
RETURN_STRING_LITERAL(QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM);
RETURN_STRING_LITERAL(QUIC_STREAMS_BLOCKED_ERROR);
RETURN_STRING_LITERAL(QUIC_MAX_STREAMS_ERROR);
RETURN_STRING_LITERAL(QUIC_HTTP_DECODER_ERROR);
RETURN_STRING_LITERAL(QUIC_STALE_CONNECTION_CANCELLED);
RETURN_STRING_LITERAL(QUIC_IETF_GQUIC_ERROR_MISSING);
RETURN_STRING_LITERAL(
QUIC_WINDOW_UPDATE_RECEIVED_ON_READ_UNIDIRECTIONAL_STREAM);
RETURN_STRING_LITERAL(QUIC_TOO_MANY_BUFFERED_CONTROL_FRAMES);
RETURN_STRING_LITERAL(QUIC_TRANSPORT_INVALID_CLIENT_INDICATION);
RETURN_STRING_LITERAL(QUIC_QPACK_DECOMPRESSION_FAILED);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_ERROR);
RETURN_STRING_LITERAL(QUIC_QPACK_DECODER_STREAM_ERROR);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_INTEGER_TOO_LARGE);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_STRING_LITERAL_TOO_LONG);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_HUFFMAN_ENCODING_ERROR);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_INVALID_STATIC_ENTRY);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_STATIC);
RETURN_STRING_LITERAL(
QUIC_QPACK_ENCODER_STREAM_INSERTION_INVALID_RELATIVE_INDEX);
RETURN_STRING_LITERAL(
QUIC_QPACK_ENCODER_STREAM_INSERTION_DYNAMIC_ENTRY_NOT_FOUND);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_DYNAMIC);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_LITERAL);
RETURN_STRING_LITERAL(
QUIC_QPACK_ENCODER_STREAM_DUPLICATE_INVALID_RELATIVE_INDEX);
RETURN_STRING_LITERAL(
QUIC_QPACK_ENCODER_STREAM_DUPLICATE_DYNAMIC_ENTRY_NOT_FOUND);
RETURN_STRING_LITERAL(QUIC_QPACK_ENCODER_STREAM_SET_DYNAMIC_TABLE_CAPACITY);
RETURN_STRING_LITERAL(QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE);
RETURN_STRING_LITERAL(QUIC_QPACK_DECODER_STREAM_INVALID_ZERO_INCREMENT);
RETURN_STRING_LITERAL(QUIC_QPACK_DECODER_STREAM_INCREMENT_OVERFLOW);
RETURN_STRING_LITERAL(QUIC_QPACK_DECODER_STREAM_IMPOSSIBLE_INSERT_COUNT);
RETURN_STRING_LITERAL(QUIC_QPACK_DECODER_STREAM_INCORRECT_ACKNOWLEDGEMENT);
RETURN_STRING_LITERAL(QUIC_STREAM_DATA_BEYOND_CLOSE_OFFSET);
RETURN_STRING_LITERAL(QUIC_STREAM_MULTIPLE_OFFSET);
RETURN_STRING_LITERAL(QUIC_HTTP_FRAME_TOO_LARGE);
RETURN_STRING_LITERAL(QUIC_HTTP_FRAME_ERROR);
RETURN_STRING_LITERAL(QUIC_HTTP_FRAME_UNEXPECTED_ON_SPDY_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_INVALID_FRAME_SEQUENCE_ON_SPDY_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_INVALID_FRAME_SEQUENCE_ON_CONTROL_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_SERVER_INITIATED_BIDIRECTIONAL_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_STREAM_WRONG_DIRECTION);
RETURN_STRING_LITERAL(QUIC_HTTP_CLOSED_CRITICAL_STREAM);
RETURN_STRING_LITERAL(QUIC_HTTP_MISSING_SETTINGS_FRAME);
RETURN_STRING_LITERAL(QUIC_HTTP_DUPLICATE_SETTING_IDENTIFIER);
RETURN_STRING_LITERAL(QUIC_HTTP_INVALID_MAX_PUSH_ID);
RETURN_STRING_LITERAL(QUIC_HTTP_STREAM_LIMIT_TOO_LOW);
RETURN_STRING_LITERAL(QUIC_HTTP_ZERO_RTT_RESUMPTION_SETTINGS_MISMATCH);
RETURN_STRING_LITERAL(QUIC_HTTP_ZERO_RTT_REJECTION_SETTINGS_MISMATCH);
RETURN_STRING_LITERAL(QUIC_HTTP_GOAWAY_INVALID_STREAM_ID);
RETURN_STRING_LITERAL(QUIC_HTTP_GOAWAY_ID_LARGER_THAN_PREVIOUS);
RETURN_STRING_LITERAL(QUIC_HTTP_RECEIVE_SPDY_SETTING);
RETURN_STRING_LITERAL(QUIC_HTTP_RECEIVE_SPDY_FRAME);
RETURN_STRING_LITERAL(QUIC_HTTP_RECEIVE_SERVER_PUSH);
RETURN_STRING_LITERAL(QUIC_HTTP_INVALID_SETTING_VALUE);
RETURN_STRING_LITERAL(QUIC_HPACK_INDEX_VARINT_ERROR);
RETURN_STRING_LITERAL(QUIC_HPACK_NAME_LENGTH_VARINT_ERROR);
RETURN_STRING_LITERAL(QUIC_HPACK_VALUE_LENGTH_VARINT_ERROR);
RETURN_STRING_LITERAL(QUIC_HPACK_NAME_TOO_LONG);
RETURN_STRING_LITERAL(QUIC_HPACK_VALUE_TOO_LONG);
RETURN_STRING_LITERAL(QUIC_HPACK_NAME_HUFFMAN_ERROR);
RETURN_STRING_LITERAL(QUIC_HPACK_VALUE_HUFFMAN_ERROR);
RETURN_STRING_LITERAL(QUIC_HPACK_MISSING_DYNAMIC_TABLE_SIZE_UPDATE);
RETURN_STRING_LITERAL(QUIC_HPACK_INVALID_INDEX);
RETURN_STRING_LITERAL(QUIC_HPACK_INVALID_NAME_INDEX);
RETURN_STRING_LITERAL(QUIC_HPACK_DYNAMIC_TABLE_SIZE_UPDATE_NOT_ALLOWED);
RETURN_STRING_LITERAL(
QUIC_HPACK_INITIAL_TABLE_SIZE_UPDATE_IS_ABOVE_LOW_WATER_MARK);
RETURN_STRING_LITERAL(
QUIC_HPACK_TABLE_SIZE_UPDATE_IS_ABOVE_ACKNOWLEDGED_SETTING);
RETURN_STRING_LITERAL(QUIC_HPACK_TRUNCATED_BLOCK);
RETURN_STRING_LITERAL(QUIC_HPACK_FRAGMENT_TOO_LONG);
RETURN_STRING_LITERAL(QUIC_HPACK_COMPRESSED_HEADER_SIZE_EXCEEDS_LIMIT);
RETURN_STRING_LITERAL(QUIC_ZERO_RTT_UNRETRANSMITTABLE);
RETURN_STRING_LITERAL(QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED);
RETURN_STRING_LITERAL(QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED);
RETURN_STRING_LITERAL(QUIC_SILENT_IDLE_TIMEOUT);
RETURN_STRING_LITERAL(QUIC_MISSING_WRITE_KEYS);
RETURN_STRING_LITERAL(QUIC_KEY_UPDATE_ERROR);
RETURN_STRING_LITERAL(QUIC_AEAD_LIMIT_REACHED);
RETURN_STRING_LITERAL(QUIC_MAX_AGE_TIMEOUT);
RETURN_STRING_LITERAL(QUIC_INVALID_PRIORITY_UPDATE);
RETURN_STRING_LITERAL(QUIC_TLS_BAD_CERTIFICATE);
RETURN_STRING_LITERAL(QUIC_TLS_UNSUPPORTED_CERTIFICATE);
RETURN_STRING_LITERAL(QUIC_TLS_CERTIFICATE_REVOKED);
RETURN_STRING_LITERAL(QUIC_TLS_CERTIFICATE_EXPIRED);
RETURN_STRING_LITERAL(QUIC_TLS_CERTIFICATE_UNKNOWN);
RETURN_STRING_LITERAL(QUIC_TLS_INTERNAL_ERROR);
RETURN_STRING_LITERAL(QUIC_TLS_UNRECOGNIZED_NAME);
RETURN_STRING_LITERAL(QUIC_TLS_CERTIFICATE_REQUIRED);
RETURN_STRING_LITERAL(QUIC_INVALID_CHARACTER_IN_FIELD_VALUE);
RETURN_STRING_LITERAL(QUIC_TLS_UNEXPECTED_KEYING_MATERIAL_EXPORT_LABEL);
RETURN_STRING_LITERAL(QUIC_TLS_KEYING_MATERIAL_EXPORTS_MISMATCH);
RETURN_STRING_LITERAL(QUIC_TLS_KEYING_MATERIAL_EXPORT_NOT_AVAILABLE);
RETURN_STRING_LITERAL(QUIC_UNEXPECTED_DATA_BEFORE_ENCRYPTION_ESTABLISHED);
RETURN_STRING_LITERAL(QUIC_SERVER_UNHEALTHY);
RETURN_STRING_LITERAL(QUIC_CLIENT_LOST_NETWORK_ACCESS);
RETURN_STRING_LITERAL(QUIC_LAST_ERROR);
}
return "INVALID_ERROR_CODE";
}
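// Codes inside the crypto range are rendered using BoringSSL's long alert
// description; everything else falls back to the enum name or Unknown(n).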
std::string QuicIetfTransportErrorCodeString(QuicIetfTransportErrorCodes c) {
if (c >= CRYPTO_ERROR_FIRST && c <= CRYPTO_ERROR_LAST) {
const int tls_error = static_cast<int>(c - CRYPTO_ERROR_FIRST);
const char* tls_error_description = SSL_alert_desc_string_long(tls_error);
if (strcmp("unknown", tls_error_description) != 0) {
return absl::StrCat("CRYPTO_ERROR(", tls_error_description, ")");
}
return absl::StrCat("CRYPTO_ERROR(unknown(", tls_error, "))");
}
switch (c) {
RETURN_STRING_LITERAL(NO_IETF_QUIC_ERROR);
RETURN_STRING_LITERAL(INTERNAL_ERROR);
RETURN_STRING_LITERAL(SERVER_BUSY_ERROR);
RETURN_STRING_LITERAL(FLOW_CONTROL_ERROR);
RETURN_STRING_LITERAL(STREAM_LIMIT_ERROR);
RETURN_STRING_LITERAL(STREAM_STATE_ERROR);
RETURN_STRING_LITERAL(FINAL_SIZE_ERROR);
RETURN_STRING_LITERAL(FRAME_ENCODING_ERROR);
RETURN_STRING_LITERAL(TRANSPORT_PARAMETER_ERROR);
RETURN_STRING_LITERAL(CONNECTION_ID_LIMIT_ERROR);
RETURN_STRING_LITERAL(PROTOCOL_VIOLATION);
RETURN_STRING_LITERAL(INVALID_TOKEN);
RETURN_STRING_LITERAL(CRYPTO_BUFFER_EXCEEDED);
RETURN_STRING_LITERAL(KEY_UPDATE_ERROR);
RETURN_STRING_LITERAL(AEAD_LIMIT_REACHED);
case CRYPTO_ERROR_FIRST:
case CRYPTO_ERROR_LAST:
QUICHE_DCHECK(false) << "Unexpected error " << static_cast<uint64_t>(c);
break;
}
return absl::StrCat("Unknown(", static_cast<uint64_t>(c), ")");
}
std::ostream& operator<<(std::ostream& os,
const QuicIetfTransportErrorCodes& c) {
os << QuicIetfTransportErrorCodeString(c);
return os;
}
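// Maps an internal QuicErrorCode onto the value carried on the wire in an
// IETF CONNECTION_CLOSE. The first field of the returned mapping indicates a
// transport-level close (IETF transport error code) when true, and an
// application-level close (HTTP/3 or QPACK error code) when false. For
// example, QUIC_NO_ERROR maps to {true, NO_IETF_QUIC_ERROR}, while
// QUIC_QPACK_DECOMPRESSION_FAILED maps to
// {false, QuicHttpQpackErrorCode::DECOMPRESSION_FAILED}.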
QuicErrorCodeToIetfMapping QuicErrorCodeToTransportErrorCode(
QuicErrorCode error) {
switch (error) {
case QUIC_NO_ERROR:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_INTERNAL_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_STREAM_DATA_AFTER_TERMINATION:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_PACKET_HEADER:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_FRAME_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_MISSING_PAYLOAD:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_FEC_DATA:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_STREAM_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_OVERLAPPING_STREAM_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_UNENCRYPTED_STREAM_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_ATTEMPT_TO_SEND_UNENCRYPTED_STREAM_DATA:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_MAYBE_CORRUPTED_MEMORY:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_UNENCRYPTED_FEC_DATA:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_RST_STREAM_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_CONNECTION_CLOSE_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_GOAWAY_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_WINDOW_UPDATE_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_BLOCKED_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_STOP_WAITING_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_PATH_CLOSE_DATA:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_ACK_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_MESSAGE_DATA:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_VERSION_NEGOTIATION_PACKET:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_PUBLIC_RST_PACKET:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_DECRYPTION_FAILURE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_ENCRYPTION_FAILURE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_PACKET_TOO_LARGE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_PEER_GOING_AWAY:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_STREAM_ID:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_PRIORITY:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_TOO_MANY_OPEN_STREAMS:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_TOO_MANY_AVAILABLE_STREAMS:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_PUBLIC_RESET:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_VERSION:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_PACKET_WRONG_VERSION:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_0RTT_PACKET_NUMBER_OUT_OF_ORDER:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_HEADER_ID:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_NEGOTIATED_VALUE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_DECOMPRESSION_FAILURE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_NETWORK_IDLE_TIMEOUT:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_SILENT_IDLE_TIMEOUT:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_HANDSHAKE_TIMEOUT:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_ERROR_MIGRATING_ADDRESS:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_ERROR_MIGRATING_PORT:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_PACKET_WRITE_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_PACKET_READ_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_EMPTY_STREAM_FRAME_NO_FIN:
return {true, static_cast<uint64_t>(FRAME_ENCODING_ERROR)};
case QUIC_INVALID_HEADERS_STREAM_DATA:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HEADERS_STREAM_DATA_DECOMPRESS_FAILURE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA:
return {true, static_cast<uint64_t>(FLOW_CONTROL_ERROR)};
case QUIC_FLOW_CONTROL_SENT_TOO_MUCH_DATA:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_FLOW_CONTROL_INVALID_WINDOW:
return {true, static_cast<uint64_t>(FLOW_CONTROL_ERROR)};
case QUIC_CONNECTION_IP_POOLED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_TOO_MANY_OUTSTANDING_RECEIVED_PACKETS:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_CANCELLED:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_BAD_PACKET_LOSS_RATE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_PUBLIC_RESETS_POST_HANDSHAKE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_FAILED_TO_SERIALIZE_PACKET:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_TOO_MANY_RTOS:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_HANDSHAKE_FAILED:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_TAGS_OUT_OF_ORDER:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_TOO_MANY_ENTRIES:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_INVALID_VALUE_LENGTH:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_MESSAGE_AFTER_HANDSHAKE_COMPLETE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_CRYPTO_MESSAGE_TYPE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_CHANNEL_ID_SIGNATURE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_MESSAGE_PARAMETER_NO_OVERLAP:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_MESSAGE_INDEX_NOT_FOUND:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_UNSUPPORTED_PROOF_DEMAND:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_INTERNAL_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CRYPTO_VERSION_NOT_SUPPORTED:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_NO_SUPPORT:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_TOO_MANY_REJECTS:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_PROOF_INVALID:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_DUPLICATE_TAG:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_ENCRYPTION_LEVEL_INCORRECT:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_SERVER_CONFIG_EXPIRED:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_MESSAGE_WHILE_VALIDATING_CLIENT_HELLO:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_UPDATE_BEFORE_HANDSHAKE_COMPLETE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_CRYPTO_CHLO_TOO_LARGE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_VERSION_NEGOTIATION_MISMATCH:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_BAD_MULTIPATH_FLAG:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_MULTIPATH_PATH_DOES_NOT_EXIST:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_MULTIPATH_PATH_NOT_ACTIVE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_IP_ADDRESS_CHANGED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_NO_MIGRATABLE_STREAMS:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_TOO_MANY_CHANGES:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_NO_NEW_NETWORK:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_NON_MIGRATABLE_STREAM:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_DISABLED_BY_CONFIG:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_INTERNAL_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_CONNECTION_MIGRATION_HANDSHAKE_UNCONFIRMED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_PEER_PORT_CHANGE_HANDSHAKE_UNCONFIRMED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_TOO_MANY_STREAM_DATA_INTERVALS:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_STREAM_SEQUENCER_INVALID_STATE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_TOO_MANY_SESSIONS_ON_SERVER:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_STREAM_LENGTH_OVERFLOW:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_MAX_DATA_FRAME_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_MAX_STREAM_DATA_FRAME_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_MAX_STREAMS_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_STREAMS_BLOCKED_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_STREAM_BLOCKED_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_NEW_CONNECTION_ID_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_STOP_SENDING_FRAME_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_PATH_CHALLENGE_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_PATH_RESPONSE_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case IETF_QUIC_PROTOCOL_VIOLATION:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_INVALID_NEW_TOKEN:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_DATA_RECEIVED_ON_WRITE_UNIDIRECTIONAL_STREAM:
return {true, static_cast<uint64_t>(STREAM_STATE_ERROR)};
case QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_RETIRE_CONNECTION_ID_DATA:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_STREAMS_BLOCKED_ERROR:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_MAX_STREAMS_ERROR:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_HTTP_DECODER_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_STALE_CONNECTION_CANCELLED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_IETF_GQUIC_ERROR_MISSING:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_WINDOW_UPDATE_RECEIVED_ON_READ_UNIDIRECTIONAL_STREAM:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_TOO_MANY_BUFFERED_CONTROL_FRAMES:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_TRANSPORT_INVALID_CLIENT_INDICATION:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_QPACK_DECOMPRESSION_FAILED:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECOMPRESSION_FAILED)};
case QUIC_QPACK_ENCODER_STREAM_ERROR:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_DECODER_STREAM_ERROR:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_INTEGER_TOO_LARGE:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_STRING_LITERAL_TOO_LONG:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_HUFFMAN_ENCODING_ERROR:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_INVALID_STATIC_ENTRY:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_STATIC:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_INSERTION_INVALID_RELATIVE_INDEX:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_INSERTION_DYNAMIC_ENTRY_NOT_FOUND:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_DYNAMIC:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_ERROR_INSERTING_LITERAL:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_DUPLICATE_INVALID_RELATIVE_INDEX:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_DUPLICATE_DYNAMIC_ENTRY_NOT_FOUND:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_ENCODER_STREAM_SET_DYNAMIC_TABLE_CAPACITY:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR)};
case QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)};
case QUIC_QPACK_DECODER_STREAM_INVALID_ZERO_INCREMENT:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)};
case QUIC_QPACK_DECODER_STREAM_INCREMENT_OVERFLOW:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)};
case QUIC_QPACK_DECODER_STREAM_IMPOSSIBLE_INSERT_COUNT:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)};
case QUIC_QPACK_DECODER_STREAM_INCORRECT_ACKNOWLEDGEMENT:
return {false, static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)};
case QUIC_STREAM_DATA_BEYOND_CLOSE_OFFSET:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_STREAM_MULTIPLE_OFFSET:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_HTTP_FRAME_TOO_LARGE:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::EXCESSIVE_LOAD)};
case QUIC_HTTP_FRAME_ERROR:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_ERROR)};
case QUIC_HTTP_FRAME_UNEXPECTED_ON_SPDY_STREAM:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED)};
case QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED)};
case QUIC_HTTP_INVALID_FRAME_SEQUENCE_ON_SPDY_STREAM:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED)};
case QUIC_HTTP_INVALID_FRAME_SEQUENCE_ON_CONTROL_STREAM:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED)};
case QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR)};
case QUIC_HTTP_SERVER_INITIATED_BIDIRECTIONAL_STREAM:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR)};
case QUIC_HTTP_STREAM_WRONG_DIRECTION:
return {true, static_cast<uint64_t>(STREAM_STATE_ERROR)};
case QUIC_HTTP_CLOSED_CRITICAL_STREAM:
return {false, static_cast<uint64_t>(
QuicHttp3ErrorCode::CLOSED_CRITICAL_STREAM)};
case QUIC_HTTP_MISSING_SETTINGS_FRAME:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::MISSING_SETTINGS)};
case QUIC_HTTP_DUPLICATE_SETTING_IDENTIFIER:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR)};
case QUIC_HTTP_INVALID_MAX_PUSH_ID:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR)};
case QUIC_HTTP_STREAM_LIMIT_TOO_LOW:
return {false, static_cast<uint64_t>(
QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR)};
case QUIC_HTTP_RECEIVE_SERVER_PUSH:
return {false, static_cast<uint64_t>(
QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR)};
case QUIC_HTTP_ZERO_RTT_RESUMPTION_SETTINGS_MISMATCH:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR)};
case QUIC_HTTP_ZERO_RTT_REJECTION_SETTINGS_MISMATCH:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HTTP_GOAWAY_INVALID_STREAM_ID:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR)};
case QUIC_HTTP_GOAWAY_ID_LARGER_THAN_PREVIOUS:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR)};
case QUIC_HTTP_RECEIVE_SPDY_SETTING:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR)};
case QUIC_HTTP_INVALID_SETTING_VALUE:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR)};
case QUIC_HTTP_RECEIVE_SPDY_FRAME:
return {false,
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED)};
case QUIC_HPACK_INDEX_VARINT_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_NAME_LENGTH_VARINT_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_VALUE_LENGTH_VARINT_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_NAME_TOO_LONG:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_VALUE_TOO_LONG:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_NAME_HUFFMAN_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_VALUE_HUFFMAN_ERROR:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_MISSING_DYNAMIC_TABLE_SIZE_UPDATE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_INVALID_INDEX:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_INVALID_NAME_INDEX:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_DYNAMIC_TABLE_SIZE_UPDATE_NOT_ALLOWED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_INITIAL_TABLE_SIZE_UPDATE_IS_ABOVE_LOW_WATER_MARK:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_TABLE_SIZE_UPDATE_IS_ABOVE_ACKNOWLEDGED_SETTING:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_TRUNCATED_BLOCK:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_FRAGMENT_TOO_LONG:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HPACK_COMPRESSED_HEADER_SIZE_EXCEEDS_LIMIT:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_ZERO_RTT_UNRETRANSMITTABLE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_MISSING_WRITE_KEYS:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_KEY_UPDATE_ERROR:
return {true, static_cast<uint64_t>(KEY_UPDATE_ERROR)};
case QUIC_AEAD_LIMIT_REACHED:
return {true, static_cast<uint64_t>(AEAD_LIMIT_REACHED)};
case QUIC_MAX_AGE_TIMEOUT:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR)};
case QUIC_INVALID_PRIORITY_UPDATE:
return {false, static_cast<uint64_t>(
QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR)};
case QUIC_TLS_BAD_CERTIFICATE:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_BAD_CERTIFICATE)};
case QUIC_TLS_UNSUPPORTED_CERTIFICATE:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_UNSUPPORTED_CERTIFICATE)};
case QUIC_TLS_CERTIFICATE_REVOKED:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_CERTIFICATE_REVOKED)};
case QUIC_TLS_CERTIFICATE_EXPIRED:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_CERTIFICATE_EXPIRED)};
case QUIC_TLS_CERTIFICATE_UNKNOWN:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_CERTIFICATE_UNKNOWN)};
case QUIC_TLS_INTERNAL_ERROR:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_INTERNAL_ERROR)};
case QUIC_TLS_UNRECOGNIZED_NAME:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_UNRECOGNIZED_NAME)};
case QUIC_TLS_CERTIFICATE_REQUIRED:
return {true, static_cast<uint64_t>(CRYPTO_ERROR_FIRST +
SSL_AD_CERTIFICATE_REQUIRED)};
case QUIC_CONNECTION_ID_LIMIT_ERROR:
return {true, static_cast<uint64_t>(CONNECTION_ID_LIMIT_ERROR)};
case QUIC_TOO_MANY_CONNECTION_ID_WAITING_TO_RETIRE:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_INVALID_CHARACTER_IN_FIELD_VALUE:
return {false, static_cast<uint64_t>(QuicHttp3ErrorCode::MESSAGE_ERROR)};
case QUIC_TLS_UNEXPECTED_KEYING_MATERIAL_EXPORT_LABEL:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_TLS_KEYING_MATERIAL_EXPORTS_MISMATCH:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_TLS_KEYING_MATERIAL_EXPORT_NOT_AVAILABLE:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_UNEXPECTED_DATA_BEFORE_ENCRYPTION_ESTABLISHED:
return {true, static_cast<uint64_t>(PROTOCOL_VIOLATION)};
case QUIC_SERVER_UNHEALTHY:
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
case QUIC_HANDSHAKE_FAILED_PACKETS_BUFFERED_TOO_LONG:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_CLIENT_LOST_NETWORK_ACCESS:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_HANDSHAKE_FAILED_INVALID_HOSTNAME:
return {true, static_cast<uint64_t>(NO_IETF_QUIC_ERROR)};
case QUIC_LAST_ERROR:
return {false, static_cast<uint64_t>(QUIC_LAST_ERROR)};
}
return {true, static_cast<uint64_t>(INTERNAL_ERROR)};
}
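// Inverse of the TLS alert mappings above: converts a TLS alert description
// byte to the corresponding QUIC_TLS_* error code, falling back to
// QUIC_HANDSHAKE_FAILED for alerts without a dedicated code.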
QuicErrorCode TlsAlertToQuicErrorCode(uint8_t desc) {
switch (desc) {
case SSL_AD_BAD_CERTIFICATE:
return QUIC_TLS_BAD_CERTIFICATE;
case SSL_AD_UNSUPPORTED_CERTIFICATE:
return QUIC_TLS_UNSUPPORTED_CERTIFICATE;
case SSL_AD_CERTIFICATE_REVOKED:
return QUIC_TLS_CERTIFICATE_REVOKED;
case SSL_AD_CERTIFICATE_EXPIRED:
return QUIC_TLS_CERTIFICATE_EXPIRED;
case SSL_AD_CERTIFICATE_UNKNOWN:
return QUIC_TLS_CERTIFICATE_UNKNOWN;
case SSL_AD_INTERNAL_ERROR:
return QUIC_TLS_INTERNAL_ERROR;
case SSL_AD_UNRECOGNIZED_NAME:
return QUIC_TLS_UNRECOGNIZED_NAME;
case SSL_AD_CERTIFICATE_REQUIRED:
return QUIC_TLS_CERTIFICATE_REQUIRED;
default:
return QUIC_HANDSHAKE_FAILED;
}
}
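// Maps an internal reset-stream error code onto the HTTP/3 (or QPACK)
// application error code carried in IETF RESET_STREAM frames.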
uint64_t RstStreamErrorCodeToIetfResetStreamErrorCode(
QuicRstStreamErrorCode rst_stream_error_code) {
switch (rst_stream_error_code) {
case QUIC_STREAM_NO_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::HTTP3_NO_ERROR);
case QUIC_ERROR_PROCESSING_STREAM:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_MULTIPLE_TERMINATION_OFFSETS:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_BAD_APPLICATION_PAYLOAD:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_STREAM_CONNECTION_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR);
case QUIC_STREAM_PEER_GOING_AWAY:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_STREAM_CANCELLED:
return static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_CANCELLED);
case QUIC_RST_ACKNOWLEDGEMENT:
return static_cast<uint64_t>(QuicHttp3ErrorCode::HTTP3_NO_ERROR);
case QUIC_REFUSED_STREAM:
return static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR);
case QUIC_INVALID_PROMISE_URL:
return static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR);
case QUIC_UNAUTHORIZED_PROMISE_URL:
return static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR);
case QUIC_DUPLICATE_PROMISE_URL:
return static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR);
case QUIC_PROMISE_VARY_MISMATCH:
return static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_CANCELLED);
case QUIC_INVALID_PROMISE_METHOD:
return static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR);
case QUIC_PUSH_STREAM_TIMED_OUT:
return static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_CANCELLED);
case QUIC_HEADERS_TOO_LARGE:
return static_cast<uint64_t>(QuicHttp3ErrorCode::EXCESSIVE_LOAD);
case QUIC_STREAM_TTL_EXPIRED:
return static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_CANCELLED);
case QUIC_DATA_AFTER_CLOSE_OFFSET:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_STREAM_GENERAL_PROTOCOL_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_STREAM_INTERNAL_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR);
case QUIC_STREAM_STREAM_CREATION_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR);
case QUIC_STREAM_CLOSED_CRITICAL_STREAM:
return static_cast<uint64_t>(QuicHttp3ErrorCode::CLOSED_CRITICAL_STREAM);
case QUIC_STREAM_FRAME_UNEXPECTED:
return static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED);
case QUIC_STREAM_FRAME_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_ERROR);
case QUIC_STREAM_EXCESSIVE_LOAD:
return static_cast<uint64_t>(QuicHttp3ErrorCode::EXCESSIVE_LOAD);
case QUIC_STREAM_ID_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR);
case QUIC_STREAM_SETTINGS_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR);
case QUIC_STREAM_MISSING_SETTINGS:
return static_cast<uint64_t>(QuicHttp3ErrorCode::MISSING_SETTINGS);
case QUIC_STREAM_REQUEST_REJECTED:
return static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_REJECTED);
case QUIC_STREAM_REQUEST_INCOMPLETE:
return static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_INCOMPLETE);
case QUIC_STREAM_CONNECT_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::CONNECT_ERROR);
case QUIC_STREAM_VERSION_FALLBACK:
return static_cast<uint64_t>(QuicHttp3ErrorCode::VERSION_FALLBACK);
case QUIC_STREAM_DECOMPRESSION_FAILED:
return static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECOMPRESSION_FAILED);
case QUIC_STREAM_ENCODER_STREAM_ERROR:
return static_cast<uint64_t>(
QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR);
case QUIC_STREAM_DECODER_STREAM_ERROR:
return static_cast<uint64_t>(
QuicHttpQpackErrorCode::DECODER_STREAM_ERROR);
case QUIC_STREAM_UNKNOWN_APPLICATION_ERROR_CODE:
return static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR);
case QUIC_STREAM_WEBTRANSPORT_SESSION_GONE:
return static_cast<uint64_t>(QuicHttp3ErrorCode::CONNECT_ERROR);
case QUIC_STREAM_WEBTRANSPORT_BUFFERED_STREAMS_LIMIT_EXCEEDED:
return static_cast<uint64_t>(QuicHttp3ErrorCode::CONNECT_ERROR);
case QUIC_APPLICATION_DONE_WITH_STREAM:
return static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR);
case QUIC_STREAM_LAST_ERROR:
return static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR);
}
return static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR);
}
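// Inverse direction of the mapping above. Not a strict inverse: several
// internal codes share one wire code, and unrecognized wire codes collapse
// to QUIC_STREAM_UNKNOWN_APPLICATION_ERROR_CODE.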
QuicRstStreamErrorCode IetfResetStreamErrorCodeToRstStreamErrorCode(
uint64_t ietf_error_code) {
switch (ietf_error_code) {
case static_cast<uint64_t>(QuicHttp3ErrorCode::HTTP3_NO_ERROR):
return QUIC_STREAM_NO_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR):
return QUIC_STREAM_GENERAL_PROTOCOL_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR):
return QUIC_STREAM_INTERNAL_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR):
return QUIC_STREAM_STREAM_CREATION_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::CLOSED_CRITICAL_STREAM):
return QUIC_STREAM_CLOSED_CRITICAL_STREAM;
case static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED):
return QUIC_STREAM_FRAME_UNEXPECTED;
case static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_ERROR):
return QUIC_STREAM_FRAME_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::EXCESSIVE_LOAD):
return QUIC_STREAM_EXCESSIVE_LOAD;
case static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR):
return QUIC_STREAM_ID_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR):
return QUIC_STREAM_SETTINGS_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::MISSING_SETTINGS):
return QUIC_STREAM_MISSING_SETTINGS;
case static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_REJECTED):
return QUIC_STREAM_REQUEST_REJECTED;
case static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_CANCELLED):
return QUIC_STREAM_CANCELLED;
case static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_INCOMPLETE):
return QUIC_STREAM_REQUEST_INCOMPLETE;
case static_cast<uint64_t>(QuicHttp3ErrorCode::CONNECT_ERROR):
return QUIC_STREAM_CONNECT_ERROR;
case static_cast<uint64_t>(QuicHttp3ErrorCode::VERSION_FALLBACK):
return QUIC_STREAM_VERSION_FALLBACK;
case static_cast<uint64_t>(QuicHttpQpackErrorCode::DECOMPRESSION_FAILED):
return QUIC_STREAM_DECOMPRESSION_FAILED;
case static_cast<uint64_t>(QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR):
return QUIC_STREAM_ENCODER_STREAM_ERROR;
case static_cast<uint64_t>(QuicHttpQpackErrorCode::DECODER_STREAM_ERROR):
return QUIC_STREAM_DECODER_STREAM_ERROR;
}
return QUIC_STREAM_UNKNOWN_APPLICATION_ERROR_CODE;
}
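// Factory helpers that keep the internal and wire representations of a
// stream reset error in sync, translating whichever side was supplied.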
QuicResetStreamError QuicResetStreamError::FromInternal(
QuicRstStreamErrorCode code) {
return QuicResetStreamError(
code, RstStreamErrorCodeToIetfResetStreamErrorCode(code));
}
QuicResetStreamError QuicResetStreamError::FromIetf(uint64_t code) {
return QuicResetStreamError(
IetfResetStreamErrorCodeToRstStreamErrorCode(code), code);
}
QuicResetStreamError QuicResetStreamError::FromIetf(QuicHttp3ErrorCode code) {
return FromIetf(static_cast<uint64_t>(code));
}
QuicResetStreamError QuicResetStreamError::FromIetf(
QuicHttpQpackErrorCode code) {
return FromIetf(static_cast<uint64_t>(code));
}
#undef RETURN_STRING_LITERAL
} | #include "quiche/quic/core/quic_error_codes.h"
#include <cstdint>
#include <string>
#include "openssl/ssl.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
using QuicErrorCodesTest = QuicTest;
TEST_F(QuicErrorCodesTest, QuicErrorCodeToString) {
EXPECT_STREQ("QUIC_NO_ERROR", QuicErrorCodeToString(QUIC_NO_ERROR));
}
TEST_F(QuicErrorCodesTest, QuicIetfTransportErrorCodeString) {
EXPECT_EQ("CRYPTO_ERROR(missing extension)",
QuicIetfTransportErrorCodeString(
static_cast<quic::QuicIetfTransportErrorCodes>(
CRYPTO_ERROR_FIRST + SSL_AD_MISSING_EXTENSION)));
EXPECT_EQ("NO_IETF_QUIC_ERROR",
QuicIetfTransportErrorCodeString(NO_IETF_QUIC_ERROR));
EXPECT_EQ("INTERNAL_ERROR", QuicIetfTransportErrorCodeString(INTERNAL_ERROR));
EXPECT_EQ("SERVER_BUSY_ERROR",
QuicIetfTransportErrorCodeString(SERVER_BUSY_ERROR));
EXPECT_EQ("FLOW_CONTROL_ERROR",
QuicIetfTransportErrorCodeString(FLOW_CONTROL_ERROR));
EXPECT_EQ("STREAM_LIMIT_ERROR",
QuicIetfTransportErrorCodeString(STREAM_LIMIT_ERROR));
EXPECT_EQ("STREAM_STATE_ERROR",
QuicIetfTransportErrorCodeString(STREAM_STATE_ERROR));
EXPECT_EQ("FINAL_SIZE_ERROR",
QuicIetfTransportErrorCodeString(FINAL_SIZE_ERROR));
EXPECT_EQ("FRAME_ENCODING_ERROR",
QuicIetfTransportErrorCodeString(FRAME_ENCODING_ERROR));
EXPECT_EQ("TRANSPORT_PARAMETER_ERROR",
QuicIetfTransportErrorCodeString(TRANSPORT_PARAMETER_ERROR));
EXPECT_EQ("CONNECTION_ID_LIMIT_ERROR",
QuicIetfTransportErrorCodeString(CONNECTION_ID_LIMIT_ERROR));
EXPECT_EQ("PROTOCOL_VIOLATION",
QuicIetfTransportErrorCodeString(PROTOCOL_VIOLATION));
EXPECT_EQ("INVALID_TOKEN", QuicIetfTransportErrorCodeString(INVALID_TOKEN));
EXPECT_EQ("CRYPTO_BUFFER_EXCEEDED",
QuicIetfTransportErrorCodeString(CRYPTO_BUFFER_EXCEEDED));
EXPECT_EQ("KEY_UPDATE_ERROR",
QuicIetfTransportErrorCodeString(KEY_UPDATE_ERROR));
EXPECT_EQ("AEAD_LIMIT_REACHED",
QuicIetfTransportErrorCodeString(AEAD_LIMIT_REACHED));
EXPECT_EQ("Unknown(1024)",
QuicIetfTransportErrorCodeString(
static_cast<quic::QuicIetfTransportErrorCodes>(0x400)));
}
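// Verifies that every internal error maps into a valid wire range: transport
// closes must use a code <= 0x0f or a crypto alert in [0x100, 0x1ff] (which
// must round-trip through TlsAlertToQuicErrorCode), and application closes
// must use an HTTP/3 code in [0x100, 0x110] or a QPACK code in
// [0x200, 0x202].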
TEST_F(QuicErrorCodesTest, QuicErrorCodeToTransportErrorCode) {
for (uint32_t internal_error_code = 0; internal_error_code < QUIC_LAST_ERROR;
++internal_error_code) {
std::string internal_error_code_string =
QuicErrorCodeToString(static_cast<QuicErrorCode>(internal_error_code));
if (internal_error_code_string == "INVALID_ERROR_CODE") {
continue;
}
QuicErrorCodeToIetfMapping ietf_error_code =
QuicErrorCodeToTransportErrorCode(
static_cast<QuicErrorCode>(internal_error_code));
if (ietf_error_code.is_transport_close) {
QuicIetfTransportErrorCodes transport_error_code =
static_cast<QuicIetfTransportErrorCodes>(ietf_error_code.error_code);
bool is_transport_crypto_error_code =
transport_error_code >= 0x100 && transport_error_code <= 0x1ff;
if (is_transport_crypto_error_code) {
EXPECT_EQ(
internal_error_code,
TlsAlertToQuicErrorCode(transport_error_code - CRYPTO_ERROR_FIRST));
}
bool is_valid_transport_error_code =
transport_error_code <= 0x0f || is_transport_crypto_error_code;
EXPECT_TRUE(is_valid_transport_error_code) << internal_error_code_string;
} else {
uint64_t application_error_code = ietf_error_code.error_code;
bool is_valid_http3_error_code =
application_error_code >= 0x100 && application_error_code <= 0x110;
bool is_valid_qpack_error_code =
application_error_code >= 0x200 && application_error_code <= 0x202;
EXPECT_TRUE(is_valid_http3_error_code || is_valid_qpack_error_code)
<< internal_error_code_string;
}
}
}
using QuicRstErrorCodesTest = QuicTest;
TEST_F(QuicRstErrorCodesTest, QuicRstStreamErrorCodeToString) {
EXPECT_STREQ("QUIC_BAD_APPLICATION_PAYLOAD",
QuicRstStreamErrorCodeToString(QUIC_BAD_APPLICATION_PAYLOAD));
}
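// Round-trip property: each HTTP/3 / QPACK wire code converts to an internal
// QuicRstStreamErrorCode and back to the same wire code.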
TEST_F(QuicRstErrorCodesTest,
IetfResetStreamErrorCodeToRstStreamErrorCodeAndBack) {
for (uint64_t wire_code :
{static_cast<uint64_t>(QuicHttp3ErrorCode::HTTP3_NO_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::GENERAL_PROTOCOL_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::INTERNAL_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::CLOSED_CRITICAL_STREAM),
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_UNEXPECTED),
static_cast<uint64_t>(QuicHttp3ErrorCode::FRAME_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::EXCESSIVE_LOAD),
static_cast<uint64_t>(QuicHttp3ErrorCode::ID_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::SETTINGS_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::MISSING_SETTINGS),
static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_REJECTED),
static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_CANCELLED),
static_cast<uint64_t>(QuicHttp3ErrorCode::REQUEST_INCOMPLETE),
static_cast<uint64_t>(QuicHttp3ErrorCode::CONNECT_ERROR),
static_cast<uint64_t>(QuicHttp3ErrorCode::VERSION_FALLBACK),
static_cast<uint64_t>(QuicHttpQpackErrorCode::DECOMPRESSION_FAILED),
static_cast<uint64_t>(QuicHttpQpackErrorCode::ENCODER_STREAM_ERROR),
static_cast<uint64_t>(QuicHttpQpackErrorCode::DECODER_STREAM_ERROR)}) {
QuicRstStreamErrorCode rst_stream_error_code =
IetfResetStreamErrorCodeToRstStreamErrorCode(wire_code);
EXPECT_EQ(wire_code, RstStreamErrorCodeToIetfResetStreamErrorCode(
rst_stream_error_code));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_error_codes.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_error_codes_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
2f292d6b-3fad-4bd3-9908-3933e5739475 | cpp | tensorflow/tensorflow | eigen_support | tensorflow/lite/kernels/eigen_support.cc | tensorflow/lite/kernels/eigen_support_test.cc | #include "tensorflow/lite/kernels/eigen_support.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/lite/kernels/op_macros.h"
#ifndef EIGEN_DONT_ALIGN
#include "tensorflow/lite/util.h"
#endif
namespace tflite {
namespace eigen_support {
namespace {
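// Pool size used when the caller does not give a usable hint.
// GetNumThreads() passes non-negative hints through and maps anything else
// to the default, e.g. GetNumThreads(-1) == 4 while GetNumThreads(0) == 0.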
const int kDefaultNumThreadpoolThreads = 4;
bool IsValidNumThreads(int num_threads) { return num_threads >= -1; }
int GetNumThreads(int num_threads) {
return num_threads > -1 ? num_threads : kDefaultNumThreadpoolThreads;
}
#ifndef EIGEN_DONT_ALIGN
static_assert(
kDefaultTensorAlignment % EIGEN_MAX_ALIGN_BYTES == 0,
"kDefaultTensorAlignment doesn't comply with Eigen alignment requirement.");
#endif
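// Sets Eigen's global thread count; only effective when Eigen was built
// with OpenMP support, otherwise a no-op.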
void SetEigenNbThreads(int threads) {
#if defined(EIGEN_HAS_OPENMP)
Eigen::setNbThreads(threads);
#endif
}
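// Adapts Eigen::ThreadPool to Eigen::ThreadPoolInterface. For
// num_threads <= 1 no pool is created and Schedule() runs the closure
// inline on the calling thread.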
class EigenThreadPoolWrapper : public Eigen::ThreadPoolInterface {
public:
explicit EigenThreadPoolWrapper(int num_threads) {
if (num_threads > 1) {
pool_ = std::make_unique<Eigen::ThreadPool>(num_threads);
}
}
~EigenThreadPoolWrapper() override {}
void Schedule(std::function<void()> fn) override {
if (pool_) {
pool_->Schedule(std::move(fn));
} else {
fn();
}
}
int NumThreads() const override { return pool_ ? pool_->NumThreads() : 1; }
int CurrentThreadId() const override {
return pool_ ? pool_->CurrentThreadId() : 0;
}
private:
std::unique_ptr<Eigen::ThreadPool> pool_;
};
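// Owns the thread pool wrapper and ThreadPoolDevice, creating both lazily
// on the first GetThreadPoolDevice() call and discarding them whenever the
// effective thread count changes so they are rebuilt with the new size.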
class LazyEigenThreadPoolHolder {
public:
explicit LazyEigenThreadPoolHolder(int num_threads) {
SetNumThreads(num_threads);
}
const Eigen::ThreadPoolDevice* GetThreadPoolDevice() {
if (!device_) {
thread_pool_wrapper_ =
std::make_unique<EigenThreadPoolWrapper>(target_num_threads_);
device_ = std::make_unique<Eigen::ThreadPoolDevice>(
thread_pool_wrapper_.get(), target_num_threads_);
}
return device_.get();
}
void SetNumThreads(int num_threads) {
const int target_num_threads = GetNumThreads(num_threads);
if (target_num_threads_ != target_num_threads) {
target_num_threads_ = target_num_threads;
device_.reset();
thread_pool_wrapper_.reset();
}
}
private:
int target_num_threads_ = kDefaultNumThreadpoolThreads;
std::unique_ptr<Eigen::ThreadPoolDevice> device_;
std::unique_ptr<Eigen::ThreadPoolInterface> thread_pool_wrapper_;
};
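// Reference-counted Eigen context stored on the TfLiteContext under
// kTfLiteEigenContext; destroyed when the last user decrements the count.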
struct RefCountedEigenContext : public TfLiteExternalContext {
std::unique_ptr<LazyEigenThreadPoolHolder> thread_pool_holder;
int num_references = 0;
};
RefCountedEigenContext* GetEigenContext(TfLiteContext* context) {
return reinterpret_cast<RefCountedEigenContext*>(
context->GetExternalContext(context, kTfLiteEigenContext));
}
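// Refresh callback invoked by the runtime when recommended_num_threads
// changes; updates Eigen's global count and the lazy thread pool holder.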
TfLiteStatus Refresh(TfLiteContext* context) {
if (IsValidNumThreads(context->recommended_num_threads)) {
SetEigenNbThreads(GetNumThreads(context->recommended_num_threads));
}
auto* ptr = GetEigenContext(context);
if (ptr != nullptr) {
ptr->thread_pool_holder->SetNumThreads(context->recommended_num_threads);
}
return kTfLiteOk;
}
}
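// A typical kernel lifecycle (a sketch only; the exact callbacks and
// signatures depend on the op registration):
//
//   void* Init(TfLiteContext* context, ...) {
//     eigen_support::IncrementUsageCounter(context);
//     ...
//   }
//   TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
//     const Eigen::ThreadPoolDevice* device =
//         eigen_support::GetThreadPoolDevice(context);  // Requires Init.
//     ...
//   }
//   void Free(TfLiteContext* context, void* buffer) {
//     eigen_support::DecrementUsageCounter(context);
//   }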
void IncrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
if (IsValidNumThreads(context->recommended_num_threads)) {
SetEigenNbThreads(context->recommended_num_threads);
}
ptr = new RefCountedEigenContext;
ptr->type = kTfLiteEigenContext;
ptr->Refresh = Refresh;
ptr->thread_pool_holder = std::make_unique<LazyEigenThreadPoolHolder>(
context->recommended_num_threads);
ptr->num_references = 0;
context->SetExternalContext(context, kTfLiteEigenContext, ptr);
}
ptr->num_references++;
}
void DecrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to DecrementUsageCounter() not preceded by "
"IncrementUsageCounter()");
}
if (--ptr->num_references == 0) {
delete ptr;
context->SetExternalContext(context, kTfLiteEigenContext, nullptr);
}
}
const Eigen::ThreadPoolDevice* GetThreadPoolDevice(TfLiteContext* context) {
auto* ptr = GetEigenContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to GetFromContext() not preceded by IncrementUsageCounter()");
}
return ptr->thread_pool_holder->GetThreadPoolDevice();
}
}
} | #include "tensorflow/lite/kernels/eigen_support.h"
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
namespace tflite {
namespace eigen_support {
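// Minimal TfLiteContext stub: stores the external context in a member and
// routes the GetExternalContext/SetExternalContext callbacks through it.
// recommended_num_threads starts at -1, i.e. "use the library default".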
struct TestTfLiteContext : public TfLiteContext {
TestTfLiteContext() {
recommended_num_threads = -1;
external_context = nullptr;
GetExternalContext = GetExternalContextImpl;
SetExternalContext = SetExternalContextImpl;
}
static void SetExternalContextImpl(TfLiteContext* context,
TfLiteExternalContextType type,
TfLiteExternalContext* external_context) {
static_cast<TestTfLiteContext*>(context)->external_context =
external_context;
}
static TfLiteExternalContext* GetExternalContextImpl(
TfLiteContext* context, TfLiteExternalContextType type) {
return static_cast<TestTfLiteContext*>(context)->external_context;
}
TfLiteExternalContext* external_context;
};
TEST(EigenSupport, Default) {
TestTfLiteContext context;
IncrementUsageCounter(&context);
ASSERT_NE(context.external_context, nullptr);
EXPECT_EQ(context.external_context->type, kTfLiteEigenContext);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, SingleThreaded) {
TestTfLiteContext context;
context.recommended_num_threads = 1;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 1);
EXPECT_EQ(thread_pool_device->numThreadsInPool(), 1);
bool executed = false;
auto notification =
thread_pool_device->enqueue([&executed]() { executed = true; });
ASSERT_NE(notification, nullptr);
notification->Wait();
delete notification;
EXPECT_TRUE(executed);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, MultiThreaded) {
TestTfLiteContext context;
context.recommended_num_threads = 2;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 2);
bool executed = false;
auto notification =
thread_pool_device->enqueue([&executed]() { executed = true; });
ASSERT_NE(notification, nullptr);
notification->Wait();
delete notification;
EXPECT_TRUE(executed);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, NumThreadsChanged) {
TestTfLiteContext context;
context.recommended_num_threads = 1;
IncrementUsageCounter(&context);
auto thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 1);
context.recommended_num_threads = 3;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 3);
context.recommended_num_threads = -1;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
context.recommended_num_threads = 0;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 0);
context.recommended_num_threads = 3;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 3);
context.recommended_num_threads = -5;
ASSERT_NE(context.external_context, nullptr);
context.external_context->Refresh(&context);
thread_pool_device = GetThreadPoolDevice(&context);
ASSERT_NE(thread_pool_device, nullptr);
EXPECT_EQ(thread_pool_device->numThreads(), 4);
DecrementUsageCounter(&context);
}
TEST(EigenSupport, RefCounting) {
TestTfLiteContext context;
EXPECT_EQ(context.external_context, nullptr);
IncrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
IncrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
DecrementUsageCounter(&context);
EXPECT_NE(context.external_context, nullptr);
DecrementUsageCounter(&context);
EXPECT_EQ(context.external_context, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/eigen_support.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/eigen_support_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6179176a-3ede-48e4-aaad-e68588ded661 | cpp | tensorflow/tensorflow | eigen_spatial_convolutions | tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h | third_party/xla/xla/tsl/framework/convolution/eigen_spatial_convolutions_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define EIGEN_USE_CUSTOM_THREAD_POOL
#define EIGEN_USE_THREADS
#define Eigen EigenForTFLite
#define TFLITE_REDUCE_INSTANTIATIONS
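// With TFLITE_REDUCE_INSTANTIATIONS defined, tensor contraction dispatch is
// narrowed to the single supported format (both inner dimensions contiguous,
// rhs not reordered) to cut template instantiations and binary size; any
// other format trips the eigen_assert below.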
#if defined(TFLITE_REDUCE_INSTANTIATIONS)
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
if (this->m_lhs_inner_dim_contiguous && this->m_rhs_inner_dim_contiguous && \
!this->m_rhs_inner_dim_reordered) { \
METHOD<true, true, false, ALIGNMENT> ARGS; \
} else { \
eigen_assert(false && "Unsupported contraction formats"); \
}
#endif
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/tsl/framework/convolution/eigen_spatial_convolutions-inl.h"
#endif | #include "xla/tsl/framework/convolution/eigen_spatial_convolutions.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace Eigen {
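// Approximate equality with a relative tolerance of 1e-3.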
#define EigenApprox(a, b) \
{ ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3); }
static int ceil_div(int a, int b) { return (a + b - 1) / b; }
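// The convolution tests below compare SpatialConvolution() against a naive
// reference loop that skips out-of-bounds taps; the default call (no stride
// or padding arguments) produces an output the same size as the input,
// hence the r - 1 + i / c - 1 + j tap offsets and bounds checks.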
TEST(EigenSpatialConvolutionsTest, Simple) {
const int input_depth = 7;
const int input_rows = 4;
const int input_cols = 5;
const int output_depth = 10;
const int patch_rows = 3;
const int patch_cols = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
Tensor<float, 3> input(input_depth, input_rows, input_cols);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 3> result(output_depth, output_rows, output_cols);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < output_rows &&
c - 1 + j < output_cols) {
expected +=
input(id, r - 1 + i, c - 1 + j) * kernel(od, id, r, c);
}
}
}
}
EigenApprox(result(od, i, j), expected);
}
}
}
}
TEST(EigenSpatialConvolutionsTest, SimpleRowMajor) {
const int input_depth = 7;
const int input_rows = 4;
const int input_cols = 5;
const int output_depth = 10;
const int patch_rows = 3;
const int patch_cols = 4;
const int output_rows = input_rows;
const int output_cols = input_cols;
Tensor<float, 3, RowMajor> input(input_cols, input_rows, input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 3, RowMajor> result(output_cols, output_rows, output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(0), output_cols);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_depth);
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < output_rows &&
c - 1 + j < output_cols) {
expected +=
input(c - 1 + j, r - 1 + i, id) * kernel(c, r, id, od);
}
}
}
}
EigenApprox(result(j, i, od), expected);
}
}
}
}
TEST(EigenSpatialConvolutionsTest, BatchedSpatialConvolution) {
Tensor<float, 4> input(10, 5, 5, 13);
Tensor<float, 4> kernel(7, 10, 3, 3);
Tensor<float, 4> result(7, 5, 5, 13);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(0), 7);
EXPECT_EQ(result.dimension(1), 5);
EXPECT_EQ(result.dimension(2), 5);
for (int b = 0; b < 13; ++b) {
for (int od = 0; od < 7; ++od) {
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 5; ++j) {
float expected = 0.0f;
for (int c = 0; c < 3; ++c) {
for (int r = 0; r < 3; ++r) {
for (int id = 0; id < 10; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < 5 &&
c - 1 + j < 5) {
expected +=
input(id, r - 1 + i, c - 1 + j, b) * kernel(od, id, r, c);
}
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, BatchedSpatialConvolutionRowMajor) {
Tensor<float, 4, RowMajor> input(13, 5, 5, 10);
Tensor<float, 4, RowMajor> kernel(3, 3, 10, 7);
Tensor<float, 4, RowMajor> result(13, 5, 5, 7);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result = SpatialConvolution(input, kernel);
EXPECT_EQ(result.dimension(1), 5);
EXPECT_EQ(result.dimension(2), 5);
EXPECT_EQ(result.dimension(3), 7);
for (int b = 0; b < 13; ++b) {
for (int od = 0; od < 7; ++od) {
for (int i = 0; i < 5; ++i) {
for (int j = 0; j < 5; ++j) {
float expected = 0.0f;
for (int c = 0; c < 3; ++c) {
for (int r = 0; r < 3; ++r) {
for (int id = 0; id < 10; ++id) {
if (r - 1 + i >= 0 && c - 1 + j >= 0 && r - 1 + i < 5 &&
c - 1 + j < 5) {
expected +=
input(b, c - 1 + j, r - 1 + i, id) * kernel(c, r, id, od);
}
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, ValidSpatialConvolution) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
const int stride = 1;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, r + i, c + j, b) * kernel(od, id, r, c);
}
}
}
if (result(od, i, j, b) != expected) {
std::cout << "at od=" << od << " b=" << b << " i=" << i
<< " j=" << j << " " << result(od, i, j, b) << " vs "
<< expected << std::endl;
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, ValidSpatialConvolutionUnequalStrides) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 4;
const int patch_cols = 4;
const int row_stride = 1;
const int col_stride = 2;
const int output_rows = 2;
const int output_cols = 1;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
result =
SpatialConvolution(input, kernel, row_stride, col_stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
  // Skip the element-wise reference check; only the output dimensions are
  // verified for the unequal-stride case.
  if (true) return;
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected +=
input(id, r + row_stride * i, c + col_stride * j, b) *
kernel(od, id, r, c);
}
}
}
if (result(od, i, j, b) != expected) {
std::cout << "at od=" << od << " b=" << b << " i=" << i
<< " j=" << j << " " << result(od, i, j, b) << " vs "
<< expected << std::endl;
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, ValidSpatialConvolutionRowMajor) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = input_rows - patch_rows + 1;
const int output_cols = input_cols - patch_cols + 1;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
const int stride = 1;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_rows; ++c) {
for (int r = 0; r < patch_cols; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, c + j, r + i, id) * kernel(c, r, id, od);
}
}
}
if (result(b, j, i, od) != expected) {
std::cout << "at od=" << od << " b=" << b << " i=" << i
<< " j=" << j << " " << result(b, j, i, od) << " vs "
<< expected << std::endl;
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
TEST(EigenSpatialConvolutionsTest, StridedSpatialConvolution) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, r + stride * i, c + stride * j, b) *
kernel(od, id, r, c);
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
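// A 1x1 kernel with stride 2: the kernel window is smaller than the stride,
// so some input pixels are skipped entirely.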
TEST(EigenSpatialConvolutionsTest, KernelSmallerThanStride) {
const int input_depth = 2;
const int input_rows = 3;
const int input_cols = 3;
const int num_batches = 5;
const int output_depth = 6;
const int patch_rows = 1;
const int patch_cols = 1;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, r + stride * i, c + stride * j, b) *
kernel(od, id, r, c);
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
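// RowMajor variant of the strided VALID convolution test.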
TEST(EigenSpatialConvolutionsTest, StridedSpatialConvolutionRowMajor) {
const int input_depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, c + stride * j, r + stride * i, id) *
kernel(c, r, id, od);
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
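// Atrous (dilated) convolution: in_stride controls the dilation of the
// kernel sampling positions.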
TEST(EigenSpatialConvolutionsTest, AtrousSpatial) {
const int input_depth = 10;
const int input_rows = 7;
const int input_cols = 7;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 3;
const int output_cols = 3;
Tensor<float, 4> input(input_depth, input_rows, input_cols, num_batches);
Tensor<float, 4> kernel(output_depth, input_depth, patch_rows, patch_cols);
Tensor<float, 4> result(output_depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 1;
int in_stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID,
in_stride, in_stride);
EXPECT_EQ(result.dimension(0), output_depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(id, in_stride * r + stride * i,
in_stride * c + stride * j, b) *
kernel(od, id, r, c);
}
}
}
EigenApprox(result(od, i, j, b), expected);
}
}
}
}
}
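// RowMajor variant of the atrous convolution test.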
TEST(EigenSpatialConvolutionsTest, AtrousSpatialRowMajor) {
const int input_depth = 10;
const int input_rows = 7;
const int input_cols = 7;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 3;
const int output_cols = 3;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int stride = 1;
int in_stride = 2;
result = SpatialConvolution(input, kernel, stride, stride, PADDING_VALID,
in_stride, in_stride);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, in_stride * c + stride * j,
in_stride * r + stride * i, id) *
kernel(c, r, id, od);
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
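// Atrous convolution with different strides and dilations per spatial
// dimension.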
TEST(EigenSpatialConvolutionsTest, AtrousSpatialRowMajorUnequalStrides) {
const int input_depth = 10;
const int input_rows = 7;
const int input_cols = 7;
const int num_batches = 13;
const int output_depth = 7;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 1;
const int output_cols = 3;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows,
input_depth);
Tensor<float, 4, RowMajor> kernel(patch_cols, patch_rows, input_depth,
output_depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
output_depth);
input = input.constant(11.0f) + input.random();
kernel = kernel.constant(2.0f) + kernel.random();
result.setRandom();
int row_stride = 1;
int col_stride = 2;
int row_in_stride = 3;
int col_in_stride = 1;
result = SpatialConvolution(input, kernel, row_stride, col_stride,
PADDING_VALID, row_in_stride, col_in_stride);
EXPECT_EQ(result.dimension(0), num_batches);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_depth);
for (int b = 0; b < num_batches; ++b) {
for (int od = 0; od < output_depth; ++od) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = 0.0f;
for (int c = 0; c < patch_cols; ++c) {
for (int r = 0; r < patch_rows; ++r) {
for (int id = 0; id < input_depth; ++id) {
expected += input(b, col_in_stride * c + col_stride * j,
row_in_stride * r + row_stride * i, id) *
kernel(c, r, id, od);
}
}
}
EigenApprox(result(b, j, i, od), expected);
}
}
}
}
}
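// Verifies that extract_image_patches with inflation strides is equivalent to
// explicitly inflating and padding the input before a plain patch extraction.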
TEST(EigenSpatialConvolutionsTest, SpatialConvContractionMapper) {
typedef Tensor<float, 1>::DimensionPair DimPair;
Tensor<float, 4> out(1, 1, 2, 1);
Tensor<float, 4> kern(1, 1, 2, 2);
for (int i = 0; i < kern.size(); ++i) {
kern.coeffRef(i) = static_cast<float>(i) + 1;
}
for (int i = 0; i < out.size(); ++i) {
out.coeffRef(i) = static_cast<float>(i) + 1;
}
DSizes<ptrdiff_t, 4> strides;
strides[0] = 1;
strides[1] = 2;
strides[2] = 2;
strides[3] = 1;
array<std::pair<ptrdiff_t, ptrdiff_t>, 4> paddings;
paddings[0] = std::make_pair(0, 0);
paddings[1] = std::make_pair(1, 2);
paddings[2] = std::make_pair(1, 1);
paddings[3] = std::make_pair(0, 0);
DSizes<ptrdiff_t, 3> out_dim;
out_dim[0] = 1;
out_dim[1] = 4;
out_dim[2] = 12;
array<bool, 4> kernel_reverse;
kernel_reverse[0] = false;
kernel_reverse[1] = false;
kernel_reverse[2] = true;
kernel_reverse[3] = true;
DSizes<ptrdiff_t, 3> k_dims;
k_dims[0] = 1;
k_dims[1] = 1;
k_dims[2] = 4;
array<DimPair, 2> contract_dims;
contract_dims[0] = DimPair(0, 0);
contract_dims[1] = DimPair(2, 1);
DSizes<ptrdiff_t, 4> in_dim;
in_dim[0] = 1;
in_dim[1] = 3;
in_dim[2] = 4;
in_dim[3] = 1;
DSizes<ptrdiff_t, 2> in_dbg_dim;
in_dbg_dim[0] = 3;
in_dbg_dim[1] = 4;
DSizes<ptrdiff_t, 2> out_dbg_dim;
out_dbg_dim[0] = 4;
out_dbg_dim[1] = 12;
Tensor<float, 4> direct =
kern.reverse(kernel_reverse)
.reshape(k_dims)
.contract(
out.extract_image_patches(2, 2, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 0)
.reshape(out_dim),
contract_dims)
.reshape(in_dim);
Tensor<float, 4> indirect =
kern.reverse(kernel_reverse)
.reshape(k_dims)
.contract(
out.inflate(strides)
.pad(paddings)
.extract_image_patches(2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0)
.reshape(out_dim),
contract_dims)
.reshape(in_dim);
eigen_assert(dimensions_match(direct.dimensions(), indirect.dimensions()));
for (size_t i = 0; i < direct.dimensions().TotalSize(); ++i) {
EigenApprox(direct.data()[i], indirect.data()[i]);
}
EigenApprox(1.0f, direct(0, 0, 0, 0));
EigenApprox(3.0f, direct(0, 0, 1, 0));
EigenApprox(2.0f, direct(0, 0, 2, 0));
EigenApprox(6.0f, direct(0, 0, 3, 0));
EigenApprox(2.0f, direct(0, 1, 0, 0));
EigenApprox(4.0f, direct(0, 1, 1, 0));
EigenApprox(4.0f, direct(0, 1, 2, 0));
EigenApprox(8.0f, direct(0, 1, 3, 0));
}
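// Benchmark helper: measures packing of RHS blocks (reshaped image patches)
// exactly as the tensor contraction inside SpatialConvolution consumes them.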
template <typename T>
static void PackRhsHelper(::testing::benchmark::State& state,
int input_batches, int input_cols, int input_rows,
int input_depth,
int filter_count, int filter_cols, int filter_rows,
Eigen::PaddingType padding,
int col_strides, int row_strides,
int patch_col_inflate_stride,
int patch_row_inflate_stride,
Index block_rows, Index block_cols) {
srand(12345);
using Dimensions = Eigen::DSizes<Eigen::Index, 4>;
Dimensions input_dims(input_depth, input_rows, input_cols, input_batches);
static const int packet_size = Eigen::internal::packet_traits<T>::size;
using NewDimension = Eigen::DSizes<Index, 2>;
using nocontract_t = Eigen::array<Eigen::Index, 1>;
using contract_t = Eigen::array<Eigen::Index, 1>;
using ArgType = TensorMap<Tensor<T, 4>, Eigen::Aligned>;
using Evaluator = TensorEvaluator<
const TensorReshapingOp<
NewDimension, const TensorImagePatchOp<Dynamic, Dynamic, ArgType>>,
Eigen::DefaultDevice>;
  using InputMapper = Eigen::internal::TensorContractionInputMapper<
      T, Index, Eigen::internal::Rhs, Evaluator,
      nocontract_t, contract_t,
      packet_size,
      true,   // inner_dim_contiguous
      false,  // inner_dim_reordered
      0>;     // Alignment
  using SubMapper = Eigen::internal::TensorContractionSubMapper<
      T, Index, Eigen::internal::Rhs, Evaluator,
      nocontract_t, contract_t,
      packet_size,
      true,   // inner_dim_contiguous
      false,  // inner_dim_reordered
      0>;     // Alignment
#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
using PackRhsImpl =
Eigen::internal::gemm_pack_colmajor_block<T, Eigen::Index, SubMapper,
ColMajor>;
#else
  using Traits = typename Eigen::internal::gebp_traits<T, T>;
  using PackRhsImpl =
      Eigen::internal::gemm_pack_rhs<T, Eigen::Index, SubMapper,
                                     Traits::nr,
                                     ColMajor,
                                     false,   // Conjugate
                                     false>;  // PanelMode
#endif
Eigen::DefaultDevice device;
const Eigen::Index not_important = -1234;
nocontract_t nocontract_dim = {not_important};
contract_t contract_dim = {not_important};
Tensor<T, 4> packed(input_dims);
size_t input_bytes = input_dims.TotalSize() * sizeof(T);
size_t mem_size_bytes = 1024 * 1024 * 512;
size_t num_inputs =
std::max(static_cast<size_t>(1), mem_size_bytes / input_bytes);
std::vector<Tensor<T, 4>> inputs;
std::vector<Evaluator> evaluators;
std::vector<InputMapper> input_mappers;
inputs.reserve(num_inputs);
evaluators.reserve(num_inputs);
input_mappers.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs.emplace_back(input_dims);
inputs[i].setRandom();
ArgType tensor_map(inputs[i].data(), input_dims);
    const auto image_patch_op = TensorImagePatchOp<Dynamic, Dynamic, ArgType>(
        tensor_map,
        filter_rows, filter_cols,
        row_strides, col_strides,
        1, 1,  // in_row_strides, in_col_strides (no atrous sampling)
        patch_row_inflate_stride, patch_col_inflate_stride,
        padding, 0.0);  // padding type and padding value
Index input_rows_eff = (input_rows - 1) * patch_row_inflate_stride + 1;
Index input_cols_eff = (input_cols - 1) * patch_col_inflate_stride + 1;
Index output_rows = 0;
Index output_cols = 0;
if (padding == Eigen::PADDING_SAME) {
output_rows = input_rows_eff / row_strides;
output_cols = input_cols_eff / col_strides;
} else if (padding == Eigen::PADDING_VALID) {
output_rows =
numext::ceil((input_rows_eff - filter_rows + 1.f) / row_strides);
output_cols =
numext::ceil((input_cols_eff - filter_cols + 1.f) / col_strides);
} else {
eigen_assert(false && "not supported");
}
NewDimension reshape_dims;
reshape_dims[0] = input_depth * filter_rows * filter_cols;
reshape_dims[1] = output_rows * output_cols * input_batches;
const auto reshape_op =
TensorReshapingOp<NewDimension, decltype(image_patch_op)>(
image_patch_op, reshape_dims);
evaluators.emplace_back(reshape_op, device);
input_mappers.emplace_back(evaluators[i], nocontract_dim, nocontract_dim,
contract_dim, contract_dim);
}
const Index patch_depth = evaluators[0].impl().dimensions()[0];
const Index patch_rows = evaluators[0].impl().dimensions()[1];
const Index patch_cols = evaluators[0].impl().dimensions()[2];
const Index num_patches = evaluators[0].impl().dimensions()[3];
const Index patch_size = patch_depth * patch_rows * patch_cols;
PackRhsImpl pack_rhs;
const Index packed_total_size = input_dims.TotalSize();
  // Despite its name, this rounds `idx` *down* to a multiple of packet_size.
  const auto round_up = [](const Index idx) {
    return (idx / packet_size) * packet_size;
  };
for (auto s : state) {
    int input_idx =
        num_inputs == 1 ? 0 : internal::random<int>(0, num_inputs - 1);
Index depth_offset =
(patch_size > block_rows)
? round_up(internal::random<Index>(0, patch_size - 10))
: 0;
Index col_offset = internal::random<Index>(0, num_patches - 10);
Index depth = std::min(block_rows, patch_size - depth_offset);
Index cols = std::min(block_cols, num_patches - col_offset);
Index packed_size = depth * cols;
Index packed_offset =
internal::random<Index>(0, packed_total_size - packed_size - 1);
SubMapper sub_mapper =
input_mappers[input_idx].getSubMapper(depth_offset, col_offset);
pack_rhs(packed.data() + packed_offset, sub_mapper, depth, cols);
}
state.SetLabel(
absl::StrCat("patch: ", patch_rows, "x", patch_cols, " D", patch_depth,
"; num_patches=", num_patches, " patch_size=", patch_size,
" num_inputs=", num_inputs, " padding=", padding));
}
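// Benchmark helper: measures packing of LHS blocks (reshaped filter weights)
// for the convolution contraction.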
template <typename T>
static void PackLhsHelper(::testing::benchmark::State& state,
int input_depth,
int filter_count, int filter_cols, int filter_rows,
Index block_rows, Index block_cols) {
srand(12345);
eigen_assert(block_rows <= filter_count);
eigen_assert(block_cols <= input_depth * filter_rows * filter_cols);
using Dimensions = Eigen::DSizes<Eigen::Index, 4>;
Dimensions filter_dims(filter_count, filter_rows, filter_cols, input_depth);
static const int packet_size = Eigen::internal::packet_traits<T>::size;
using NewDimension = Eigen::DSizes<Index, 2>;
using nocontract_t = Eigen::array<Eigen::Index, 1>;
using contract_t = Eigen::array<Eigen::Index, 1>;
using ArgType = TensorMap<Tensor<T, 4>, Eigen::Aligned>;
using Evaluator =
TensorEvaluator<const TensorReshapingOp<NewDimension, ArgType>,
Eigen::DefaultDevice>;
  using InputMapper = Eigen::internal::TensorContractionInputMapper<
      T, Index, Eigen::internal::Lhs, Evaluator,
      nocontract_t, contract_t,
      packet_size,
      true,   // inner_dim_contiguous
      false,  // inner_dim_reordered
      0>;     // Alignment
  using SubMapper = Eigen::internal::TensorContractionSubMapper<
      T, Index, Eigen::internal::Lhs, Evaluator,
      nocontract_t, contract_t,
      packet_size,
      true,   // inner_dim_contiguous
      false,  // inner_dim_reordered
      0>;     // Alignment
#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
using PackLhsImpl =
Eigen::internal::gemm_pack_colmajor_block<T, Eigen::Index, SubMapper,
ColMajor>;
#else
using Traits = typename Eigen::internal::gebp_traits<T, T>;
  using PackLhsImpl =
      Eigen::internal::gemm_pack_lhs<T, Eigen::Index, SubMapper,
                                     Traits::mr,           // Pack1
                                     Traits::LhsProgress,  // Pack2
                                     typename Traits::LhsPacket4Packing,
                                     ColMajor>;
#endif
Eigen::DefaultDevice device;
NewDimension reshape_dims;
reshape_dims[0] = filter_count;
reshape_dims[1] = input_depth * filter_rows * filter_cols;
nocontract_t nocontract_strides = {1};
contract_t contract_strides = {filter_count};
nocontract_t i_strides = {1};
contract_t k_strides = {1};
Tensor<T, 4> packed(filter_dims);
size_t input_bytes = filter_dims.TotalSize() * sizeof(T);
size_t mem_size_bytes = 1024 * 1024 * 512;
size_t num_filters =
std::max(static_cast<size_t>(1), mem_size_bytes / input_bytes);
std::vector<Tensor<T, 4>> filters;
std::vector<Evaluator> evaluators;
std::vector<InputMapper> input_mappers;
filters.reserve(num_filters);
evaluators.reserve(num_filters);
input_mappers.reserve(num_filters);
for (int i = 0; i < num_filters; ++i) {
filters.emplace_back(filter_dims);
filters[i].setRandom();
ArgType tensor_map(filters[i].data(), filter_dims);
const auto reshape_op =
TensorReshapingOp<NewDimension, ArgType>(tensor_map, reshape_dims);
evaluators.emplace_back(reshape_op, device);
input_mappers.emplace_back(evaluators[i], nocontract_strides, i_strides,
contract_strides, k_strides);
}
PackLhsImpl pack_lhs;
const Index packed_total_size = filter_dims.TotalSize();
  // Despite its name, this rounds `idx` *down* to a multiple of packet_size.
  const auto round_up = [](const Index idx) {
    return (idx / packet_size) * packet_size;
  };
const Index max_row = filter_count;
const Index max_col = filter_rows * filter_cols * input_depth;
for (auto s : state) {
    int filter_idx =
        num_filters == 1 ? 0 : internal::random<int>(0, num_filters - 1);
Index row_offset = round_up(internal::random<Index>(0, max_row - 10));
Index col_offset = round_up(internal::random<Index>(0, max_col - 10));
Index rows = std::min(block_rows, max_row - row_offset);
Index cols = std::min(block_cols, max_col - col_offset);
Index packed_offset = round_up(
internal::random<Index>(0, packed_total_size - rows * cols - 1));
SubMapper sub_mapper =
input_mappers[filter_idx].getSubMapper(row_offset, col_offset);
#if defined(TENSORFLOW_USE_MKLDNN_CONTRACTION_KERNEL)
pack_lhs(packed.data() + packed_offset, sub_mapper, rows, cols);
#else
pack_lhs(packed.data() + packed_offset, sub_mapper, cols, rows);
#endif
}
  state.SetLabel(absl::StrCat(
      "filter: count=", filter_count, " dims=", filter_rows, "x", filter_cols,
      "; input: depth=", input_depth, "; num_filters=", num_filters));
}
#define BM_CONCAT(a, b) a##b
#define BM_RHS_NAME(prefix, T, N, H, W, C, FC, FH, FW, PAD, SH, SW, ISH, ISW, \
BR, BC) \
BM_CONCAT( \
BM_##prefix##_##T##_##N##_##H##x##W##_IC##C##_FC##FC##_##FH##x##FW, \
_##PAD##_s##SH##x##SW##_is##ISH##x##ISW##_B##BR##x##BC)
#define BM_PackRhs(T, N, H, W, C, FC, FH, FW, PAD, SH, SW, ISH, ISW, BR, BC) \
static void BM_RHS_NAME(PackRhs, T, N, H, W, C, FC, FH, FW, PAD, SH, SW, \
ISH, ISW, BR, \
BC)(::testing::benchmark::State & state) { \
PackRhsHelper<T>(state, N, H, W, C, FC, FH, FW, PADDING_##PAD, SH, SW, \
ISH, ISW, BR, BC); \
} \
BENCHMARK(BM_RHS_NAME(PackRhs, T, N, H, W, C, FC, FH, FW, PAD, SH, SW, ISH, \
ISW, BR, BC)) \
->UseRealTime()
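// BM_PackRhs arguments, in order: type, batch (N), image (HxW), input
// channels (C), filter count (FC), filter (FHxFW), padding, strides (SHxSW),
// inflate strides (ISHxISW), and pack block size (BRxBC).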
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
VALID,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
SAME,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
VALID,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
32,
64,
5, 5,
SAME,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
SAME,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
VALID,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
SAME,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
30,
64,
5, 5,
VALID,
2, 2,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
SAME,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
VALID,
1, 1,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
SAME,
2, 4,
1, 1,
256, 56);
BM_PackRhs( float,
32,
256, 256,
4,
16,
8, 8,
VALID,
2, 4,
1, 1,
256, 56);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
SAME,
1, 1,
1, 1,
36, 432);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
VALID,
1, 1,
1, 1,
36, 432);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
SAME,
2, 2,
1, 1,
36, 432);
BM_PackRhs( float,
32,
64, 64,
4,
16,
3, 3,
VALID,
2, 2,
1, 1,
36, 432);
BM_PackRhs( float,
32,
32, 32,
96,
96,
5, 5,
SAME,
1, 1,
2, 2,
272, 240);
BM_PackRhs( float,
32,
32, 32,
96,
96,
5, 5,
VALID,
1, 1,
2, 2,
272, 240);
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
using qint8 = Eigen::QInt8;
BM_PackRhs( qint8,
32,
64, 64,
32,
64,
5, 5,
SAME,
1, 1,
1, 1,
256, 56);
#endif
#define BM_LHS_NAME(prefix, T, C, FC, FH, FW, BR, BC) \
BM_CONCAT(BM_##prefix##_##T##_##C##_FC##FC##_##FH##x##FW, _B##BR##x##BC)
#define BM_PackLhs(T, C, FC, FH, FW, BR, BC) \
static void BM_LHS_NAME(PackLhs, T, C, FC, FH, FW, BR, \
BC)(::testing::benchmark::State & state) { \
PackLhsHelper<T>(state, C, FC, FH, FW, BR, BC); \
} \
BENCHMARK(BM_LHS_NAME(PackLhs, T, C, FC, FH, FW, BR, BC))->UseRealTime()
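// BM_PackLhs arguments, in order: type, input depth (C), filter count (FC),
// filter (FHxFW), and pack block size (BRxBC).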
BM_PackLhs( float,
128,
1024,
3, 3,
256, 56);
BM_PackLhs( float,
128,
1024,
3, 3,
56, 256);
BM_PackLhs( float,
30,
64,
3, 3,
256, 56);
BM_PackLhs( float,
50,
64,
3, 3,
56, 256);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/convolution/eigen_spatial_convolutions_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a7cbd52-50d8-49c8-b25e-0aefde08b0fb | cpp | tensorflow/tensorflow | runtime_fallback_kernels | tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc | tensorflow/core/runtime_fallback/test/runtime_fallback_kernels_test.cc | #include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/c/tf_tensor_internal.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/eager_operation.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h"
#include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/runtime/kernel_utils.h"
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_op_handler.h"
#include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_tensor.h"
#include "tensorflow/core/runtime_fallback/util/attr_util.h"
#include "tensorflow/core/runtime_fallback/util/tensor_util.h"
#include "tensorflow/core/runtime_fallback/util/type_util.h"
#include "tensorflow/core/tfrt/utils/error_util.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tensorflow/core/tfrt/utils/tensor_util.h"
#include "tfrt/cpu/core_runtime/cpu_op_handler.h"
#include "tfrt/core_runtime/core_runtime.h"
#include "tfrt/core_runtime/core_runtime_op.h"
#include "tfrt/core_runtime/execute_op_impl.h"
#include "tfrt/core_runtime/op_attr_type.h"
#include "tfrt/core_runtime/tensor_handle.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/async_value_ref.h"
#include "tfrt/host_context/attribute_utils.h"
#include "tfrt/host_context/device.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/host_buffer.h"
#include "tfrt/host_context/host_context.h"
#include "tfrt/host_context/kernel_frame.h"
#include "tfrt/host_context/kernel_utils.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/host_context/sync_kernel_frame.h"
#include "tfrt/support/error_util.h"
#include "tfrt/support/forward_decls.h"
#include "tfrt/support/ref_count.h"
#include "tfrt/tensor/conversion_registry.h"
#include "tfrt/tensor/dense_host_tensor.h"
#include "tfrt/tensor/scalar_host_tensor.h"
#include "tfrt/tensor/string_host_tensor.h"
#include "tfrt/tensor/tensor_serialize_utils.h"
namespace tensorflow {
namespace tfd {
namespace {
constexpr char kHostContextPtrAttrName[] = "host_ptr";
constexpr char kDefaultCpuDevice[] =
"/job:localhost/replica:0/task:0/device:CPU:0";
}  // namespace
using tfrt::AggregateAttr;
using tfrt::Argument;
using tfrt::AsyncValue;
using tfrt::AsyncValueRef;
using tfrt::BEFAttributeType;
using tfrt::Chain;
using tfrt::DenseAttr;
using tfrt::DenseHostTensor;
using tfrt::ExecutionContext;
using tfrt::Expected;
using tfrt::FuncAttr;
using tfrt::HostBuffer;
using tfrt::HostContext;
using tfrt::KernelErrorHandler;
using tfrt::OpAttrs;
using tfrt::OpAttrsRawEntry;
using tfrt::OpAttrsRef;
using tfrt::OpAttrType;
using tfrt::raw_ostream;
using tfrt::RCReference;
using tfrt::RemainingArguments;
using tfrt::RemainingAttributes;
using tfrt::RemainingResults;
using tfrt::Result;
using tfrt::ShapeAttr;
using tfrt::string_view;
using tfrt::StringAttr;
using tfrt::StringAttribute;
using tfrt::Tensor;
using tfrt::TensorShape;
#define TFD_REPORT_AND_RETURN_IF_ERROR(handler, status) \
if (!status.ok()) { \
handler.ReportError(status.message()); \
return; \
}
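// Wraps an owned TF TensorHandle into an available RuntimeFallbackTensor
// async value, surfacing any shape-query error.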
static AsyncValueRef<RuntimeFallbackTensor> CreateRuntimeFallbackTensor(
TensorHandle* handle, HostContext* host) {
OwnedTensorHandle th(handle);
int rank;
tensorflow::Status status = th->NumDims(&rank);
if (!status.ok())
return tfrt::MakeErrorAsyncValueRef(tfrt::StrCat(
"error getting rank from TF tensor handle: ", status.message()));
llvm::SmallVector<tfrt::Index, 4> dims;
for (auto i = 0; i < rank; ++i) {
int64_t dim;
status = th->Dim(i, &dim);
if (!status.ok())
return tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat("error getting dimension from TFE tensor handle: ",
status.message()));
dims.push_back(dim);
}
TensorShape shape{dims};
DataType dtype = th->DataType();
return tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
shape, GetTfrtDtype(dtype), std::move(th));
}
static std::pair<RuntimeFallbackTensor, Chain> TfdMoveDHTToTFT(
Argument<DenseHostTensor> dht, Argument<Chain> in_chain,
const ExecutionContext& exec_ctx) {
return std::make_pair(
MoveDHTToRuntimeFallbackTensor(std::move(dht.get()), exec_ctx.host()),
in_chain.get());
}
static void TfdConvertTFTToDHT(Argument<RuntimeFallbackTensor> tft,
Argument<Chain> in_chain,
Result<DenseHostTensor> dht,
Result<Chain> out_chain,
KernelErrorHandler handler,
const ExecutionContext& exec_ctx) {
dht.Set(tfrt::ConvertTensorOnHost(exec_ctx, tft.get(),
DenseHostTensor::kTensorType)
.ReleaseRCRef());
out_chain.Set(in_chain);
}
static void TfdPrintTFT(Argument<RuntimeFallbackTensor> tft,
Argument<Chain> in_chain, Result<Chain> out_chain) {
llvm::outs() << tft.get() << "\n";
llvm::outs().flush();
out_chain.Set(in_chain);
}
static void TfdInitEagerContext(Argument<Chain> in_chain,
Result<Chain> out_chain,
KernelErrorHandler handler,
const ExecutionContext& exec_ctx) {
tfrt::ResourceContext* resource_context = exec_ctx.resource_context();
tensorflow::tfd::EagerContextResource* eager_context_resource =
resource_context
->GetOrCreateResource<tensorflow::tfd::EagerContextResource>(
tensorflow::tfd::kEagerContextResourceName);
  (void)eager_context_resource;  // Touched only to force resource creation.
out_chain.Set(in_chain);
}
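// Moves a DenseHostTensor's buffer into a TF_Tensor without copying; the
// deallocator releases the HostBuffer reference when TF is done with it.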
OwnedTFTensor MoveDHTToTFTensor(DenseHostTensor&& dht, HostContext* host) {
llvm::SmallVector<tfrt::Index, 4> dims;
dht.shape().GetDimensions(&dims);
HostBuffer* host_buffer = dht.ReleaseBuffer().release();
auto deallocator = [](void* data, size_t len, void* arg) {
auto* host_buffer = reinterpret_cast<HostBuffer*>(arg);
host_buffer->DropRef();
};
CheckBoolCompatibility();
OwnedTFTensor tf_tensor{
TF_NewTensor(static_cast<TF_DataType>(GetTfDataType(dht.dtype())),
dims.data(), dims.size(), host_buffer->data(),
host_buffer->size(), deallocator, host_buffer)};
return tf_tensor;
}
static tensorflow::Status DecodeDenseAttrToTensorInterface(
const DenseAttr& dense_attr, HostContext* host,
tensorflow::TensorInterface* result) {
Expected<DenseHostTensor> dht =
tfrt::DeserializeDenseHostTensorFromDenseAttr(dense_attr, host);
if (!dht)
return tensorflow::errors::Internal(tfrt::StrCat(
"cannot create DenseHostTensor in DecodeDenseAttrToTensorInterface:",
dht.takeError()));
OwnedTFTensor tf_tensor = MoveDHTToTFTensor(std::move(*dht), host);
tensorflow::Tensor t;
TF_RETURN_IF_ERROR(TF_TensorToTensor(tf_tensor.get(), &t));
*result = tensorflow::TensorInterface(std::move(t));
return absl::OkStatus();
}
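// Transfers attributes from TFRT OpAttrs onto the EagerOperation, converting
// each supported scalar, array, and aggregate attribute type.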
static tensorflow::Status PrepareAttributes(EagerOperation* eager_op,
const OpAttrsRef& attrs,
HostContext* host,
EagerContext* eager_ctx) {
tensorflow::Status status;
attrs.IterateEntries([eager_op, eager_ctx, status_ptr = &status, host,
&attrs](const OpAttrsRawEntry& entry) {
assert(strcmp(entry.name, "device") != 0);
if (IsUnusedAttribute(entry.name)) {
return;
} else if (entry.IsArray()) {
if (entry.element_count == 0) {
if (entry.type == OpAttrType::CHAR) {
std::string empty_str;
*status_ptr = eager_op->SetAttrString(entry.name, empty_str.data(),
empty_str.size());
} else {
AttrValue empty_attr_value;
eager_op->MutableAttrs()->Set(entry.name, empty_attr_value);
}
} else if (entry.type == OpAttrType::CHAR) {
string_view attr_value = attrs.GetStringAsserting(entry.name);
*status_ptr = eager_op->SetAttrString(entry.name, attr_value.data(),
attr_value.size());
} else if (entry.type == OpAttrType::FUNC) {
string_view attr_value = attrs.GetFuncNameAsserting(entry.name);
*status_ptr = eager_op->SetAttrFunctionName(
entry.name, attr_value.data(), attr_value.size());
} else if (entry.type == OpAttrType::I64) {
llvm::ArrayRef<int64_t> int_array =
attrs.GetArrayAsserting<int64_t>(entry.name);
*status_ptr = eager_op->SetAttrIntList(entry.name, int_array.data(),
int_array.size());
} else if (entry.type == OpAttrType::F32) {
llvm::ArrayRef<float> float_array =
attrs.GetArrayAsserting<float>(entry.name);
*status_ptr = eager_op->SetAttrFloatList(entry.name, float_array.data(),
float_array.size());
} else if (entry.type == OpAttrType::BOOL) {
llvm::ArrayRef<bool> bool_array =
attrs.GetArrayAsserting<bool>(entry.name);
std::vector<unsigned char> bool_char_array(bool_array.begin(),
bool_array.end());
*status_ptr = eager_op->SetAttrBoolList(
entry.name, bool_char_array.data(), bool_char_array.size());
} else if (entry.type == OpAttrType::DTYPE) {
const auto& op_attr = attrs.GetRawAsserting(entry.name);
assert(op_attr.IsArray());
auto bef_dtypes =
llvm::ArrayRef(static_cast<const tfrt::DType*>(op_attr.GetData()),
op_attr.element_count);
llvm::SmallVector<tensorflow::DataType, 4> tf_dtypes;
tf_dtypes.reserve(bef_dtypes.size());
for (auto bef_dtype : bef_dtypes) {
tf_dtypes.push_back(ConvertBefAttrTypeToTfDataType(bef_dtype));
}
*status_ptr = eager_op->SetAttrTypeList(entry.name, tf_dtypes.data(),
tf_dtypes.size());
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported array attribute type");
}
} else {
if (entry.type == OpAttrType::I64) {
int64_t attr_value = attrs.GetAsserting<int64_t>(entry.name);
*status_ptr = eager_op->SetAttrInt(entry.name, attr_value);
} else if (entry.type == OpAttrType::F32) {
float attr_value = attrs.GetAsserting<float>(entry.name);
*status_ptr = eager_op->SetAttrFloat(entry.name, attr_value);
} else if (entry.type == OpAttrType::BOOL) {
bool attr_value = attrs.GetAsserting<bool>(entry.name);
*status_ptr = eager_op->SetAttrBool(entry.name, attr_value);
} else if (entry.type == OpAttrType::DTYPE) {
OpAttrType op_attr_type = attrs.GetAsserting<OpAttrType>(entry.name);
DataType tf_dtype = ConvertToTfDataType(op_attr_type);
*status_ptr = eager_op->SetAttrType(entry.name, tf_dtype);
} else if (entry.type == OpAttrType::SHAPE) {
tfrt::ShapeAttr shape_attr =
attrs.GetAsserting<tfrt::ShapeAttr>(entry.name);
if (shape_attr.HasRank()) {
*status_ptr = eager_op->SetAttrShape(
entry.name, shape_attr.GetShape().data(), shape_attr.GetRank());
} else {
          *status_ptr = eager_op->SetAttrShape(entry.name, /*dims=*/nullptr,
                                               /*num_dims=*/-1);  // unranked
}
} else if (entry.type == OpAttrType::DENSE) {
DenseAttr dense_attr = attrs.GetAsserting<DenseAttr>(entry.name);
tensorflow::TensorInterface interface;
*status_ptr =
DecodeDenseAttrToTensorInterface(dense_attr, host, &interface);
if (!status_ptr->ok()) return;
*status_ptr = eager_op->SetAttrTensor(entry.name, &interface);
} else if (entry.type == OpAttrType::AGGREGATE) {
AggregateAttr list_attr = attrs.GetAsserting<AggregateAttr>(entry.name);
int num_values = list_attr.GetNumElements();
if (num_values == 0) {
std::vector<int> dummy_attr;
eager_op->MutableAttrs()->Set(
entry.name, gtl::ArraySlice<const int>(dummy_attr.data(), 0));
return;
}
auto attr_base = list_attr.GetAttribute(0);
if (IsDataTypeAttribute(attr_base.type()) &&
GetDataType(attr_base.type()) == tfrt::DType::String) {
llvm::SmallVector<const void*, 8> values;
llvm::SmallVector<size_t, 8> lengths;
values.reserve(num_values);
lengths.reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto string_attr = list_attr.GetAttributeOfType<StringAttr>(i);
values.push_back(string_attr.GetValue().data());
lengths.push_back(string_attr.GetValue().size());
}
*status_ptr = eager_op->SetAttrStringList(entry.name, values.data(),
lengths.data(), num_values);
} else if (IsFuncAttribute(attr_base.type())) {
std::vector<const AbstractOperation*> funcs(num_values);
for (int i = 0; i < num_values; ++i) {
auto func_attr = list_attr.GetAttributeOfType<FuncAttr>(i);
ImmediateExecutionOperation* new_op = eager_ctx->CreateOperation();
auto func_name = func_attr.GetFunctionName();
          *status_ptr = new_op->Reset(func_name.str().c_str(),
                                      /*raw_device_name=*/nullptr);
funcs[i] = new_op;
}
*status_ptr =
eager_op->SetAttrFunctionList(entry.name, absl::MakeSpan(funcs));
} else if (attr_base.type() == BEFAttributeType::kShape) {
llvm::SmallVector<int, 8> ranks;
llvm::SmallVector<const int64_t*, 8> dims;
ranks.reserve(num_values);
dims.reserve(num_values);
for (int i = 0; i < num_values; ++i) {
auto shape_attr = list_attr.GetAttributeOfType<ShapeAttr>(i);
if (shape_attr.HasRank()) {
ranks.push_back(shape_attr.GetRank());
dims.push_back(shape_attr.GetShape().data());
} else {
ranks.push_back(-1);
dims.push_back(nullptr);
}
}
*status_ptr = eager_op->SetAttrShapeList(entry.name, dims.data(),
ranks.data(), num_values);
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported list attribute type");
}
} else {
*status_ptr =
tensorflow::errors::Internal("unsupported scalar attribute type");
}
}
});
return status;
}
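// Executes a TF op through the eager runtime: resets an EagerOperation, adds
// the inputs and attributes, and runs it to produce result tensor handles.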
Status CallEagerExecute(const tfrt::ExecutionContext& exec_ctx,
EagerContext* eager_ctx, const char* op_name,
const char* device_name,
llvm::ArrayRef<TensorHandle*> input_tensor_handles,
const OpAttrsRef& attrs,
llvm::MutableArrayRef<tensorflow::AbstractTensorHandle*>
result_tensor_handles) {
assert(eager_ctx != nullptr && "EagerContext is NULL");
OwnedEagerOperation eager_op{new EagerOperation(eager_ctx)};
TF_RETURN_IF_ERROR(eager_op->Reset(op_name, device_name));
for (TensorHandle* input_tensor : input_tensor_handles) {
TF_RETURN_IF_ERROR(eager_op->AddInput(input_tensor));
}
auto* host = exec_ctx.host();
TF_RETURN_IF_ERROR(PrepareAttributes(eager_op.get(), attrs, host, eager_ctx));
int num_retvals = result_tensor_handles.size();
TF_RETURN_IF_ERROR(eager_op->Execute(
absl::MakeSpan(result_tensor_handles.data(), num_retvals), &num_retvals));
return absl::OkStatus();
}
static bool ShouldAddHostContextAttr(const char* op_name) {
return strcmp(op_name, "TFRTMakeIterator") == 0;
}
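// Executes the op via the TF eager runtime and wraps each result TensorHandle
// as a RuntimeFallbackTensor; on failure the error is fanned out to all
// results.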
AsyncValueRef<Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, EagerContext* eager_ctx,
const char* op_name, const char* device_name,
llvm::ArrayRef<Tensor*> arguments, const OpAttrsRef& attrs,
llvm::MutableArrayRef<RCReference<AsyncValue>> results) {
auto emit_error = [&exec_ctx, results](const tensorflow::Status& status) {
auto error = EmitErrorAsync(exec_ctx, status);
std::fill(results.begin(), results.end(), error);
return error;
};
llvm::SmallVector<TensorHandle*, 4> input_tensor_handles;
input_tensor_handles.reserve(arguments.size());
for (Tensor* input_tensor : arguments) {
input_tensor_handles.push_back(
llvm::cast<RuntimeFallbackTensor>(input_tensor)->GetTensorHandle());
}
int num_retvals = results.size();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 4> result_tensor_handles(
num_retvals);
Status status;
if (!ShouldAddHostContextAttr(op_name)) {
status =
CallEagerExecute(exec_ctx, eager_ctx, op_name, device_name,
input_tensor_handles, attrs, result_tensor_handles);
} else {
assert(attrs.GetNumEntries() == 1);
OpAttrs updated;
updated.Set(kHostContextPtrAttrName,
reinterpret_cast<int64_t>(exec_ctx.host()));
status = CallEagerExecute(
exec_ctx, eager_ctx, op_name, device_name, input_tensor_handles,
OpAttrsRef(std::move(updated)), result_tensor_handles);
}
if (!status.ok()) return emit_error(status);
auto host = exec_ctx.host();
for (int i = 0; i < num_retvals; ++i) {
auto expected_fallback_tensor =
CreateRuntimeFallbackTensorFromTfTensorHandle(
OwnedTensorHandle{
TensorHandleFromInterface(result_tensor_handles[i])},
host);
if (!expected_fallback_tensor)
results[i] = EmitErrorAsync(
exec_ctx, tfrt::StrCat(expected_fallback_tensor.takeError()));
else
results[i] = tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
std::move(*expected_fallback_tensor));
}
return tfrt::GetReadyChain();
}
AsyncValueRef<Chain> RuntimeFallbackExecute(
const tfrt::ExecutionContext& exec_ctx, const char* op_name,
const char* device_name, llvm::ArrayRef<Tensor*> arguments,
const OpAttrsRef& attrs,
llvm::MutableArrayRef<RCReference<AsyncValue>> results) {
auto eager_ctx_expected = GetEagerContext(exec_ctx);
if (!eager_ctx_expected) {
auto error =
EmitErrorAsync(exec_ctx, toString(eager_ctx_expected.takeError()));
std::fill(results.begin(), results.end(), error);
return std::move(error);
}
EagerContext* eager_ctx = eager_ctx_expected.get();
return RuntimeFallbackExecute(exec_ctx, eager_ctx, op_name, device_name,
arguments, attrs, results);
}
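// BEF kernel: decodes (name, value) string attribute pairs, builds an
// EagerOperation, and executes it, returning RuntimeFallbackTensor results.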
static void RuntimeFallbackKernel(
Argument<Chain> in_chain, RemainingArguments input_tensors,
Result<Chain> out_chain, RemainingResults output_tensors,
StringAttribute op_name, RemainingAttributes remaining_attributes,
KernelErrorHandler handler, const ExecutionContext& exec_ctx) {
HostContext* host = exec_ctx.host();
tfrt::ResourceContext* resource_context = exec_ctx.resource_context();
EagerContextResource* eager_context_resource =
resource_context->GetOrCreateResource<EagerContextResource>(
tensorflow::tfd::kEagerContextResourceName);
tfrt::Expected<EagerContext*> eager_ctx_expected =
eager_context_resource->GetTFEagerContext();
if (!eager_ctx_expected) {
handler.ReportError("eager_ctx_expected.takeError()");
return;
}
EagerContext* eager_ctx = eager_ctx_expected.get();
std::string op_name_str = [&] {
auto view = op_name.get();
view.consume_front("tf.");
return view.str();
}();
OwnedEagerOperation eager_op{new EagerOperation(eager_ctx)};
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->Reset(op_name_str.c_str(), nullptr));
for (AsyncValue* input_tensor_av : input_tensors.values()) {
auto input_tensor_handle =
input_tensor_av->get<RuntimeFallbackTensor>().GetTensorHandle();
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
eager_op->AddInput(input_tensor_handle));
}
assert(remaining_attributes.size() % 2 == 0);
int num_tf_attrs = remaining_attributes.size() / 2;
for (int i = 0; i < num_tf_attrs; ++i) {
std::string attr_name =
remaining_attributes.GetStringAttribute(i * 2).str();
absl::string_view attr_value = ToAbslStringView(
remaining_attributes.GetStringAttribute(i * 2 + 1).get());
std::vector<absl::string_view> value_split =
tfd::AttrValueSplit(attr_value);
if (value_split[0] == "string") {
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->SetAttrString(attr_name.c_str(), value_split[1].data(),
value_split[1].size()));
} else if (value_split[0] == "bool") {
bool bool_val;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseBoolAttrValue(value_split[1], &bool_val));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrBool(attr_name.c_str(), bool_val));
} else if (value_split[0] == "int") {
int64_t int_val;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseIntAttrValue(value_split[1], &int_val));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrInt(attr_name.c_str(), int_val));
} else if (value_split[0] == "tftensor") {
tensorflow::Tensor t;
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
ParseTensorAttrValue(value_split[1], &t));
tensorflow::TensorInterface interface(t);
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrTensor(attr_name.c_str(), &interface));
} else if (value_split[0] == "tfdtype") {
DataType dtype;
TFD_REPORT_AND_RETURN_IF_ERROR(handler,
ParseTfDataType(value_split[1], &dtype));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, eager_op->SetAttrType(attr_name.c_str(), dtype));
} else if (value_split[0] == "tfshape") {
std::vector<int64_t> dims;
TFD_REPORT_AND_RETURN_IF_ERROR(
handler, ParseTensorShapeAttrValue(value_split[1], &dims));
TFD_REPORT_AND_RETURN_IF_ERROR(
handler,
eager_op->SetAttrShape(attr_name.c_str(), dims.data(), dims.size()));
} else {
handler.ReportError("attribute type not yet supported");
return;
}
}
int num_retvals = output_tensors.size();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 4> retvals(num_retvals);
tensorflow::Status status = eager_op->Execute(
absl::MakeSpan(retvals.data(), num_retvals), &num_retvals);
TFD_REPORT_AND_RETURN_IF_ERROR(handler, status);
if (num_retvals != output_tensors.size()) {
handler.ReportError("Incorrect number of output values");
return;
}
for (int i = 0; i < num_retvals; ++i) {
OwnedTensorHandle owned_th{TensorHandleFromInterface(retvals[i])};
if (!owned_th) handler.ReportError("TensorHandleFromInterface failed");
auto fallback_tensor = CreateRuntimeFallbackTensorFromTfTensorHandle(
std::move(owned_th), host);
if (!fallback_tensor) {
output_tensors[i] = tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat(fallback_tensor.takeError()));
} else {
output_tensors[i] =
tfrt::MakeAvailableAsyncValueRef<RuntimeFallbackTensor>(
std::move(*fallback_tensor));
}
}
out_chain.Set(in_chain);
}
static void EmitErrorAndSetInResults(
const tfrt::ExecutionContext& exec_ctx,
const tfrt::DecodedDiagnostic& error,
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>> results) {
auto error_av = tfrt::EmitErrorAsync(exec_ctx, error.status);
std::fill(results.begin(), results.end(), error_av);
}
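// Converts CoreRT TensorHandle arguments into fallback tensorflow::Tensor
// results, inserting a device/format conversion through KernelFallbackTensor
// when the source is not already a host DenseHostTensor, and handling inputs
// that are not yet available.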
void CoreRTTensorHandleToFallbackTensorInternal(
llvm::ArrayRef<tfrt::AsyncValue*> tensorhandle_args,
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>>
tf_tensor_results,
tfrt::string_view device, const tfrt::ExecutionContext& exec_ctx) {
assert(tensorhandle_args.size() == tf_tensor_results.size());
auto set_result = [&](tfrt::RCReference<tfrt::AsyncValue>& result,
llvm::Expected<tensorflow::Tensor> tf_tensor) {
auto result_ref = tfrt::MakeUnconstructedAsyncValueRef<
tensorflow::tfrt_stub::FallbackTensor>();
if (!tf_tensor) {
result_ref.SetError(tfrt::StrCat(tf_tensor.takeError()));
} else {
result_ref.emplace(std::move(tf_tensor.get()));
}
result = std::move(result_ref);
};
auto maybe_convert_runtime_fallback_tensor =
[&exec_ctx](
tfrt::AsyncValueRef<Tensor> tensor_avref,
const tfrt::Device& src_device,
const tfrt::Device& dst_device) -> tfrt::AsyncValueRef<tfrt::Tensor> {
assert(tensor_avref.IsAvailable());
assert(!tensor_avref.IsError());
auto& tensor = tensor_avref.get();
if (!tensor.IsTensorType(DenseHostTensor::kTensorType) ||
!src_device.IsDeviceType(tfrt::CpuDevice::kDeviceType) ||
!dst_device.IsDeviceType(tfrt::CpuDevice::kDeviceType)) {
return tfrt::ConvertTensor(exec_ctx, tensor,
src_device,
dst_device,
KernelFallbackTensor::kTensorType);
}
return tensor_avref;
};
auto dst_device = exec_ctx.host()->GetDeviceRef(device);
for (int i = 0; i < tensorhandle_args.size(); ++i) {
if (!dst_device) {
tf_tensor_results[i] = tfrt::MakeErrorAsyncValueRef(
tfrt::StrCat("Failed to find device with name ", device));
continue;
}
auto& tensor_handle = tensorhandle_args[i]->get<tfrt::TensorHandle>();
assert(tensor_handle.IsDeviceAvailable());
assert(!tensor_handle.IsDeviceError());
auto* tensor_av = tensor_handle.GetAsyncTensor();
auto tensor_avref = tfrt::AsyncValueRef<Tensor>(FormRef(tensor_av));
auto& src_device = *tensor_handle.GetAvailableDevice();
AsyncValueRef<Tensor> knfb_tensor;
if (!tensor_av->IsAvailable()) {
auto ind_av = tfrt::MakeIndirectAsyncValue();
knfb_tensor = AsyncValueRef<Tensor>(ind_av.CopyRef());
tensor_av->AndThen(
[tensor_avref = std::move(tensor_avref), ind_av = std::move(ind_av),
&src_device, dst_device = dst_device.CopyRef(),
maybe_convert_runtime_fallback_tensor, exec_ctx]() mutable {
ind_av->ForwardTo(maybe_convert_runtime_fallback_tensor(
std::move(tensor_avref), src_device, *dst_device));
});
} else {
knfb_tensor = maybe_convert_runtime_fallback_tensor(
std::move(tensor_avref), src_device, *dst_device);
}
if (!knfb_tensor.IsAvailable()) {
auto result_ref = tfrt::MakeIndirectAsyncValue();
tf_tensor_results[i] = result_ref;
auto knfb_tensor_av = knfb_tensor.GetAsyncValue();
knfb_tensor_av->AndThen([knfb_tensor = std::move(knfb_tensor),
result_ref = std::move(result_ref),
dst_device = dst_device.CopyRef(),
exec_ctx]() mutable {
if (knfb_tensor.IsError()) {
result_ref->ForwardTo(std::move(knfb_tensor));
return;
}
auto expected_tf_tensor = tfrt::TFRTTensorToTFTensor(knfb_tensor.get());
if (!expected_tf_tensor) {
auto error = tfrt::EmitErrorAsync(
exec_ctx, toString(expected_tf_tensor.takeError()));
result_ref->ForwardTo(std::move(error));
} else {
auto tf_tensor_ref = tfrt::MakeAvailableAsyncValueRef<
tensorflow::tfrt_stub::FallbackTensor>(
std::move(expected_tf_tensor.get()));
result_ref->ForwardTo(std::move(tf_tensor_ref));
}
});
} else {
set_result(tf_tensor_results[i],
tfrt::TFRTTensorToTFTensor(knfb_tensor.get()));
}
}
}
void CoreRTTensorHandleToFallbackTensor(
RemainingArguments args, RemainingResults results, StringAttr device,
const tfrt::ExecutionContext& exec_ctx) {
tsl::profiler::TraceMe trace_me("corert_tensorhandle_to_fallback_tensor");
trace_me.AppendMetadata([request_id = exec_ctx.request_ctx()->id()]() {
return tsl::profiler::TraceMeEncode({{"id", request_id}});
});
CoreRTTensorHandleToFallbackTensorInternal(args.values(), results.values(),
device.GetValue(), exec_ctx);
}
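// Wraps fallback tensors into CoreRT TensorHandles placed on the given
// device.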
static void FallbackTensorToCoreRTTensorHandleInternal(
llvm::ArrayRef<tfrt::AsyncValue*> tf_tensor_args,
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>>
tensorhandle_results,
tfrt::string_view device, const tfrt::ExecutionContext& exec_ctx) {
auto* host = exec_ctx.host();
assert(tf_tensor_args.size() == tensorhandle_results.size());
for (int i = 0; i < tf_tensor_args.size(); ++i) {
auto* av = tf_tensor_args[i];
auto& tf_tensor = av->get<tensorflow::tfrt_stub::FallbackTensor>().tensor();
AsyncValueRef<tfrt::Tensor> kernel_fallback_tensor =
tfrt::MakeAvailableAsyncValueRef<KernelFallbackTensor>(tf_tensor);
auto metadata = kernel_fallback_tensor.get().metadata();
tensorhandle_results[i] =
tfrt::MakeAvailableAsyncValueRef<tfrt::TensorHandle>(
host->GetDeviceRef(device), metadata,
std::move(kernel_fallback_tensor));
}
}
void FallbackTensorToCoreRTTensorHandle(
RemainingArguments args, RemainingResults results, StringAttr device,
const tfrt::ExecutionContext& exec_ctx) {
tsl::profiler::TraceMe trace_me("fallback_tensor_to_corert_tensorhandle");
trace_me.AppendMetadata([request_id = exec_ctx.request_ctx()->id()]() {
return tsl::profiler::TraceMeEncode({{"id", request_id}});
});
FallbackTensorToCoreRTTensorHandleInternal(args.values(), results.values(),
device.GetValue(), exec_ctx);
}
tfrt::Chain PrintFallbackTensor(
const tensorflow::tfrt_stub::FallbackTensor& arg, const tfrt::Chain& ch) {
std::string message;
llvm::raw_string_ostream(message) << arg.tensor().DebugString() << "\n";
printf("%s", message.c_str());
fflush(stdout);
return tfrt::Chain();
}
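// Kernel backing tfrt_fallback_async execute ops: runs a TF op through the
// runtime fallback using BEF-encoded op attributes, returning plain
// tensorflow::Tensor results.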
static void RuntimeFallbackExecuteOp(
RemainingArguments args, RemainingResults results, StringAttr device_attr,
AggregateAttr op_attr_array, AggregateAttr op_func_attr_array,
StringAttr op_name_attr, tfrt::AsyncValueRef<tfrt::Chain>* op_chain,
const ExecutionContext& exec_ctx) {
auto set_error = [&exec_ctx, results](tfrt::string_view msg) {
auto error_av = EmitErrorAsync(exec_ctx, absl::InternalError(msg));
for (int i = 0, n = results.size(); i < n; ++i) results[i] = error_av;
};
auto op_name = op_name_attr.GetValue();
op_name.consume_front("tf.");
std::string device_name = device_attr.GetValue().str();
if (!absl::StartsWith(device_name, "/")) device_name = kDefaultCpuDevice;
tfrt::OpAttrs op_attrs;
tfrt::SetUpOpAttrs(op_attr_array, &op_attrs);
tfrt::SetUpOpFuncAttrs(op_func_attr_array, &op_attrs);
auto eager_ctx_expected = GetEagerContext(exec_ctx);
if (!eager_ctx_expected) {
set_error(tfrt::StrCat(eager_ctx_expected.takeError()));
return;
}
EagerContext* eager_ctx = eager_ctx_expected.get();
Device* device = nullptr;
Status s = eager_ctx->local_device_mgr()->LookupDevice(device_name, &device);
if (!s.ok()) {
VLOG(1) << s.message() << " using default CPU device.";
}
llvm::SmallVector<RuntimeFallbackTensor, 4> tfrt_tensor_args;
tfrt_tensor_args.reserve(args.size());
for (int i = 0; i < args.size(); ++i) {
auto* av = args[i];
auto tf_tensor = av->get<tensorflow::Tensor>();
tfrt::TensorMetadata md = tfd::GetTensorMetadata(tf_tensor);
OwnedTensorHandle tensor_handle{tensorflow::TensorHandle::CreateLocalHandle(
std::move(tf_tensor), device, device, eager_ctx)};
tfrt_tensor_args.push_back(
RuntimeFallbackTensor(md.shape, md.dtype, std::move(tensor_handle)));
}
llvm::SmallVector<tfrt::Tensor*, 4> tfrt_tensor_arg_ptrs;
tfrt_tensor_arg_ptrs.reserve(args.size());
for (auto& tensor : tfrt_tensor_args) tfrt_tensor_arg_ptrs.push_back(&tensor);
llvm::SmallVector<RCReference<tfrt::AsyncValue>, 4> tfrt_tensor_results;
tfrt_tensor_results.resize(results.size());
auto chain = RuntimeFallbackExecute(
exec_ctx, op_name.str().c_str(), device_name.c_str(),
tfrt_tensor_arg_ptrs, tfrt::OpAttrsRef(op_attrs), tfrt_tensor_results);
if (op_chain) *op_chain = chain.CopyRef();
DCHECK(chain.IsAvailable());
if (chain.IsError()) {
EmitErrorAndSetInResults(
exec_ctx, tfrt::DecodedDiagnostic(chain.GetError()), results.values());
return;
}
for (int i = 0; i < results.size(); ++i) {
auto& runtime_fallback_tensor =
tfrt_tensor_results[i]->get<RuntimeFallbackTensor>();
const tensorflow::Tensor* tf_tensor = nullptr;
tensorflow::Status s =
runtime_fallback_tensor.GetTensorHandle()->Tensor(&tf_tensor);
DCHECK(s.ok()) << s;
results[i] =
tfrt::MakeAvailableAsyncValueRef<tensorflow::Tensor>(*tf_tensor);
}
}
Chain AddRuntimeFallbackImplicitConversionKernel(
Argument<tfrt::OpHandler*> op_handler, const ExecutionContext& exec_ctx) {
assert(op_handler.get()->GetName() == tfrt::CpuOpHandler::kName);
tfrt::CpuOpHandler* cpu_op_handler =
reinterpret_cast<tfrt::CpuOpHandler*>(op_handler.get());
cpu_op_handler->AddImplicitConversion(RuntimeFallbackTensor::kTensorType,
DenseHostTensor::kTensorType);
cpu_op_handler->AddImplicitConversion(RuntimeFallbackTensor::kTensorType,
tfrt::AnyScalarHostTensor::kTensorType);
cpu_op_handler->AddImplicitConversion(RuntimeFallbackTensor::kTensorType,
tfrt::StringHostTensor::kTensorType);
return {};
}
void CreateRuntimeFallbackOpHandlerKernel(Result<tfrt::OpHandler*> op_handler,
StringAttribute tf_device_name,
const ExecutionContext& exec_ctx) {
auto* runtime = tfrt::CoreRuntime::GetFromHostContext(exec_ctx.host());
assert(runtime);
auto op_handler_ptr =
CreateRuntimeFallbackOpHandler(runtime, tf_device_name.get());
assert(op_handler_ptr);
op_handler.Emplace(op_handler_ptr.get());
}
static OwnedTensorHandle ConvertTFRTTensorToTFTensorHandle(
tfrt::Tensor* tensor) {
if (auto* dht = llvm::dyn_cast<tfrt::DenseHostTensor>(tensor)) {
tensorflow::Tensor tensor =
MoveHostBufferToTfTensor(dht->buffer(), dht->dtype(), dht->shape());
return OwnedTensorHandle{
tensorflow::TensorHandle::CreateLocalHandle(tensor)};
}
if (auto* sht = llvm::dyn_cast<tfrt::StringHostTensor>(tensor)) {
tensorflow::Tensor tensor = CopyShtToTfTensor(*sht);
return OwnedTensorHandle{
tensorflow::TensorHandle::CreateLocalHandle(tensor)};
}
llvm_unreachable("unsupported tensor type");
}
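// Resolves a TF TensorHandle and adopts its buffer as a TFRT DenseHostTensor
// (string tensors are copied into a StringHostTensor instead).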
static llvm::Expected<tfrt::Value> ConvertTFTensorHandleToTFRTTensor(
OwnedTensorHandle tensor_handle, HostContext* host) {
tensorflow::Status status;
OwnedAbstractTensorInterface tensor_interface{
tensor_handle->Resolve(&status)};
if (!status.ok()) {
return tfrt::MakeStringError("error resolving TensorHandle: ",
status.message());
}
auto tf_dtype = tensor_interface->Type();
if (tf_dtype == DT_STRING) {
auto string_host_tensor =
CopyTfStringTensorToStringHostTensor(tensor_interface.get(), host);
if (!string_host_tensor)
return tfrt::MakeStringError(
"error converting TF string tensor to tfrt::StringHostTensor: ",
string_host_tensor.takeError());
return tfrt::Value(std::move(*string_host_tensor));
}
tfrt::TensorMetadata metadata(GetTfrtDtype(tf_dtype),
GetShape(tensor_interface.get()));
CheckBoolCompatibility();
void* data = tensor_interface->Data();
size_t size = tensor_interface->ByteSize();
auto host_buffer = HostBuffer::CreateFromExternal(
data, size,
[tensor_interface = std::move(tensor_interface)](void*, size_t) {});
tfrt::Value value;
value.emplace<DenseHostTensor>(metadata, std::move(host_buffer));
return std::move(value);
}
void RegisterTfdDelegateKernels(tfrt::KernelRegistry* registry) {
registry->AddKernel("tfd.init_eager_context",
TFRT_KERNEL(TfdInitEagerContext));
registry->AddKernel("tfd.delegate_kernel",
TFRT_KERNEL(RuntimeFallbackKernel));
registry->AddKernel("tfd.move_dht_to_tft", TFRT_KERNEL(TfdMoveDHTToTFT));
registry->AddKernel("tfd.convert_tft_to_dht",
TFRT_KERNEL(TfdConvertTFTToDHT));
registry->AddKernel("tfd.print_tft", TFRT_KERNEL(TfdPrintTFT));
registry->AddKernel(
"tfrt_fallback_async.corert_tensorhandle_to_fallback_tensor",
TFRT_KERNEL(CoreRTTensorHandleToFallbackTensor));
registry->AddKernel(
"tfrt_fallback_async.fallback_tensor_to_corert_tensorhandle",
TFRT_KERNEL(FallbackTensorToCoreRTTensorHandle));
registry->AddKernel("tfrt_fallback_async.print_tensor",
TFRT_KERNEL(PrintFallbackTensor));
registry->AddKernel("corert.create_runtime_fallback_op_handler",
TFRT_KERNEL(CreateRuntimeFallbackOpHandlerKernel));
registry->AddKernel("corert.add_runtime_fallback_implicit_conversions",
TFRT_KERNEL(AddRuntimeFallbackImplicitConversionKernel));
}
}  // namespace tfd
} | #include "tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.h"
#include <gtest/gtest.h>
#include "llvm/ADT/SmallVector.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/runtime_fallback/test/coreruntime_driver.h"
#include "tfrt/core_runtime/op_attrs.h"
namespace tfrt {
namespace {
TEST(RuntimeFallbackKernelsTest, CallEagerExecute) {
auto driver = CoreRuntimeDriver();
driver.InitializeCpuRuntimeFallbackOpHandler();
auto exec_ctx = driver.CreateExecutionContext(__FILE__, __LINE__);
tensorflow::Tensor input(tensorflow::DT_FLOAT, {2, 2});
tensorflow::test::FillValues<float>(&input, {1, 1, 1, 1});
tensorflow::TensorHandle* input_th =
tensorflow::TensorHandle::CreateLocalHandle(input);
tfrt::OpAttrs matmul_attrs;
matmul_attrs.Set<bool>("transpose_a", false);
matmul_attrs.Set<bool>("transpose_b", false);
tfrt::OpAttrsRef matmul_attrs_ref = matmul_attrs.freeze();
llvm::SmallVector<tensorflow::AbstractTensorHandle*, 1> results;
results.resize(1);
auto eager_ctx_expected = tensorflow::tfd::GetEagerContext(exec_ctx);
ASSERT_FALSE(!eager_ctx_expected);
tensorflow::EagerContext* eager_ctx = eager_ctx_expected.get();
TF_EXPECT_OK(tensorflow::tfd::CallEagerExecute(
exec_ctx, eager_ctx, "MatMul", "", {input_th, input_th},
matmul_attrs_ref, results));
ASSERT_EQ(results.size(), 1);
tensorflow::TensorHandle* res_th =
tensorflow::TensorHandleFromInterface(results[0]);
const tensorflow::Tensor* res_tensor;
TF_EXPECT_OK(res_th->Tensor(&res_tensor));
EXPECT_EQ(res_th->DataType(), tensorflow::DT_FLOAT);
int64_t num_elements;
TF_EXPECT_OK(res_th->NumElements(&num_elements));
EXPECT_EQ(num_elements, 4);
tensorflow::Tensor expected(tensorflow::DT_FLOAT, {2, 2});
tensorflow::test::FillValues<float>(&expected, {2, 2, 2, 2});
tensorflow::test::ExpectTensorEqual<float>(*res_tensor, expected);
input_th->Unref();
res_th->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/test/runtime_fallback_kernels_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a59765c-2c63-495e-b33c-60154ba0061c | cpp | abseil/abseil-cpp | utf8 | absl/strings/internal/utf8.cc | absl/strings/internal/utf8_test.cc | #include "absl/strings/internal/utf8.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
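// Encodes `utf8_char` as UTF-8 into `buffer` and returns the number of bytes
// written (1 to 4). Code points <= 0x7F take one byte; <= 0x7FF two bytes;
// <= 0xFFFF three bytes; everything else four bytes. The caller must supply a
// buffer of at least kMaxEncodedUTF8Size bytes.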
size_t EncodeUTF8Char(char *buffer, char32_t utf8_char) {
if (utf8_char <= 0x7F) {
*buffer = static_cast<char>(utf8_char);
return 1;
} else if (utf8_char <= 0x7FF) {
buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
buffer[0] = static_cast<char>(0xC0 | utf8_char);
return 2;
} else if (utf8_char <= 0xFFFF) {
buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
buffer[0] = static_cast<char>(0xE0 | utf8_char);
return 3;
} else {
buffer[3] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
buffer[2] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
buffer[1] = static_cast<char>(0x80 | (utf8_char & 0x3F));
utf8_char >>= 6;
buffer[0] = static_cast<char>(0xF0 | utf8_char);
return 4;
}
}
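// Illustrative encodings: U+00A3 (two-byte case) yields 0xC2 0xA3, and
// U+20AC (three-byte case) yields 0xE2 0x82 0xAC, matching the
// leading-byte/continuation-byte masks applied above.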
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/utf8.h"
#include <cstdint>
#include <utility>
#include "gtest/gtest.h"
#include "absl/base/port.h"
namespace {
#if !defined(__cpp_char8_t)
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++2a-compat"
#endif
TEST(EncodeUTF8Char, BasicFunction) {
std::pair<char32_t, std::string> tests[] = {{0x0030, u8"\u0030"},
{0x00A3, u8"\u00A3"},
{0x00010000, u8"\U00010000"},
{0x0000FFFF, u8"\U0000FFFF"},
{0x0010FFFD, u8"\U0010FFFD"}};
for (auto &test : tests) {
char buf0[7] = {'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00'};
char buf1[7] = {'\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF', '\xFF'};
char *buf0_written =
&buf0[absl::strings_internal::EncodeUTF8Char(buf0, test.first)];
char *buf1_written =
&buf1[absl::strings_internal::EncodeUTF8Char(buf1, test.first)];
int apparent_length = 7;
while (buf0[apparent_length - 1] == '\x00' &&
buf1[apparent_length - 1] == '\xFF') {
if (--apparent_length == 0) break;
}
EXPECT_EQ(apparent_length, buf0_written - buf0);
EXPECT_EQ(apparent_length, buf1_written - buf1);
EXPECT_EQ(apparent_length, test.second.length());
EXPECT_EQ(std::string(buf0, apparent_length), test.second);
EXPECT_EQ(std::string(buf1, apparent_length), test.second);
}
char buf[32] = "Don't Tread On Me";
EXPECT_LE(absl::strings_internal::EncodeUTF8Char(buf, 0x00110000),
absl::strings_internal::kMaxEncodedUTF8Size);
char buf2[32] = "Negative is invalid but sane";
EXPECT_LE(absl::strings_internal::EncodeUTF8Char(buf2, -1),
absl::strings_internal::kMaxEncodedUTF8Size);
}
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/utf8.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/utf8_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
13a3f2e4-d5ef-4e08-b0cb-49f8f2fdec84 | cpp | tensorflow/tensorflow | exec_on_stall | tensorflow/core/util/exec_on_stall.h | tensorflow/core/util/exec_on_stall_test.cc | #ifndef TENSORFLOW_CORE_UTIL_EXEC_ON_STALL_H_
#define TENSORFLOW_CORE_UTIL_EXEC_ON_STALL_H_
#include <functional>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
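// ExecuteOnStall schedules `f` to run on a background thread if this object
// is still alive `delay_secs` after construction; deleting the object before
// the deadline cancels the callback. The destructor blocks until the
// background closure has observed the cancellation and exited.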
class ExecuteOnStall {
public:
ExecuteOnStall(int delay_secs, std::function<void()> f,
int32_t poll_microseconds = 100)
: disabled_(false),
joined_(false),
env_(Env::Default()),
f_(f),
poll_microseconds_(poll_microseconds) {
deadline_ = env_->NowMicros() + 1000000 * delay_secs;
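    // Background closure: poll every poll_microseconds_ until the deadline,
    // bailing out early if the destructor has set disabled_. If still enabled
    // at the deadline, invoke f_, then signal the destructor via cond_var_.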
env_->SchedClosure([this]() {
while (env_->NowMicros() < deadline_) {
{
mutex_lock l(mu_);
if (disabled_) {
break;
}
}
env_->SleepForMicroseconds(poll_microseconds_);
}
{
mutex_lock l(mu_);
if (!disabled_) {
f_();
}
joined_ = true;
cond_var_.notify_all();
}
});
}
~ExecuteOnStall() {
mutex_lock l(mu_);
disabled_ = true;
if (!joined_) {
cond_var_.wait(l);
}
}
private:
mutex mu_;
condition_variable cond_var_;
bool disabled_ TF_GUARDED_BY(mu_);
bool joined_ TF_GUARDED_BY(mu_);
Env* env_;
std::function<void()> f_;
int64_t deadline_;
int32 poll_microseconds_;
};
}
#endif | #include "tensorflow/core/util/exec_on_stall.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
struct Chunk {
std::unique_ptr<ExecuteOnStall> stall_closure;
};
Chunk* NewChunk(int stall_seconds, std::function<void()> f) {
Chunk* c = new Chunk;
c->stall_closure =
std::make_unique<ExecuteOnStall>(stall_seconds, std::move(f));
return c;
}
TEST(ExecuteOnStallTest, BothWays) {
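  // 'a' is deleted before its one-second deadline, so its closure must not
  // fire; 'b' outlives the deadline, so its closure must fire.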
mutex mu;
bool a_triggered(false);
bool b_triggered(false);
Chunk* a = NewChunk(1, [&mu, &a_triggered]() {
mutex_lock l(mu);
a_triggered = true;
});
Chunk* b = NewChunk(1, [&mu, &b_triggered]() {
mutex_lock l(mu);
b_triggered = true;
});
delete a;
Env::Default()->SleepForMicroseconds(2000000);
{
mutex_lock l(mu);
EXPECT_FALSE(a_triggered);
EXPECT_TRUE(b_triggered);
}
delete b;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/exec_on_stall.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/exec_on_stall_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b8bdcc1-21e7-4a3e-8266-111b382e0bdf | cpp | google/quiche | quic_crypto_server_config | quiche/quic/core/crypto/quic_crypto_server_config.cc | quiche/quic/core/crypto/quic_crypto_server_config_test.cc | #include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "openssl/sha.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/aes_128_gcm_12_decrypter.h"
#include "quiche/quic/core/crypto/aes_128_gcm_12_encrypter.h"
#include "quiche/quic/core/crypto/cert_compressor.h"
#include "quiche/quic/core/crypto/certificate_view.h"
#include "quiche/quic/core/crypto/chacha20_poly1305_encrypter.h"
#include "quiche/quic/core/crypto/channel_id.h"
#include "quiche/quic/core/crypto/crypto_framer.h"
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/curve25519_key_exchange.h"
#include "quiche/quic/core/crypto/key_exchange.h"
#include "quiche/quic/core/crypto/p256_key_exchange.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/crypto/quic_hkdf.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/crypto/tls_server_connection.h"
#include "quiche/quic/core/proto/crypto_server_config_proto.h"
#include "quiche/quic/core/proto/source_address_token_proto.h"
#include "quiche/quic/core/quic_clock.h"
#include "quiche/quic/core/quic_connection_context.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_socket_address_coder.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_hostname_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_testvalue.h"
#include "quiche/common/platform/api/quiche_reference_counted.h"
namespace quic {
namespace {
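// A REJ sent to a client without a valid source-address token may be at most
// kMultiplier times the size of the received CHLO (see BuildRejection below).
// kMaxTokenAddresses caps how many client addresses a source-address token
// retains (its use lies in NewSourceAddressToken, beyond this excerpt).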
const size_t kMultiplier = 3;
const int kMaxTokenAddresses = 4;
std::string DeriveSourceAddressTokenKey(
absl::string_view source_address_token_secret) {
  QuicHKDF hkdf(source_address_token_secret,
                absl::string_view() /* no salt */,
                "QUIC source address token key",
                CryptoSecretBoxer::GetKeySize(),
                0 /* no fixed IV needed */, 0 /* no subkey secret */);
return std::string(hkdf.server_write_key());
}
class DefaultKeyExchangeSource : public KeyExchangeSource {
public:
DefaultKeyExchangeSource() = default;
~DefaultKeyExchangeSource() override = default;
  std::unique_ptr<AsynchronousKeyExchange> Create(
      std::string /*server_config_id*/, bool /*is_fallback*/, QuicTag type,
      absl::string_view private_key) override {
if (private_key.empty()) {
QUIC_LOG(WARNING) << "Server config contains key exchange method without "
"corresponding private key of type "
<< QuicTagToString(type);
return nullptr;
}
std::unique_ptr<SynchronousKeyExchange> ka =
CreateLocalSynchronousKeyExchange(type, private_key);
if (!ka) {
QUIC_LOG(WARNING) << "Failed to create key exchange method of type "
<< QuicTagToString(type);
}
return ka;
}
};
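// Returns true if the client's proof-demand (PDMD) tag list includes kX509.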
bool ClientDemandsX509Proof(const CryptoHandshakeMessage& client_hello) {
QuicTagVector their_proof_demands;
if (client_hello.GetTaglist(kPDMD, &their_proof_demands) != QUIC_NO_ERROR) {
return false;
}
for (const QuicTag tag : their_proof_demands) {
if (tag == kX509) {
return true;
}
}
return false;
}
std::string FormatCryptoHandshakeMessageForTrace(
const CryptoHandshakeMessage* message) {
if (message == nullptr) {
return "<null message>";
}
std::string s = QuicTagToString(message->tag());
if (const auto it = message->tag_value_map().find(kRREJ);
it != message->tag_value_map().end()) {
const std::string& value = it->second;
if (value.size() % sizeof(uint32_t) == 0) {
absl::StrAppend(&s, " RREJ:[");
for (size_t j = 0; j < value.size(); j += sizeof(uint32_t)) {
uint32_t reason;
memcpy(&reason, value.data() + j, sizeof(reason));
if (j > 0) {
absl::StrAppend(&s, ",");
}
absl::StrAppend(&s, CryptoUtils::HandshakeFailureReasonToString(
static_cast<HandshakeFailureReason>(reason)));
}
absl::StrAppend(&s, "]");
} else {
absl::StrAppendFormat(&s, " RREJ:[unexpected length:%u]", value.size());
}
}
return s;
}
}
std::unique_ptr<KeyExchangeSource> KeyExchangeSource::Default() {
return std::make_unique<DefaultKeyExchangeSource>();
}
class ValidateClientHelloHelper {
public:
ValidateClientHelloHelper(
quiche::QuicheReferenceCountedPointer<
ValidateClientHelloResultCallback::Result>
result,
std::unique_ptr<ValidateClientHelloResultCallback>* done_cb)
: result_(std::move(result)), done_cb_(done_cb) {}
ValidateClientHelloHelper(const ValidateClientHelloHelper&) = delete;
ValidateClientHelloHelper& operator=(const ValidateClientHelloHelper&) =
delete;
~ValidateClientHelloHelper() {
QUIC_BUG_IF(quic_bug_12963_1, done_cb_ != nullptr)
<< "Deleting ValidateClientHelloHelper with a pending callback.";
}
void ValidationComplete(
QuicErrorCode error_code, const char* error_details,
std::unique_ptr<ProofSource::Details> proof_source_details) {
result_->error_code = error_code;
result_->error_details = error_details;
(*done_cb_)->Run(std::move(result_), std::move(proof_source_details));
DetachCallback();
}
void DetachCallback() {
QUIC_BUG_IF(quic_bug_10630_1, done_cb_ == nullptr)
<< "Callback already detached.";
done_cb_ = nullptr;
}
private:
quiche::QuicheReferenceCountedPointer<
ValidateClientHelloResultCallback::Result>
result_;
std::unique_ptr<ValidateClientHelloResultCallback>* done_cb_;
};
const char QuicCryptoServerConfig::TESTING[] = "secret string for testing";
ClientHelloInfo::ClientHelloInfo(const QuicIpAddress& in_client_ip,
QuicWallTime in_now)
: client_ip(in_client_ip), now(in_now), valid_source_address_token(false) {}
ClientHelloInfo::ClientHelloInfo(const ClientHelloInfo& other) = default;
ClientHelloInfo::~ClientHelloInfo() {}
PrimaryConfigChangedCallback::PrimaryConfigChangedCallback() {}
PrimaryConfigChangedCallback::~PrimaryConfigChangedCallback() {}
ValidateClientHelloResultCallback::Result::Result(
const CryptoHandshakeMessage& in_client_hello, QuicIpAddress in_client_ip,
QuicWallTime in_now)
: client_hello(in_client_hello),
info(in_client_ip, in_now),
error_code(QUIC_NO_ERROR) {}
ValidateClientHelloResultCallback::Result::~Result() {}
ValidateClientHelloResultCallback::ValidateClientHelloResultCallback() {}
ValidateClientHelloResultCallback::~ValidateClientHelloResultCallback() {}
ProcessClientHelloResultCallback::ProcessClientHelloResultCallback() {}
ProcessClientHelloResultCallback::~ProcessClientHelloResultCallback() {}
QuicCryptoServerConfig::ConfigOptions::ConfigOptions()
: expiry_time(QuicWallTime::Zero()),
channel_id_enabled(false),
p256(false) {}
QuicCryptoServerConfig::ConfigOptions::ConfigOptions(
const ConfigOptions& other) = default;
QuicCryptoServerConfig::ConfigOptions::~ConfigOptions() {}
QuicCryptoServerConfig::ProcessClientHelloContext::
~ProcessClientHelloContext() {
if (done_cb_ != nullptr) {
QUIC_LOG(WARNING)
<< "Deleting ProcessClientHelloContext with a pending callback.";
}
}
void QuicCryptoServerConfig::ProcessClientHelloContext::Fail(
QuicErrorCode error, const std::string& error_details) {
QUIC_TRACEPRINTF("ProcessClientHello failed: error=%s, details=%s",
QuicErrorCodeToString(error), error_details);
done_cb_->Run(error, error_details, nullptr, nullptr, nullptr);
done_cb_ = nullptr;
}
void QuicCryptoServerConfig::ProcessClientHelloContext::Succeed(
std::unique_ptr<CryptoHandshakeMessage> message,
std::unique_ptr<DiversificationNonce> diversification_nonce,
std::unique_ptr<ProofSource::Details> proof_source_details) {
QUIC_TRACEPRINTF("ProcessClientHello succeeded: %s",
FormatCryptoHandshakeMessageForTrace(message.get()));
done_cb_->Run(QUIC_NO_ERROR, std::string(), std::move(message),
std::move(diversification_nonce),
std::move(proof_source_details));
done_cb_ = nullptr;
}
QuicCryptoServerConfig::QuicCryptoServerConfig(
absl::string_view source_address_token_secret,
QuicRandom* server_nonce_entropy, std::unique_ptr<ProofSource> proof_source,
std::unique_ptr<KeyExchangeSource> key_exchange_source)
: replay_protection_(true),
chlo_multiplier_(kMultiplier),
configs_lock_(),
primary_config_(nullptr),
next_config_promotion_time_(QuicWallTime::Zero()),
proof_source_(std::move(proof_source)),
key_exchange_source_(std::move(key_exchange_source)),
ssl_ctx_(TlsServerConnection::CreateSslCtx(proof_source_.get())),
source_address_token_future_secs_(3600),
source_address_token_lifetime_secs_(86400),
enable_serving_sct_(false),
rejection_observer_(nullptr),
pad_rej_(true),
pad_shlo_(true),
validate_chlo_size_(true),
validate_source_address_token_(true) {
QUICHE_DCHECK(proof_source_.get());
source_address_token_boxer_.SetKeys(
{DeriveSourceAddressTokenKey(source_address_token_secret)});
server_nonce_entropy->RandBytes(server_nonce_orbit_,
sizeof(server_nonce_orbit_));
const size_t key_size = server_nonce_boxer_.GetKeySize();
std::unique_ptr<uint8_t[]> key_bytes(new uint8_t[key_size]);
server_nonce_entropy->RandBytes(key_bytes.get(), key_size);
server_nonce_boxer_.SetKeys(
{std::string(reinterpret_cast<char*>(key_bytes.get()), key_size)});
}
QuicCryptoServerConfig::~QuicCryptoServerConfig() {}
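// Builds a fresh server config: generates a Curve25519 key pair (plus P-256
// when options.p256 is set), serializes an SCFG message carrying KEXS, AEAD,
// PUBS, EXPY, ORBT, and an SCID derived from a SHA-256 hash of the message
// unless options.id overrides it, and bundles the private keys alongside.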
QuicServerConfigProtobuf QuicCryptoServerConfig::GenerateConfig(
QuicRandom* rand, const QuicClock* clock, const ConfigOptions& options) {
CryptoHandshakeMessage msg;
const std::string curve25519_private_key =
Curve25519KeyExchange::NewPrivateKey(rand);
std::unique_ptr<Curve25519KeyExchange> curve25519 =
Curve25519KeyExchange::New(curve25519_private_key);
absl::string_view curve25519_public_value = curve25519->public_value();
std::string encoded_public_values;
QUICHE_DCHECK_LT(curve25519_public_value.size(), (1U << 24));
encoded_public_values.push_back(
static_cast<char>(curve25519_public_value.size()));
encoded_public_values.push_back(
static_cast<char>(curve25519_public_value.size() >> 8));
encoded_public_values.push_back(
static_cast<char>(curve25519_public_value.size() >> 16));
encoded_public_values.append(curve25519_public_value.data(),
curve25519_public_value.size());
std::string p256_private_key;
if (options.p256) {
p256_private_key = P256KeyExchange::NewPrivateKey();
std::unique_ptr<P256KeyExchange> p256(
P256KeyExchange::New(p256_private_key));
absl::string_view p256_public_value = p256->public_value();
QUICHE_DCHECK_LT(p256_public_value.size(), (1U << 24));
encoded_public_values.push_back(
static_cast<char>(p256_public_value.size()));
encoded_public_values.push_back(
static_cast<char>(p256_public_value.size() >> 8));
encoded_public_values.push_back(
static_cast<char>(p256_public_value.size() >> 16));
encoded_public_values.append(p256_public_value.data(),
p256_public_value.size());
}
msg.set_tag(kSCFG);
if (options.p256) {
msg.SetVector(kKEXS, QuicTagVector{kC255, kP256});
} else {
msg.SetVector(kKEXS, QuicTagVector{kC255});
}
msg.SetVector(kAEAD, QuicTagVector{kAESG, kCC20});
msg.SetStringPiece(kPUBS, encoded_public_values);
if (options.expiry_time.IsZero()) {
const QuicWallTime now = clock->WallNow();
    const QuicWallTime expiry = now.Add(QuicTime::Delta::FromSeconds(
        60 * 60 * 24 * 180 /* 180 days */));
const uint64_t expiry_seconds = expiry.ToUNIXSeconds();
msg.SetValue(kEXPY, expiry_seconds);
} else {
msg.SetValue(kEXPY, options.expiry_time.ToUNIXSeconds());
}
char orbit_bytes[kOrbitSize];
if (options.orbit.size() == sizeof(orbit_bytes)) {
memcpy(orbit_bytes, options.orbit.data(), sizeof(orbit_bytes));
} else {
QUICHE_DCHECK(options.orbit.empty());
rand->RandBytes(orbit_bytes, sizeof(orbit_bytes));
}
msg.SetStringPiece(kORBT,
absl::string_view(orbit_bytes, sizeof(orbit_bytes)));
if (options.channel_id_enabled) {
msg.SetVector(kPDMD, QuicTagVector{kCHID});
}
if (options.id.empty()) {
std::unique_ptr<QuicData> serialized =
CryptoFramer::ConstructHandshakeMessage(msg);
uint8_t scid_bytes[SHA256_DIGEST_LENGTH];
SHA256(reinterpret_cast<const uint8_t*>(serialized->data()),
serialized->length(), scid_bytes);
static_assert(16 <= SHA256_DIGEST_LENGTH, "SCID length too high.");
msg.SetStringPiece(
kSCID,
absl::string_view(reinterpret_cast<const char*>(scid_bytes), 16));
} else {
msg.SetStringPiece(kSCID, options.id);
}
std::unique_ptr<QuicData> serialized =
CryptoFramer::ConstructHandshakeMessage(msg);
QuicServerConfigProtobuf config;
config.set_config(std::string(serialized->AsStringPiece()));
QuicServerConfigProtobuf::PrivateKey* curve25519_key = config.add_key();
curve25519_key->set_tag(kC255);
curve25519_key->set_private_key(curve25519_private_key);
if (options.p256) {
QuicServerConfigProtobuf::PrivateKey* p256_key = config.add_key();
p256_key->set_tag(kP256);
p256_key->set_private_key(p256_private_key);
}
return config;
}
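// Parses |protobuf| into a Config, inserts it into configs_ keyed by SCID,
// and re-runs primary selection. Returns the parsed SCFG message, or nullptr
// on a parse failure or a duplicate SCID.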
std::unique_ptr<CryptoHandshakeMessage> QuicCryptoServerConfig::AddConfig(
const QuicServerConfigProtobuf& protobuf, const QuicWallTime now) {
std::unique_ptr<CryptoHandshakeMessage> msg =
CryptoFramer::ParseMessage(protobuf.config());
if (!msg) {
QUIC_LOG(WARNING) << "Failed to parse server config message";
return nullptr;
}
  quiche::QuicheReferenceCountedPointer<Config> config =
      ParseConfigProtobuf(protobuf, /* is_fallback = */ false);
if (!config) {
QUIC_LOG(WARNING) << "Failed to parse server config message";
return nullptr;
}
{
quiche::QuicheWriterMutexLock locked(&configs_lock_);
if (configs_.find(config->id) != configs_.end()) {
QUIC_LOG(WARNING) << "Failed to add config because another with the same "
"server config id already exists: "
<< absl::BytesToHexString(config->id);
return nullptr;
}
configs_[config->id] = config;
SelectNewPrimaryConfig(now);
QUICHE_DCHECK(primary_config_.get());
QUICHE_DCHECK_EQ(configs_.find(primary_config_->id)->second.get(),
primary_config_.get());
}
return msg;
}
std::unique_ptr<CryptoHandshakeMessage>
QuicCryptoServerConfig::AddDefaultConfig(QuicRandom* rand,
const QuicClock* clock,
const ConfigOptions& options) {
return AddConfig(GenerateConfig(rand, clock, options), clock->WallNow());
}
bool QuicCryptoServerConfig::SetConfigs(
const std::vector<QuicServerConfigProtobuf>& protobufs,
const QuicServerConfigProtobuf* fallback_protobuf, const QuicWallTime now) {
std::vector<quiche::QuicheReferenceCountedPointer<Config>> parsed_configs;
for (auto& protobuf : protobufs) {
quiche::QuicheReferenceCountedPointer<Config> config =
ParseConfigProtobuf(protobuf, false);
if (!config) {
QUIC_LOG(WARNING) << "Rejecting QUIC configs because of above errors";
return false;
}
parsed_configs.push_back(config);
}
quiche::QuicheReferenceCountedPointer<Config> fallback_config;
if (fallback_protobuf != nullptr) {
    fallback_config =
        ParseConfigProtobuf(*fallback_protobuf, /* is_fallback = */ true);
if (!fallback_config) {
QUIC_LOG(WARNING) << "Rejecting QUIC configs because of above errors";
return false;
}
QUIC_LOG(INFO) << "Fallback config has scid "
<< absl::BytesToHexString(fallback_config->id);
parsed_configs.push_back(fallback_config);
} else {
QUIC_LOG(INFO) << "No fallback config provided";
}
if (parsed_configs.empty()) {
QUIC_LOG(WARNING)
<< "Rejecting QUIC configs because new config list is empty.";
return false;
}
QUIC_LOG(INFO) << "Updating configs:";
quiche::QuicheWriterMutexLock locked(&configs_lock_);
ConfigMap new_configs;
for (const quiche::QuicheReferenceCountedPointer<Config>& config :
parsed_configs) {
auto it = configs_.find(config->id);
if (it != configs_.end()) {
QUIC_LOG(INFO) << "Keeping scid: " << absl::BytesToHexString(config->id)
<< " orbit: "
<< absl::BytesToHexString(absl::string_view(
reinterpret_cast<const char*>(config->orbit),
kOrbitSize))
<< " new primary_time "
<< config->primary_time.ToUNIXSeconds()
<< " old primary_time "
<< it->second->primary_time.ToUNIXSeconds()
<< " new priority " << config->priority << " old priority "
<< it->second->priority;
it->second->primary_time = config->primary_time;
it->second->priority = config->priority;
new_configs.insert(*it);
} else {
QUIC_LOG(INFO) << "Adding scid: " << absl::BytesToHexString(config->id)
<< " orbit: "
<< absl::BytesToHexString(absl::string_view(
reinterpret_cast<const char*>(config->orbit),
kOrbitSize))
<< " primary_time " << config->primary_time.ToUNIXSeconds()
<< " priority " << config->priority;
new_configs.emplace(config->id, config);
}
}
configs_ = std::move(new_configs);
fallback_config_ = fallback_config;
SelectNewPrimaryConfig(now);
QUICHE_DCHECK(primary_config_.get());
QUICHE_DCHECK_EQ(configs_.find(primary_config_->id)->second.get(),
primary_config_.get());
return true;
}
void QuicCryptoServerConfig::SetSourceAddressTokenKeys(
const std::vector<std::string>& keys) {
source_address_token_boxer_.SetKeys(keys);
}
std::vector<std::string> QuicCryptoServerConfig::GetConfigIds() const {
quiche::QuicheReaderMutexLock locked(&configs_lock_);
std::vector<std::string> scids;
for (auto it = configs_.begin(); it != configs_.end(); ++it) {
scids.push_back(it->first);
}
return scids;
}
void QuicCryptoServerConfig::ValidateClientHello(
const CryptoHandshakeMessage& client_hello,
const QuicSocketAddress& client_address,
const QuicSocketAddress& server_address, QuicTransportVersion version,
const QuicClock* clock,
quiche::QuicheReferenceCountedPointer<QuicSignedServerConfig> signed_config,
std::unique_ptr<ValidateClientHelloResultCallback> done_cb) const {
const QuicWallTime now(clock->WallNow());
quiche::QuicheReferenceCountedPointer<
ValidateClientHelloResultCallback::Result>
result(new ValidateClientHelloResultCallback::Result(
client_hello, client_address.host(), now));
absl::string_view requested_scid;
client_hello.GetStringPiece(kSCID, &requested_scid);
Configs configs;
  if (!GetCurrentConfigs(now, requested_scid,
                         /* old_primary_config = */ nullptr, &configs)) {
result->error_code = QUIC_CRYPTO_INTERNAL_ERROR;
result->error_details = "No configurations loaded";
}
signed_config->config = configs.primary;
if (result->error_code == QUIC_NO_ERROR) {
signed_config->chain = nullptr;
signed_config->proof.signature = "";
signed_config->proof.leaf_cert_scts = "";
EvaluateClientHello(server_address, client_address, version, configs,
result, std::move(done_cb));
} else {
done_cb->Run(result, nullptr);
}
}
class QuicCryptoServerConfig::ProcessClientHelloCallback
: public ProofSource::Callback {
public:
ProcessClientHelloCallback(const QuicCryptoServerConfig* config,
std::unique_ptr<ProcessClientHelloContext> context,
const Configs& configs)
: config_(config), context_(std::move(context)), configs_(configs) {}
void Run(
bool ok,
const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain,
const QuicCryptoProof& proof,
std::unique_ptr<ProofSource::Details> details) override {
if (ok) {
context_->signed_config()->chain = chain;
context_->signed_config()->proof = proof;
}
config_->ProcessClientHelloAfterGetProof(!ok, std::move(details),
std::move(context_), configs_);
}
private:
const QuicCryptoServerConfig* config_;
std::unique_ptr<ProcessClientHelloContext> context_;
const Configs configs_;
};
class QuicCryptoServerConfig::ProcessClientHelloAfterGetProofCallback
: public AsynchronousKeyExchange::Callback {
public:
ProcessClientHelloAfterGetProofCallback(
const QuicCryptoServerConfig* config,
std::unique_ptr<ProofSource::Details> proof_source_details,
QuicTag key_exchange_type, std::unique_ptr<CryptoHandshakeMessage> out,
absl::string_view public_value,
std::unique_ptr<ProcessClientHelloContext> context,
const Configs& configs)
: config_(config),
proof_source_details_(std::move(proof_source_details)),
key_exchange_type_(key_exchange_type),
out_(std::move(out)),
public_value_(public_value),
context_(std::move(context)),
configs_(configs) {}
void Run(bool ok) override {
config_->ProcessClientHelloAfterCalculateSharedKeys(
!ok, std::move(proof_source_details_), key_exchange_type_,
std::move(out_), public_value_, std::move(context_), configs_);
}
private:
const QuicCryptoServerConfig* config_;
std::unique_ptr<ProofSource::Details> proof_source_details_;
const QuicTag key_exchange_type_;
std::unique_ptr<CryptoHandshakeMessage> out_;
const std::string public_value_;
std::unique_ptr<ProcessClientHelloContext> context_;
const Configs configs_;
std::unique_ptr<ProcessClientHelloResultCallback> done_cb_;
};
class QuicCryptoServerConfig::SendRejectWithFallbackConfigCallback
: public ProofSource::Callback {
public:
SendRejectWithFallbackConfigCallback(
const QuicCryptoServerConfig* config,
std::unique_ptr<ProcessClientHelloContext> context,
quiche::QuicheReferenceCountedPointer<Config> fallback_config)
: config_(config),
context_(std::move(context)),
fallback_config_(fallback_config) {}
void Run(
bool ok,
const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain,
const QuicCryptoProof& proof,
std::unique_ptr<ProofSource::Details> details) override {
if (ok) {
context_->signed_config()->chain = chain;
context_->signed_config()->proof = proof;
}
config_->SendRejectWithFallbackConfigAfterGetProof(
!ok, std::move(details), std::move(context_), fallback_config_);
}
private:
const QuicCryptoServerConfig* config_;
std::unique_ptr<ProcessClientHelloContext> context_;
quiche::QuicheReferenceCountedPointer<Config> fallback_config_;
};
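// Entry point for handling a validated CHLO: re-checks the message against
// the negotiated version set, snapshots the current configs, and, when no
// certificate chain has been attached yet, fetches a proof asynchronously
// before continuing in ProcessClientHelloAfterGetProof.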
void QuicCryptoServerConfig::ProcessClientHello(
quiche::QuicheReferenceCountedPointer<
ValidateClientHelloResultCallback::Result>
validate_chlo_result,
bool reject_only, QuicConnectionId connection_id,
const QuicSocketAddress& server_address,
const QuicSocketAddress& client_address, ParsedQuicVersion version,
const ParsedQuicVersionVector& supported_versions, const QuicClock* clock,
QuicRandom* rand, QuicCompressedCertsCache* compressed_certs_cache,
quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters>
params,
quiche::QuicheReferenceCountedPointer<QuicSignedServerConfig> signed_config,
QuicByteCount total_framing_overhead, QuicByteCount chlo_packet_size,
std::shared_ptr<ProcessClientHelloResultCallback> done_cb) const {
QUICHE_DCHECK(done_cb);
auto context = std::make_unique<ProcessClientHelloContext>(
validate_chlo_result, reject_only, connection_id, server_address,
client_address, version, supported_versions, clock, rand,
compressed_certs_cache, params, signed_config, total_framing_overhead,
chlo_packet_size, std::move(done_cb));
std::string error_details;
QuicErrorCode valid = CryptoUtils::ValidateClientHello(
context->client_hello(), context->version(),
context->supported_versions(), &error_details);
if (valid != QUIC_NO_ERROR) {
context->Fail(valid, error_details);
return;
}
absl::string_view requested_scid;
context->client_hello().GetStringPiece(kSCID, &requested_scid);
Configs configs;
if (!GetCurrentConfigs(context->clock()->WallNow(), requested_scid,
signed_config->config, &configs)) {
context->Fail(QUIC_CRYPTO_INTERNAL_ERROR, "No configurations loaded");
return;
}
if (context->validate_chlo_result()->error_code != QUIC_NO_ERROR) {
context->Fail(context->validate_chlo_result()->error_code,
context->validate_chlo_result()->error_details);
return;
}
if (!ClientDemandsX509Proof(context->client_hello())) {
context->Fail(QUIC_UNSUPPORTED_PROOF_DEMAND, "Missing or invalid PDMD");
return;
}
if (!context->signed_config()->chain) {
const std::string chlo_hash = CryptoUtils::HashHandshakeMessage(
context->client_hello(), Perspective::IS_SERVER);
const QuicSocketAddress context_server_address = context->server_address();
const std::string sni = std::string(context->info().sni);
const QuicTransportVersion transport_version = context->transport_version();
auto cb = std::make_unique<ProcessClientHelloCallback>(
this, std::move(context), configs);
QUICHE_DCHECK(proof_source_.get());
proof_source_->GetProof(context_server_address, client_address, sni,
configs.primary->serialized, transport_version,
chlo_hash, std::move(cb));
return;
}
  ProcessClientHelloAfterGetProof(
      /* found_error = */ false, /* proof_source_details = */ nullptr,
      std::move(context), configs);
}
void QuicCryptoServerConfig::ProcessClientHelloAfterGetProof(
bool found_error,
std::unique_ptr<ProofSource::Details> proof_source_details,
std::unique_ptr<ProcessClientHelloContext> context,
const Configs& configs) const {
QUIC_BUG_IF(quic_bug_12963_2,
!QuicUtils::IsConnectionIdValidForVersion(
context->connection_id(), context->transport_version()))
<< "ProcessClientHelloAfterGetProof: attempted to use connection ID "
<< context->connection_id() << " which is invalid with version "
<< context->version();
if (context->info().reject_reasons.empty()) {
if (!context->signed_config() || !context->signed_config()->chain) {
context->validate_chlo_result()->info.reject_reasons.push_back(
SERVER_CONFIG_UNKNOWN_CONFIG_FAILURE);
} else if (!ValidateExpectedLeafCertificate(
context->client_hello(),
context->signed_config()->chain->certs)) {
context->validate_chlo_result()->info.reject_reasons.push_back(
INVALID_EXPECTED_LEAF_CERTIFICATE);
}
}
if (found_error) {
context->Fail(QUIC_HANDSHAKE_FAILED, "Failed to get proof");
return;
}
auto out_diversification_nonce = std::make_unique<DiversificationNonce>();
absl::string_view cert_sct;
if (context->client_hello().GetStringPiece(kCertificateSCTTag, &cert_sct) &&
cert_sct.empty()) {
context->params()->sct_supported_by_client = true;
}
auto out = std::make_unique<CryptoHandshakeMessage>();
if (!context->info().reject_reasons.empty() || !configs.requested) {
BuildRejectionAndRecordStats(*context, *configs.primary,
context->info().reject_reasons, out.get());
context->Succeed(std::move(out), std::move(out_diversification_nonce),
std::move(proof_source_details));
return;
}
if (context->reject_only()) {
context->Succeed(std::move(out), std::move(out_diversification_nonce),
std::move(proof_source_details));
return;
}
QuicTagVector their_aeads;
QuicTagVector their_key_exchanges;
if (context->client_hello().GetTaglist(kAEAD, &their_aeads) !=
QUIC_NO_ERROR ||
context->client_hello().GetTaglist(kKEXS, &their_key_exchanges) !=
QUIC_NO_ERROR ||
their_aeads.size() != 1 || their_key_exchanges.size() != 1) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"Missing or invalid AEAD or KEXS");
return;
}
size_t key_exchange_index;
if (!FindMutualQuicTag(configs.requested->aead, their_aeads,
&context->params()->aead, nullptr) ||
!FindMutualQuicTag(configs.requested->kexs, their_key_exchanges,
&context->params()->key_exchange,
&key_exchange_index)) {
context->Fail(QUIC_CRYPTO_NO_SUPPORT, "Unsupported AEAD or KEXS");
return;
}
absl::string_view public_value;
if (!context->client_hello().GetStringPiece(kPUBS, &public_value)) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"Missing public value");
return;
}
AdjustTestValue("quic::QuicCryptoServerConfig::public_value_adjust",
&public_value);
const AsynchronousKeyExchange* key_exchange =
configs.requested->key_exchanges[key_exchange_index].get();
std::string* initial_premaster_secret =
&context->params()->initial_premaster_secret;
auto cb = std::make_unique<ProcessClientHelloAfterGetProofCallback>(
this, std::move(proof_source_details), key_exchange->type(),
std::move(out), public_value, std::move(context), configs);
key_exchange->CalculateSharedKeyAsync(public_value, initial_premaster_secret,
std::move(cb));
}
void QuicCryptoServerConfig::ProcessClientHelloAfterCalculateSharedKeys(
bool found_error,
std::unique_ptr<ProofSource::Details> proof_source_details,
QuicTag key_exchange_type, std::unique_ptr<CryptoHandshakeMessage> out,
absl::string_view public_value,
std::unique_ptr<ProcessClientHelloContext> context,
const Configs& configs) const {
QUIC_BUG_IF(quic_bug_12963_3,
!QuicUtils::IsConnectionIdValidForVersion(
context->connection_id(), context->transport_version()))
<< "ProcessClientHelloAfterCalculateSharedKeys:"
" attempted to use connection ID "
<< context->connection_id() << " which is invalid with version "
<< context->version();
if (found_error) {
if (configs.fallback == nullptr ||
context->signed_config()->config == configs.fallback) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"Failed to calculate shared key");
} else {
SendRejectWithFallbackConfig(std::move(context), configs.fallback);
}
return;
}
if (!context->info().sni.empty()) {
context->params()->sni =
QuicHostnameUtils::NormalizeHostname(context->info().sni);
}
std::string hkdf_suffix;
const QuicData& client_hello_serialized =
context->client_hello().GetSerialized();
hkdf_suffix.reserve(context->connection_id().length() +
client_hello_serialized.length() +
configs.requested->serialized.size());
hkdf_suffix.append(context->connection_id().data(),
context->connection_id().length());
hkdf_suffix.append(client_hello_serialized.data(),
client_hello_serialized.length());
hkdf_suffix.append(configs.requested->serialized);
QUICHE_DCHECK(proof_source_.get());
if (context->signed_config()->chain->certs.empty()) {
context->Fail(QUIC_CRYPTO_INTERNAL_ERROR, "Failed to get certs");
return;
}
hkdf_suffix.append(context->signed_config()->chain->certs[0]);
absl::string_view cetv_ciphertext;
if (configs.requested->channel_id_enabled &&
context->client_hello().GetStringPiece(kCETV, &cetv_ciphertext)) {
CryptoHandshakeMessage client_hello_copy(context->client_hello());
client_hello_copy.Erase(kCETV);
client_hello_copy.Erase(kPAD);
const QuicData& client_hello_copy_serialized =
client_hello_copy.GetSerialized();
std::string hkdf_input;
hkdf_input.append(QuicCryptoConfig::kCETVLabel,
strlen(QuicCryptoConfig::kCETVLabel) + 1);
hkdf_input.append(context->connection_id().data(),
context->connection_id().length());
hkdf_input.append(client_hello_copy_serialized.data(),
client_hello_copy_serialized.length());
hkdf_input.append(configs.requested->serialized);
CrypterPair crypters;
if (!CryptoUtils::DeriveKeys(
context->version(), context->params()->initial_premaster_secret,
context->params()->aead, context->info().client_nonce,
context->info().server_nonce, pre_shared_key_, hkdf_input,
Perspective::IS_SERVER, CryptoUtils::Diversification::Never(),
            &crypters, nullptr /* subkey secret */)) {
context->Fail(QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED,
"Symmetric key setup failed");
return;
}
char plaintext[kMaxOutgoingPacketSize];
size_t plaintext_length = 0;
const bool success = crypters.decrypter->DecryptPacket(
        0 /* packet number */, absl::string_view() /* associated data */,
cetv_ciphertext, plaintext, &plaintext_length, kMaxOutgoingPacketSize);
if (!success) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"CETV decryption failure");
return;
}
std::unique_ptr<CryptoHandshakeMessage> cetv(CryptoFramer::ParseMessage(
absl::string_view(plaintext, plaintext_length)));
if (!cetv) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER, "CETV parse error");
return;
}
absl::string_view key, signature;
if (cetv->GetStringPiece(kCIDK, &key) &&
cetv->GetStringPiece(kCIDS, &signature)) {
if (!ChannelIDVerifier::Verify(key, hkdf_input, signature)) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"ChannelID signature failure");
return;
}
context->params()->channel_id = std::string(key);
}
}
std::string hkdf_input;
size_t label_len = strlen(QuicCryptoConfig::kInitialLabel) + 1;
hkdf_input.reserve(label_len + hkdf_suffix.size());
hkdf_input.append(QuicCryptoConfig::kInitialLabel, label_len);
hkdf_input.append(hkdf_suffix);
auto out_diversification_nonce = std::make_unique<DiversificationNonce>();
context->rand()->RandBytes(out_diversification_nonce->data(),
out_diversification_nonce->size());
CryptoUtils::Diversification diversification =
CryptoUtils::Diversification::Now(out_diversification_nonce.get());
if (!CryptoUtils::DeriveKeys(
context->version(), context->params()->initial_premaster_secret,
context->params()->aead, context->info().client_nonce,
context->info().server_nonce, pre_shared_key_, hkdf_input,
Perspective::IS_SERVER, diversification,
&context->params()->initial_crypters,
&context->params()->initial_subkey_secret)) {
context->Fail(QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED,
"Symmetric key setup failed");
return;
}
std::string forward_secure_public_value;
std::unique_ptr<SynchronousKeyExchange> forward_secure_key_exchange =
CreateLocalSynchronousKeyExchange(key_exchange_type, context->rand());
if (!forward_secure_key_exchange) {
QUIC_DLOG(WARNING) << "Failed to create keypair";
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"Failed to create keypair");
return;
}
forward_secure_public_value =
std::string(forward_secure_key_exchange->public_value());
if (!forward_secure_key_exchange->CalculateSharedKeySync(
public_value, &context->params()->forward_secure_premaster_secret)) {
context->Fail(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"Invalid public value");
return;
}
std::string forward_secure_hkdf_input;
label_len = strlen(QuicCryptoConfig::kForwardSecureLabel) + 1;
forward_secure_hkdf_input.reserve(label_len + hkdf_suffix.size());
forward_secure_hkdf_input.append(QuicCryptoConfig::kForwardSecureLabel,
label_len);
forward_secure_hkdf_input.append(hkdf_suffix);
  std::string shlo_nonce =
      NewServerNonce(context->rand(), context->info().now);
out->SetStringPiece(kServerNonceTag, shlo_nonce);
if (!CryptoUtils::DeriveKeys(
context->version(),
context->params()->forward_secure_premaster_secret,
context->params()->aead, context->info().client_nonce,
shlo_nonce.empty() ? context->info().server_nonce : shlo_nonce,
pre_shared_key_, forward_secure_hkdf_input, Perspective::IS_SERVER,
CryptoUtils::Diversification::Never(),
&context->params()->forward_secure_crypters,
&context->params()->subkey_secret)) {
context->Fail(QUIC_CRYPTO_SYMMETRIC_KEY_SETUP_FAILED,
"Symmetric key setup failed");
return;
}
out->set_tag(kSHLO);
out->SetVersionVector(kVER, context->supported_versions());
out->SetStringPiece(
kSourceAddressTokenTag,
NewSourceAddressToken(*configs.requested->source_address_token_boxer,
context->info().source_address_tokens,
context->client_address().host(), context->rand(),
context->info().now, nullptr));
QuicSocketAddressCoder address_coder(context->client_address());
out->SetStringPiece(kCADR, address_coder.Encode());
out->SetStringPiece(kPUBS, forward_secure_public_value);
context->Succeed(std::move(out), std::move(out_diversification_nonce),
std::move(proof_source_details));
}
void QuicCryptoServerConfig::SendRejectWithFallbackConfig(
std::unique_ptr<ProcessClientHelloContext> context,
quiche::QuicheReferenceCountedPointer<Config> fallback_config) const {
const std::string chlo_hash = CryptoUtils::HashHandshakeMessage(
context->client_hello(), Perspective::IS_SERVER);
const QuicSocketAddress server_address = context->server_address();
const std::string sni(context->info().sni);
const QuicTransportVersion transport_version = context->transport_version();
const QuicSocketAddress& client_address = context->client_address();
auto cb = std::make_unique<SendRejectWithFallbackConfigCallback>(
this, std::move(context), fallback_config);
proof_source_->GetProof(server_address, client_address, sni,
fallback_config->serialized, transport_version,
chlo_hash, std::move(cb));
}
void QuicCryptoServerConfig::SendRejectWithFallbackConfigAfterGetProof(
bool found_error,
std::unique_ptr<ProofSource::Details> proof_source_details,
std::unique_ptr<ProcessClientHelloContext> context,
quiche::QuicheReferenceCountedPointer<Config> fallback_config) const {
if (found_error) {
context->Fail(QUIC_HANDSHAKE_FAILED, "Failed to get proof");
return;
}
auto out = std::make_unique<CryptoHandshakeMessage>();
BuildRejectionAndRecordStats(*context, *fallback_config,
{SERVER_CONFIG_UNKNOWN_CONFIG_FAILURE},
out.get());
context->Succeed(std::move(out), std::make_unique<DiversificationNonce>(),
std::move(proof_source_details));
}
quiche::QuicheReferenceCountedPointer<QuicCryptoServerConfig::Config>
QuicCryptoServerConfig::GetConfigWithScid(
absl::string_view requested_scid) const {
configs_lock_.AssertReaderHeld();
if (!requested_scid.empty()) {
auto it = configs_.find((std::string(requested_scid)));
if (it != configs_.end()) {
return quiche::QuicheReferenceCountedPointer<Config>(it->second);
}
}
return quiche::QuicheReferenceCountedPointer<Config>();
}
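// Takes a consistent snapshot of the primary, requested, and fallback
// configs under configs_lock_, first promoting a pending config to primary
// (upgrading to the writer lock) if its promotion time has arrived.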
bool QuicCryptoServerConfig::GetCurrentConfigs(
const QuicWallTime& now, absl::string_view requested_scid,
quiche::QuicheReferenceCountedPointer<Config> old_primary_config,
Configs* configs) const {
quiche::QuicheReaderMutexLock locked(&configs_lock_);
if (!primary_config_) {
return false;
}
if (IsNextConfigReady(now)) {
configs_lock_.ReaderUnlock();
configs_lock_.WriterLock();
SelectNewPrimaryConfig(now);
QUICHE_DCHECK(primary_config_.get());
QUICHE_DCHECK_EQ(configs_.find(primary_config_->id)->second.get(),
primary_config_.get());
configs_lock_.WriterUnlock();
configs_lock_.ReaderLock();
}
if (old_primary_config != nullptr) {
configs->primary = old_primary_config;
} else {
configs->primary = primary_config_;
}
configs->requested = GetConfigWithScid(requested_scid);
configs->fallback = fallback_config_;
return true;
}
bool QuicCryptoServerConfig::ConfigPrimaryTimeLessThan(
const quiche::QuicheReferenceCountedPointer<Config>& a,
const quiche::QuicheReferenceCountedPointer<Config>& b) {
if (a->primary_time.IsBefore(b->primary_time) ||
b->primary_time.IsBefore(a->primary_time)) {
return a->primary_time.IsBefore(b->primary_time);
} else if (a->priority != b->priority) {
return a->priority < b->priority;
} else {
return a->id < b->id;
}
}
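// Sorts the configs by primary_time, then priority, then SCID, and promotes
// the best candidate whose primary_time is not in the future; also records
// when the next promotion is due and notifies primary_config_changed_cb_.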
void QuicCryptoServerConfig::SelectNewPrimaryConfig(
const QuicWallTime now) const {
std::vector<quiche::QuicheReferenceCountedPointer<Config>> configs;
configs.reserve(configs_.size());
for (auto it = configs_.begin(); it != configs_.end(); ++it) {
configs.push_back(it->second);
}
if (configs.empty()) {
if (primary_config_ != nullptr) {
QUIC_BUG(quic_bug_10630_2)
<< "No valid QUIC server config. Keeping the current config.";
} else {
QUIC_BUG(quic_bug_10630_3) << "No valid QUIC server config.";
}
return;
}
std::sort(configs.begin(), configs.end(), ConfigPrimaryTimeLessThan);
quiche::QuicheReferenceCountedPointer<Config> best_candidate = configs[0];
for (size_t i = 0; i < configs.size(); ++i) {
const quiche::QuicheReferenceCountedPointer<Config> config(configs[i]);
if (!config->primary_time.IsAfter(now)) {
if (config->primary_time.IsAfter(best_candidate->primary_time)) {
best_candidate = config;
}
continue;
}
quiche::QuicheReferenceCountedPointer<Config> new_primary = best_candidate;
if (i == 0) {
if (configs.size() > 1) {
next_config_promotion_time_ = configs[1]->primary_time;
} else {
next_config_promotion_time_ = QuicWallTime::Zero();
}
} else {
next_config_promotion_time_ = config->primary_time;
}
if (primary_config_) {
primary_config_->is_primary = false;
}
primary_config_ = new_primary;
new_primary->is_primary = true;
QUIC_DLOG(INFO) << "New primary config. orbit: "
<< absl::BytesToHexString(
absl::string_view(reinterpret_cast<const char*>(
primary_config_->orbit),
kOrbitSize));
if (primary_config_changed_cb_ != nullptr) {
primary_config_changed_cb_->Run(primary_config_->id);
}
return;
}
quiche::QuicheReferenceCountedPointer<Config> new_primary = best_candidate;
if (primary_config_) {
primary_config_->is_primary = false;
}
primary_config_ = new_primary;
new_primary->is_primary = true;
QUIC_DLOG(INFO) << "New primary config. orbit: "
<< absl::BytesToHexString(absl::string_view(
reinterpret_cast<const char*>(primary_config_->orbit),
kOrbitSize))
<< " scid: " << absl::BytesToHexString(primary_config_->id);
next_config_promotion_time_ = QuicWallTime::Zero();
if (primary_config_changed_cb_ != nullptr) {
primary_config_changed_cb_->Run(primary_config_->id);
}
}
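// Performs the CHLO checks that need no proof: SNI validity, source-address
// token verification, presence of a known SCID, and client/server nonce
// checks. Failures are accumulated as reject reasons rather than hard errors.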
void QuicCryptoServerConfig::EvaluateClientHello(
    const QuicSocketAddress& /*server_address*/,
    const QuicSocketAddress& /*client_address*/,
    QuicTransportVersion /*version*/, const Configs& configs,
quiche::QuicheReferenceCountedPointer<
ValidateClientHelloResultCallback::Result>
client_hello_state,
std::unique_ptr<ValidateClientHelloResultCallback> done_cb) const {
ValidateClientHelloHelper helper(client_hello_state, &done_cb);
const CryptoHandshakeMessage& client_hello = client_hello_state->client_hello;
ClientHelloInfo* info = &(client_hello_state->info);
if (client_hello.GetStringPiece(kSNI, &info->sni) &&
!QuicHostnameUtils::IsValidSNI(info->sni)) {
helper.ValidationComplete(QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER,
"Invalid SNI name", nullptr);
return;
}
client_hello.GetStringPiece(kUAID, &info->user_agent_id);
HandshakeFailureReason source_address_token_error = MAX_FAILURE_REASON;
if (validate_source_address_token_) {
absl::string_view srct;
if (client_hello.GetStringPiece(kSourceAddressTokenTag, &srct)) {
Config& config =
configs.requested != nullptr ? *configs.requested : *configs.primary;
source_address_token_error =
ParseSourceAddressToken(*config.source_address_token_boxer, srct,
info->source_address_tokens);
if (source_address_token_error == HANDSHAKE_OK) {
source_address_token_error = ValidateSourceAddressTokens(
info->source_address_tokens, info->client_ip, info->now,
&client_hello_state->cached_network_params);
}
info->valid_source_address_token =
(source_address_token_error == HANDSHAKE_OK);
} else {
source_address_token_error = SOURCE_ADDRESS_TOKEN_INVALID_FAILURE;
}
} else {
source_address_token_error = HANDSHAKE_OK;
info->valid_source_address_token = true;
}
if (!configs.requested) {
absl::string_view requested_scid;
if (client_hello.GetStringPiece(kSCID, &requested_scid)) {
info->reject_reasons.push_back(SERVER_CONFIG_UNKNOWN_CONFIG_FAILURE);
} else {
info->reject_reasons.push_back(SERVER_CONFIG_INCHOATE_HELLO_FAILURE);
}
helper.ValidationComplete(QUIC_NO_ERROR, "", nullptr);
return;
}
if (!client_hello.GetStringPiece(kNONC, &info->client_nonce)) {
info->reject_reasons.push_back(SERVER_CONFIG_INCHOATE_HELLO_FAILURE);
helper.ValidationComplete(QUIC_NO_ERROR, "", nullptr);
return;
}
if (source_address_token_error != HANDSHAKE_OK) {
info->reject_reasons.push_back(source_address_token_error);
}
if (info->client_nonce.size() != kNonceSize) {
info->reject_reasons.push_back(CLIENT_NONCE_INVALID_FAILURE);
QUIC_LOG_FIRST_N(ERROR, 2)
<< "Invalid client nonce: " << client_hello.DebugString();
QUIC_DLOG(INFO) << "Invalid client nonce.";
}
client_hello.GetStringPiece(kServerNonceTag, &info->server_nonce);
if (GetQuicReloadableFlag(quic_require_handshake_confirmation) &&
info->server_nonce.empty()) {
QUIC_RELOADABLE_FLAG_COUNT(quic_require_handshake_confirmation);
info->reject_reasons.push_back(SERVER_NONCE_REQUIRED_FAILURE);
}
helper.ValidationComplete(QUIC_NO_ERROR, "",
std::unique_ptr<ProofSource::Details>());
}
void QuicCryptoServerConfig::BuildServerConfigUpdateMessage(
QuicTransportVersion version, absl::string_view chlo_hash,
const SourceAddressTokens& previous_source_address_tokens,
const QuicSocketAddress& server_address,
const QuicSocketAddress& client_address, const QuicClock* clock,
QuicRandom* rand, QuicCompressedCertsCache* compressed_certs_cache,
const QuicCryptoNegotiatedParameters& params,
const CachedNetworkParameters* cached_network_params,
std::unique_ptr<BuildServerConfigUpdateMessageResultCallback> cb) const {
std::string serialized;
std::string source_address_token;
{
quiche::QuicheReaderMutexLock locked(&configs_lock_);
serialized = primary_config_->serialized;
source_address_token = NewSourceAddressToken(
*primary_config_->source_address_token_boxer,
previous_source_address_tokens, client_address.host(), rand,
clock->WallNow(), cached_network_params);
}
CryptoHandshakeMessage message;
message.set_tag(kSCUP);
message.SetStringPiece(kSCFG, serialized);
message.SetStringPiece(kSourceAddressTokenTag, source_address_token);
auto proof_source_cb =
std::make_unique<BuildServerConfigUpdateMessageProofSourceCallback>(
this, compressed_certs_cache, params, std::move(message),
std::move(cb));
proof_source_->GetProof(server_address, client_address, params.sni,
serialized, version, chlo_hash,
std::move(proof_source_cb));
}
QuicCryptoServerConfig::BuildServerConfigUpdateMessageProofSourceCallback::
~BuildServerConfigUpdateMessageProofSourceCallback() {}
QuicCryptoServerConfig::BuildServerConfigUpdateMessageProofSourceCallback::
BuildServerConfigUpdateMessageProofSourceCallback(
const QuicCryptoServerConfig* config,
QuicCompressedCertsCache* compressed_certs_cache,
const QuicCryptoNegotiatedParameters& params,
CryptoHandshakeMessage message,
std::unique_ptr<BuildServerConfigUpdateMessageResultCallback> cb)
: config_(config),
compressed_certs_cache_(compressed_certs_cache),
client_cached_cert_hashes_(params.client_cached_cert_hashes),
sct_supported_by_client_(params.sct_supported_by_client),
sni_(params.sni),
message_(std::move(message)),
cb_(std::move(cb)) {}
void QuicCryptoServerConfig::BuildServerConfigUpdateMessageProofSourceCallback::
Run(bool ok,
const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain,
const QuicCryptoProof& proof,
std::unique_ptr<ProofSource::Details> details) {
config_->FinishBuildServerConfigUpdateMessage(
compressed_certs_cache_, client_cached_cert_hashes_,
sct_supported_by_client_, sni_, ok, chain, proof.signature,
proof.leaf_cert_scts, std::move(details), std::move(message_),
std::move(cb_));
}
void QuicCryptoServerConfig::FinishBuildServerConfigUpdateMessage(
QuicCompressedCertsCache* compressed_certs_cache,
const std::string& client_cached_cert_hashes, bool sct_supported_by_client,
const std::string& sni, bool ok,
const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain,
const std::string& signature, const std::string& leaf_cert_sct,
    std::unique_ptr<ProofSource::Details> /*details*/,
CryptoHandshakeMessage message,
std::unique_ptr<BuildServerConfigUpdateMessageResultCallback> cb) const {
if (!ok) {
cb->Run(false, message);
return;
}
const std::string compressed =
CompressChain(compressed_certs_cache, chain, client_cached_cert_hashes);
message.SetStringPiece(kCertificateTag, compressed);
message.SetStringPiece(kPROF, signature);
if (sct_supported_by_client && enable_serving_sct_) {
if (leaf_cert_sct.empty()) {
QUIC_LOG_EVERY_N_SEC(WARNING, 60)
<< "SCT is expected but it is empty. SNI: " << sni;
} else {
message.SetStringPiece(kCertificateSCTTag, leaf_cert_sct);
}
}
cb->Run(true, message);
}
void QuicCryptoServerConfig::BuildRejectionAndRecordStats(
const ProcessClientHelloContext& context, const Config& config,
const std::vector<uint32_t>& reject_reasons,
CryptoHandshakeMessage* out) const {
BuildRejection(context, config, reject_reasons, out);
if (rejection_observer_ != nullptr) {
rejection_observer_->OnRejectionBuilt(reject_reasons, out);
}
}
void QuicCryptoServerConfig::BuildRejection(
const ProcessClientHelloContext& context, const Config& config,
const std::vector<uint32_t>& reject_reasons,
CryptoHandshakeMessage* out) const {
const QuicWallTime now = context.clock()->WallNow();
out->set_tag(kREJ);
out->SetStringPiece(kSCFG, config.serialized);
out->SetStringPiece(
kSourceAddressTokenTag,
NewSourceAddressToken(
*config.source_address_token_boxer,
context.info().source_address_tokens, context.info().client_ip,
context.rand(), context.info().now,
&context.validate_chlo_result()->cached_network_params));
out->SetValue(kSTTL, config.expiry_time.AbsoluteDifference(now).ToSeconds());
if (replay_protection_) {
out->SetStringPiece(kServerNonceTag,
NewServerNonce(context.rand(), context.info().now));
}
QUICHE_DCHECK_LT(0u, reject_reasons.size());
out->SetVector(kRREJ, reject_reasons);
if (!ClientDemandsX509Proof(context.client_hello())) {
QUIC_BUG(quic_bug_10630_4)
<< "x509 certificates not supported in proof demand";
return;
}
absl::string_view client_cached_cert_hashes;
if (context.client_hello().GetStringPiece(kCCRT,
&client_cached_cert_hashes)) {
context.params()->client_cached_cert_hashes =
std::string(client_cached_cert_hashes);
} else {
context.params()->client_cached_cert_hashes.clear();
}
const std::string compressed = CompressChain(
context.compressed_certs_cache(), context.signed_config()->chain,
context.params()->client_cached_cert_hashes);
QUICHE_DCHECK_GT(context.chlo_packet_size(), context.client_hello().size());
  // Rough estimate of the bytes a REJ spends on everything other than the
  // certificate chain (tags, source-address token, server nonce, etc.).
  const size_t kREJOverheadBytes = 166;
const size_t max_unverified_size =
chlo_multiplier_ *
(context.chlo_packet_size() - context.total_framing_overhead()) -
kREJOverheadBytes;
static_assert(kClientHelloMinimumSize * kMultiplier >= kREJOverheadBytes,
"overhead calculation may underflow");
bool should_return_sct =
context.params()->sct_supported_by_client && enable_serving_sct_;
const std::string& cert_sct = context.signed_config()->proof.leaf_cert_scts;
const size_t sct_size = should_return_sct ? cert_sct.size() : 0;
const size_t total_size = context.signed_config()->proof.signature.size() +
compressed.size() + sct_size;
if (context.info().valid_source_address_token ||
total_size < max_unverified_size) {
out->SetStringPiece(kCertificateTag, compressed);
out->SetStringPiece(kPROF, context.signed_config()->proof.signature);
if (should_return_sct) {
if (cert_sct.empty()) {
const std::vector<std::string>& certs =
context.signed_config()->chain->certs;
std::string ca_subject;
if (!certs.empty()) {
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(certs[0]);
if (view != nullptr) {
std::optional<std::string> maybe_ca_subject =
view->GetHumanReadableSubject();
if (maybe_ca_subject.has_value()) {
ca_subject = *maybe_ca_subject;
}
}
}
QUIC_LOG_EVERY_N_SEC(WARNING, 60)
<< "SCT is expected but it is empty. sni: '"
<< context.params()->sni << "' cert subject: '" << ca_subject
<< "'";
} else {
out->SetStringPiece(kCertificateSCTTag, cert_sct);
}
}
} else {
QUIC_LOG_EVERY_N_SEC(WARNING, 60)
<< "Sending inchoate REJ for hostname: " << context.info().sni
<< " signature: " << context.signed_config()->proof.signature.size()
<< " cert: " << compressed.size() << " sct:" << sct_size
<< " total: " << total_size << " max: " << max_unverified_size;
}
}
std::string QuicCryptoServerConfig::CompressChain(
QuicCompressedCertsCache* compressed_certs_cache,
const quiche::QuicheReferenceCountedPointer<ProofSource::Chain>& chain,
const std::string& client_cached_cert_hashes) {
QUICHE_DCHECK(compressed_certs_cache);
const std::string* cached_value = compressed_certs_cache->GetCompressedCert(
chain, client_cached_cert_hashes);
if (cached_value) {
return *cached_value;
}
std::string compressed =
CertCompressor::CompressChain(chain->certs, client_cached_cert_hashes);
compressed_certs_cache->Insert(chain, client_cached_cert_hashes, compressed);
return compressed;
}
quiche::QuicheReferenceCountedPointer<QuicCryptoServerConfig::Config>
QuicCryptoServerConfig::ParseConfigProtobuf(
const QuicServerConfigProtobuf& protobuf, bool is_fallback) const {
std::unique_ptr<CryptoHandshakeMessage> msg =
CryptoFramer::ParseMessage(protobuf.config());
if (!msg) {
QUIC_LOG(WARNING) << "Failed to parse server config message";
return nullptr;
}
if (msg->tag() != kSCFG) {
QUIC_LOG(WARNING) << "Server config message has tag " << msg->tag()
<< ", but expected " << kSCFG;
return nullptr;
}
quiche::QuicheReferenceCountedPointer<Config> config(new Config);
config->serialized = protobuf.config();
config->source_address_token_boxer = &source_address_token_boxer_;
if (protobuf.has_primary_time()) {
config->primary_time =
QuicWallTime::FromUNIXSeconds(protobuf.primary_time());
}
config->priority = protobuf.priority();
absl::string_view scid;
if (!msg->GetStringPiece(kSCID, &scid)) {
QUIC_LOG(WARNING) << "Server config message is missing SCID";
return nullptr;
}
if (scid.empty()) {
QUIC_LOG(WARNING) << "Server config message contains an empty SCID";
return nullptr;
}
config->id = std::string(scid);
if (msg->GetTaglist(kAEAD, &config->aead) != QUIC_NO_ERROR) {
QUIC_LOG(WARNING) << "Server config message is missing AEAD";
return nullptr;
}
QuicTagVector kexs_tags;
if (msg->GetTaglist(kKEXS, &kexs_tags) != QUIC_NO_ERROR) {
QUIC_LOG(WARNING) << "Server config message is missing KEXS";
return nullptr;
}
absl::string_view orbit;
if (!msg->GetStringPiece(kORBT, &orbit)) {
QUIC_LOG(WARNING) << "Server config message is missing ORBT";
return nullptr;
}
if (orbit.size() != kOrbitSize) {
QUIC_LOG(WARNING) << "Orbit value in server config is the wrong length."
" Got "
<< orbit.size() << " want " << kOrbitSize;
return nullptr;
}
static_assert(sizeof(config->orbit) == kOrbitSize, "incorrect orbit size");
memcpy(config->orbit, orbit.data(), sizeof(config->orbit));
QuicTagVector proof_demand_tags;
if (msg->GetTaglist(kPDMD, &proof_demand_tags) == QUIC_NO_ERROR) {
for (QuicTag tag : proof_demand_tags) {
if (tag == kCHID) {
config->channel_id_enabled = true;
break;
}
}
}
for (size_t i = 0; i < kexs_tags.size(); i++) {
const QuicTag tag = kexs_tags[i];
std::string private_key;
config->kexs.push_back(tag);
for (int j = 0; j < protobuf.key_size(); j++) {
      const QuicServerConfigProtobuf::PrivateKey& key = protobuf.key(j);
if (key.tag() == tag) {
private_key = key.private_key();
break;
}
}
std::unique_ptr<AsynchronousKeyExchange> ka =
key_exchange_source_->Create(config->id, is_fallback, tag, private_key);
if (!ka) {
return nullptr;
}
for (const auto& key_exchange : config->key_exchanges) {
if (key_exchange->type() == tag) {
QUIC_LOG(WARNING) << "Duplicate key exchange in config: " << tag;
return nullptr;
}
}
config->key_exchanges.push_back(std::move(ka));
}
uint64_t expiry_seconds;
if (msg->GetUint64(kEXPY, &expiry_seconds) != QUIC_NO_ERROR) {
QUIC_LOG(WARNING) << "Server config message is missing EXPY";
return nullptr;
}
config->expiry_time = QuicWallTime::FromUNIXSeconds(expiry_seconds);
return config;
}
void QuicCryptoServerConfig::set_replay_protection(bool on) {
replay_protection_ = on;
}
void QuicCryptoServerConfig::set_chlo_multiplier(size_t multiplier) {
chlo_multiplier_ = multiplier;
}
void QuicCryptoServerConfig::set_source_address_token_future_secs(
uint32_t future_secs) {
source_address_token_future_secs_ = future_secs;
}
void QuicCryptoServerConfig::set_source_address_token_lifetime_secs(
uint32_t lifetime_secs) {
source_address_token_lifetime_secs_ = lifetime_secs;
}
void QuicCryptoServerConfig::set_enable_serving_sct(bool enable_serving_sct) {
enable_serving_sct_ = enable_serving_sct;
}
void QuicCryptoServerConfig::AcquirePrimaryConfigChangedCb(
std::unique_ptr<PrimaryConfigChangedCallback> cb) {
quiche::QuicheWriterMutexLock locked(&configs_lock_);
primary_config_changed_cb_ = std::move(cb);
}
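// Mints an encrypted source-address token for |ip|: a fresh token (optionally
// carrying cached network parameters) plus still-valid tokens for other
// addresses carried over from |previous_tokens|, capped near
// kMaxTokenAddresses, all boxed with |crypto_secret_boxer|.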
std::string QuicCryptoServerConfig::NewSourceAddressToken(
const CryptoSecretBoxer& crypto_secret_boxer,
const SourceAddressTokens& previous_tokens, const QuicIpAddress& ip,
QuicRandom* rand, QuicWallTime now,
const CachedNetworkParameters* cached_network_params) const {
SourceAddressTokens source_address_tokens;
SourceAddressToken* source_address_token = source_address_tokens.add_tokens();
source_address_token->set_ip(ip.DualStacked().ToPackedString());
source_address_token->set_timestamp(now.ToUNIXSeconds());
if (cached_network_params != nullptr) {
*(source_address_token->mutable_cached_network_parameters()) =
*cached_network_params;
}
for (const SourceAddressToken& token : previous_tokens.tokens()) {
if (source_address_tokens.tokens_size() > kMaxTokenAddresses) {
break;
}
if (token.ip() == source_address_token->ip()) {
continue;
}
if (ValidateSourceAddressTokenTimestamp(token, now) != HANDSHAKE_OK) {
continue;
}
*(source_address_tokens.add_tokens()) = token;
}
return crypto_secret_boxer.Box(rand,
source_address_tokens.SerializeAsString());
}
int QuicCryptoServerConfig::NumberOfConfigs() const {
quiche::QuicheReaderMutexLock locked(&configs_lock_);
return configs_.size();
}
ProofSource* QuicCryptoServerConfig::proof_source() const {
return proof_source_.get();
}
SSL_CTX* QuicCryptoServerConfig::ssl_ctx() const { return ssl_ctx_.get(); }
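// Decrypts and parses a source-address token, falling back to the legacy
// single-token wire format when the plaintext is not a SourceAddressTokens
// message.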
HandshakeFailureReason QuicCryptoServerConfig::ParseSourceAddressToken(
const CryptoSecretBoxer& crypto_secret_boxer, absl::string_view token,
SourceAddressTokens& tokens) const {
std::string storage;
absl::string_view plaintext;
if (!crypto_secret_boxer.Unbox(token, &storage, &plaintext)) {
return SOURCE_ADDRESS_TOKEN_DECRYPTION_FAILURE;
}
if (!tokens.ParseFromArray(plaintext.data(), plaintext.size())) {
SourceAddressToken old_source_token;
if (!old_source_token.ParseFromArray(plaintext.data(), plaintext.size())) {
return SOURCE_ADDRESS_TOKEN_PARSE_FAILURE;
}
*tokens.add_tokens() = old_source_token;
}
return HANDSHAKE_OK;
}
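// A token list validates if any one token matches |ip| and passes the
// clock-skew/lifetime checks; the first such token's cached network
// parameters (if any) are copied out.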
HandshakeFailureReason QuicCryptoServerConfig::ValidateSourceAddressTokens(
const SourceAddressTokens& source_address_tokens, const QuicIpAddress& ip,
QuicWallTime now, CachedNetworkParameters* cached_network_params) const {
HandshakeFailureReason reason =
SOURCE_ADDRESS_TOKEN_DIFFERENT_IP_ADDRESS_FAILURE;
for (const SourceAddressToken& token : source_address_tokens.tokens()) {
reason = ValidateSingleSourceAddressToken(token, ip, now);
if (reason == HANDSHAKE_OK) {
if (cached_network_params != nullptr &&
token.has_cached_network_parameters()) {
*cached_network_params = token.cached_network_parameters();
}
break;
}
}
return reason;
}
HandshakeFailureReason QuicCryptoServerConfig::ValidateSingleSourceAddressToken(
const SourceAddressToken& source_address_token, const QuicIpAddress& ip,
QuicWallTime now) const {
if (source_address_token.ip() != ip.DualStacked().ToPackedString()) {
return SOURCE_ADDRESS_TOKEN_DIFFERENT_IP_ADDRESS_FAILURE;
}
return ValidateSourceAddressTokenTimestamp(source_address_token, now);
}
HandshakeFailureReason
QuicCryptoServerConfig::ValidateSourceAddressTokenTimestamp(
const SourceAddressToken& source_address_token, QuicWallTime now) const {
const QuicWallTime timestamp(
QuicWallTime::FromUNIXSeconds(source_address_token.timestamp()));
const QuicTime::Delta delta(now.AbsoluteDifference(timestamp));
if (now.IsBefore(timestamp) &&
delta.ToSeconds() > source_address_token_future_secs_) {
return SOURCE_ADDRESS_TOKEN_CLOCK_SKEW_FAILURE;
}
if (now.IsAfter(timestamp) &&
delta.ToSeconds() > source_address_token_lifetime_secs_) {
return SOURCE_ADDRESS_TOKEN_EXPIRED_FAILURE;
}
return HANDSHAKE_OK;
}
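// A server nonce is a 4-byte big-endian UNIX timestamp followed by 20 random
// bytes, encrypted ("boxed") before being handed to the client.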
static const size_t kServerNoncePlaintextSize =
    4 /* timestamp */ + 20 /* random bytes */;
std::string QuicCryptoServerConfig::NewServerNonce(QuicRandom* rand,
QuicWallTime now) const {
const uint32_t timestamp = static_cast<uint32_t>(now.ToUNIXSeconds());
uint8_t server_nonce[kServerNoncePlaintextSize];
static_assert(sizeof(server_nonce) > sizeof(timestamp), "nonce too small");
server_nonce[0] = static_cast<uint8_t>(timestamp >> 24);
server_nonce[1] = static_cast<uint8_t>(timestamp >> 16);
server_nonce[2] = static_cast<uint8_t>(timestamp >> 8);
server_nonce[3] = static_cast<uint8_t>(timestamp);
rand->RandBytes(&server_nonce[sizeof(timestamp)],
sizeof(server_nonce) - sizeof(timestamp));
return server_nonce_boxer_.Box(
rand, absl::string_view(reinterpret_cast<char*>(server_nonce),
sizeof(server_nonce)));
}
bool QuicCryptoServerConfig::ValidateExpectedLeafCertificate(
const CryptoHandshakeMessage& client_hello,
const std::vector<std::string>& certs) const {
if (certs.empty()) {
return false;
}
uint64_t hash_from_client;
if (client_hello.GetUint64(kXLCT, &hash_from_client) != QUIC_NO_ERROR) {
return false;
}
return CryptoUtils::ComputeLeafCertHash(certs[0]) == hash_from_client;
}
bool QuicCryptoServerConfig::IsNextConfigReady(QuicWallTime now) const {
return !next_config_promotion_time_.IsZero() &&
!next_config_promotion_time_.IsAfter(now);
}
QuicCryptoServerConfig::Config::Config()
: channel_id_enabled(false),
is_primary(false),
primary_time(QuicWallTime::Zero()),
expiry_time(QuicWallTime::Zero()),
priority(0),
source_address_token_boxer(nullptr) {}
QuicCryptoServerConfig::Config::~Config() {}
QuicSignedServerConfig::QuicSignedServerConfig() {}
QuicSignedServerConfig::~QuicSignedServerConfig() {}
} | #include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include <stdarg.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/cert_compressor.h"
#include "quiche/quic/core/crypto/chacha20_poly1305_encrypter.h"
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/crypto_secret_boxer.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/proto/crypto_server_config_proto.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/quic_crypto_server_config_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
using ::testing::Not;
MATCHER_P(SerializedProtoEquals, message, "") {
std::string expected_serialized, actual_serialized;
message.SerializeToString(&expected_serialized);
arg.SerializeToString(&actual_serialized);
return expected_serialized == actual_serialized;
}
class QuicCryptoServerConfigTest : public QuicTest {};
TEST_F(QuicCryptoServerConfigTest, ServerConfig) {
QuicRandom* rand = QuicRandom::GetInstance();
QuicCryptoServerConfig server(QuicCryptoServerConfig::TESTING, rand,
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default());
MockClock clock;
std::unique_ptr<CryptoHandshakeMessage> message(server.AddDefaultConfig(
rand, &clock, QuicCryptoServerConfig::ConfigOptions()));
QuicTagVector aead;
ASSERT_THAT(message->GetTaglist(kAEAD, &aead), IsQuicNoError());
EXPECT_THAT(aead, ::testing::Contains(kAESG));
EXPECT_LE(1u, aead.size());
}
TEST_F(QuicCryptoServerConfigTest, CompressCerts) {
QuicCompressedCertsCache compressed_certs_cache(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize);
QuicRandom* rand = QuicRandom::GetInstance();
QuicCryptoServerConfig server(QuicCryptoServerConfig::TESTING, rand,
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default());
QuicCryptoServerConfigPeer peer(&server);
std::vector<std::string> certs = {"testcert"};
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain(
new ProofSource::Chain(certs));
std::string compressed = QuicCryptoServerConfigPeer::CompressChain(
&compressed_certs_cache, chain, "");
EXPECT_EQ(compressed_certs_cache.Size(), 1u);
}
TEST_F(QuicCryptoServerConfigTest, CompressSameCertsTwice) {
QuicCompressedCertsCache compressed_certs_cache(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize);
QuicRandom* rand = QuicRandom::GetInstance();
QuicCryptoServerConfig server(QuicCryptoServerConfig::TESTING, rand,
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default());
QuicCryptoServerConfigPeer peer(&server);
std::vector<std::string> certs = {"testcert"};
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain(
new ProofSource::Chain(certs));
std::string cached_certs = "";
std::string compressed = QuicCryptoServerConfigPeer::CompressChain(
&compressed_certs_cache, chain, cached_certs);
EXPECT_EQ(compressed_certs_cache.Size(), 1u);
std::string compressed2 = QuicCryptoServerConfigPeer::CompressChain(
&compressed_certs_cache, chain, cached_certs);
EXPECT_EQ(compressed, compressed2);
EXPECT_EQ(compressed_certs_cache.Size(), 1u);
}
TEST_F(QuicCryptoServerConfigTest, CompressDifferentCerts) {
QuicCompressedCertsCache compressed_certs_cache(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize);
QuicRandom* rand = QuicRandom::GetInstance();
QuicCryptoServerConfig server(QuicCryptoServerConfig::TESTING, rand,
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default());
QuicCryptoServerConfigPeer peer(&server);
std::vector<std::string> certs = {"testcert"};
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain(
new ProofSource::Chain(certs));
std::string cached_certs = "";
std::string compressed = QuicCryptoServerConfigPeer::CompressChain(
&compressed_certs_cache, chain, cached_certs);
EXPECT_EQ(compressed_certs_cache.Size(), 1u);
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain2(
new ProofSource::Chain(certs));
std::string compressed2 = QuicCryptoServerConfigPeer::CompressChain(
&compressed_certs_cache, chain2, cached_certs);
EXPECT_EQ(compressed_certs_cache.Size(), 2u);
}
class SourceAddressTokenTest : public QuicTest {
public:
SourceAddressTokenTest()
: ip4_(QuicIpAddress::Loopback4()),
ip4_dual_(ip4_.DualStacked()),
ip6_(QuicIpAddress::Loopback6()),
original_time_(QuicWallTime::Zero()),
rand_(QuicRandom::GetInstance()),
server_(QuicCryptoServerConfig::TESTING, rand_,
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default()),
peer_(&server_) {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1000000));
original_time_ = clock_.WallNow();
primary_config_ = server_.AddDefaultConfig(
rand_, &clock_, QuicCryptoServerConfig::ConfigOptions());
}
std::string NewSourceAddressToken(std::string config_id,
const QuicIpAddress& ip) {
return NewSourceAddressToken(config_id, ip, nullptr);
}
std::string NewSourceAddressToken(
std::string config_id, const QuicIpAddress& ip,
const SourceAddressTokens& previous_tokens) {
return peer_.NewSourceAddressToken(config_id, previous_tokens, ip, rand_,
clock_.WallNow(), nullptr);
}
std::string NewSourceAddressToken(
std::string config_id, const QuicIpAddress& ip,
CachedNetworkParameters* cached_network_params) {
SourceAddressTokens previous_tokens;
return peer_.NewSourceAddressToken(config_id, previous_tokens, ip, rand_,
clock_.WallNow(), cached_network_params);
}
HandshakeFailureReason ValidateSourceAddressTokens(std::string config_id,
absl::string_view srct,
const QuicIpAddress& ip) {
return ValidateSourceAddressTokens(config_id, srct, ip, nullptr);
}
HandshakeFailureReason ValidateSourceAddressTokens(
std::string config_id, absl::string_view srct, const QuicIpAddress& ip,
CachedNetworkParameters* cached_network_params) {
return peer_.ValidateSourceAddressTokens(
config_id, srct, ip, clock_.WallNow(), cached_network_params);
}
const std::string kPrimary = "<primary>";
const std::string kOverride = "Config with custom source address token key";
QuicIpAddress ip4_;
QuicIpAddress ip4_dual_;
QuicIpAddress ip6_;
MockClock clock_;
QuicWallTime original_time_;
QuicRandom* rand_ = QuicRandom::GetInstance();
QuicCryptoServerConfig server_;
QuicCryptoServerConfigPeer peer_;
std::unique_ptr<CryptoHandshakeMessage> primary_config_;
std::unique_ptr<QuicServerConfigProtobuf> override_config_protobuf_;
};
TEST_F(SourceAddressTokenTest, SourceAddressToken) {
const std::string token4 = NewSourceAddressToken(kPrimary, ip4_);
const std::string token4d = NewSourceAddressToken(kPrimary, ip4_dual_);
const std::string token6 = NewSourceAddressToken(kPrimary, ip6_);
EXPECT_EQ(HANDSHAKE_OK, ValidateSourceAddressTokens(kPrimary, token4, ip4_));
ASSERT_EQ(HANDSHAKE_OK,
ValidateSourceAddressTokens(kPrimary, token4, ip4_dual_));
ASSERT_EQ(SOURCE_ADDRESS_TOKEN_DIFFERENT_IP_ADDRESS_FAILURE,
ValidateSourceAddressTokens(kPrimary, token4, ip6_));
ASSERT_EQ(HANDSHAKE_OK, ValidateSourceAddressTokens(kPrimary, token4d, ip4_));
ASSERT_EQ(HANDSHAKE_OK,
ValidateSourceAddressTokens(kPrimary, token4d, ip4_dual_));
ASSERT_EQ(SOURCE_ADDRESS_TOKEN_DIFFERENT_IP_ADDRESS_FAILURE,
ValidateSourceAddressTokens(kPrimary, token4d, ip6_));
ASSERT_EQ(HANDSHAKE_OK, ValidateSourceAddressTokens(kPrimary, token6, ip6_));
}
TEST_F(SourceAddressTokenTest, SourceAddressTokenExpiration) {
const std::string token = NewSourceAddressToken(kPrimary, ip4_);
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(-3600 * 2));
ASSERT_EQ(SOURCE_ADDRESS_TOKEN_CLOCK_SKEW_FAILURE,
ValidateSourceAddressTokens(kPrimary, token, ip4_));
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(86400 * 7));
ASSERT_EQ(SOURCE_ADDRESS_TOKEN_EXPIRED_FAILURE,
ValidateSourceAddressTokens(kPrimary, token, ip4_));
}
TEST_F(SourceAddressTokenTest, SourceAddressTokenWithNetworkParams) {
CachedNetworkParameters cached_network_params_input;
cached_network_params_input.set_bandwidth_estimate_bytes_per_second(1234);
const std::string token4_with_cached_network_params =
NewSourceAddressToken(kPrimary, ip4_, &cached_network_params_input);
CachedNetworkParameters cached_network_params_output;
EXPECT_THAT(cached_network_params_output,
Not(SerializedProtoEquals(cached_network_params_input)));
ValidateSourceAddressTokens(kPrimary, token4_with_cached_network_params, ip4_,
&cached_network_params_output);
EXPECT_THAT(cached_network_params_output,
SerializedProtoEquals(cached_network_params_input));
}
TEST_F(SourceAddressTokenTest, SourceAddressTokenMultipleAddresses) {
QuicWallTime now = clock_.WallNow();
SourceAddressToken previous_token;
previous_token.set_ip(ip6_.DualStacked().ToPackedString());
previous_token.set_timestamp(now.ToUNIXSeconds());
SourceAddressTokens previous_tokens;
(*previous_tokens.add_tokens()) = previous_token;
const std::string token4or6 =
NewSourceAddressToken(kPrimary, ip4_, previous_tokens);
EXPECT_EQ(HANDSHAKE_OK,
ValidateSourceAddressTokens(kPrimary, token4or6, ip4_));
ASSERT_EQ(HANDSHAKE_OK,
ValidateSourceAddressTokens(kPrimary, token4or6, ip6_));
}
class CryptoServerConfigsTest : public QuicTest {
public:
CryptoServerConfigsTest()
: rand_(QuicRandom::GetInstance()),
config_(QuicCryptoServerConfig::TESTING, rand_,
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default()),
test_peer_(&config_) {}
void SetUp() override {
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1000));
}
struct ServerConfigIDWithTimeAndPriority {
ServerConfigID server_config_id;
int primary_time;
int priority;
};
void SetConfigs(std::vector<ServerConfigIDWithTimeAndPriority> configs) {
const char kOrbit[] = "12345678";
bool has_invalid = false;
std::vector<QuicServerConfigProtobuf> protobufs;
for (const auto& config : configs) {
const ServerConfigID& server_config_id = config.server_config_id;
const int primary_time = config.primary_time;
const int priority = config.priority;
QuicCryptoServerConfig::ConfigOptions options;
options.id = server_config_id;
options.orbit = kOrbit;
QuicServerConfigProtobuf protobuf =
QuicCryptoServerConfig::GenerateConfig(rand_, &clock_, options);
protobuf.set_primary_time(primary_time);
protobuf.set_priority(priority);
if (absl::StartsWith(std::string(server_config_id), "INVALID")) {
protobuf.clear_key();
has_invalid = true;
}
protobufs.push_back(std::move(protobuf));
}
    ASSERT_EQ(!has_invalid && !configs.empty(),
              config_.SetConfigs(protobufs, /*fallback_protobuf=*/nullptr,
                                 clock_.WallNow()));
}
protected:
QuicRandom* const rand_;
MockClock clock_;
QuicCryptoServerConfig config_;
QuicCryptoServerConfigPeer test_peer_;
};
TEST_F(CryptoServerConfigsTest, NoConfigs) {
test_peer_.CheckConfigs(std::vector<std::pair<std::string, bool>>());
}
TEST_F(CryptoServerConfigsTest, MakePrimaryFirst) {
SetConfigs({{"a", 1100, 1}, {"b", 900, 1}});
test_peer_.CheckConfigs({{"a", false}, {"b", true}});
}
TEST_F(CryptoServerConfigsTest, MakePrimarySecond) {
SetConfigs({{"a", 900, 1}, {"b", 1100, 1}});
test_peer_.CheckConfigs({{"a", true}, {"b", false}});
}
TEST_F(CryptoServerConfigsTest, Delete) {
SetConfigs({{"a", 800, 1}, {"b", 900, 1}, {"c", 1100, 1}});
test_peer_.CheckConfigs({{"a", false}, {"b", true}, {"c", false}});
SetConfigs({{"b", 900, 1}, {"c", 1100, 1}});
test_peer_.CheckConfigs({{"b", true}, {"c", false}});
}
TEST_F(CryptoServerConfigsTest, DeletePrimary) {
SetConfigs({{"a", 800, 1}, {"b", 900, 1}, {"c", 1100, 1}});
test_peer_.CheckConfigs({{"a", false}, {"b", true}, {"c", false}});
SetConfigs({{"a", 800, 1}, {"c", 1100, 1}});
test_peer_.CheckConfigs({{"a", true}, {"c", false}});
}
TEST_F(CryptoServerConfigsTest, FailIfDeletingAllConfigs) {
SetConfigs({{"a", 800, 1}, {"b", 900, 1}});
test_peer_.CheckConfigs({{"a", false}, {"b", true}});
SetConfigs(std::vector<ServerConfigIDWithTimeAndPriority>());
test_peer_.CheckConfigs({{"a", false}, {"b", true}});
}
TEST_F(CryptoServerConfigsTest, ChangePrimaryTime) {
SetConfigs({{"a", 400, 1}, {"b", 800, 1}, {"c", 1200, 1}});
test_peer_.SelectNewPrimaryConfig(500);
test_peer_.CheckConfigs({{"a", true}, {"b", false}, {"c", false}});
SetConfigs({{"a", 1200, 1}, {"b", 800, 1}, {"c", 400, 1}});
test_peer_.SelectNewPrimaryConfig(500);
test_peer_.CheckConfigs({{"a", false}, {"b", false}, {"c", true}});
}
TEST_F(CryptoServerConfigsTest, AllConfigsInThePast) {
SetConfigs({{"a", 400, 1}, {"b", 800, 1}, {"c", 1200, 1}});
test_peer_.SelectNewPrimaryConfig(1500);
test_peer_.CheckConfigs({{"a", false}, {"b", false}, {"c", true}});
}
TEST_F(CryptoServerConfigsTest, AllConfigsInTheFuture) {
SetConfigs({{"a", 400, 1}, {"b", 800, 1}, {"c", 1200, 1}});
test_peer_.SelectNewPrimaryConfig(100);
test_peer_.CheckConfigs({{"a", true}, {"b", false}, {"c", false}});
}
TEST_F(CryptoServerConfigsTest, SortByPriority) {
SetConfigs({{"a", 900, 1}, {"b", 900, 2}, {"c", 900, 3}});
test_peer_.CheckConfigs({{"a", true}, {"b", false}, {"c", false}});
test_peer_.SelectNewPrimaryConfig(800);
test_peer_.CheckConfigs({{"a", true}, {"b", false}, {"c", false}});
test_peer_.SelectNewPrimaryConfig(1000);
test_peer_.CheckConfigs({{"a", true}, {"b", false}, {"c", false}});
SetConfigs({{"a", 900, 2}, {"b", 900, 1}, {"c", 900, 0}});
test_peer_.CheckConfigs({{"a", false}, {"b", false}, {"c", true}});
test_peer_.SelectNewPrimaryConfig(800);
test_peer_.CheckConfigs({{"a", false}, {"b", false}, {"c", true}});
test_peer_.SelectNewPrimaryConfig(1000);
test_peer_.CheckConfigs({{"a", false}, {"b", false}, {"c", true}});
}
TEST_F(CryptoServerConfigsTest, AdvancePrimary) {
SetConfigs({{"a", 900, 1}, {"b", 1100, 1}});
test_peer_.SelectNewPrimaryConfig(1000);
test_peer_.CheckConfigs({{"a", true}, {"b", false}});
test_peer_.SelectNewPrimaryConfig(1101);
test_peer_.CheckConfigs({{"a", false}, {"b", true}});
}
class ValidateCallback : public ValidateClientHelloResultCallback {
public:
  void Run(quiche::QuicheReferenceCountedPointer<Result> /*result*/,
           std::unique_ptr<ProofSource::Details> /*details*/) override {}
};
TEST_F(CryptoServerConfigsTest, AdvancePrimaryViaValidate) {
SetConfigs({{"a", 900, 1}, {"b", 1100, 1}});
test_peer_.SelectNewPrimaryConfig(1000);
test_peer_.CheckConfigs({{"a", true}, {"b", false}});
CryptoHandshakeMessage client_hello;
QuicSocketAddress client_address;
QuicSocketAddress server_address;
QuicTransportVersion transport_version = QUIC_VERSION_UNSUPPORTED;
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (version.handshake_protocol == PROTOCOL_QUIC_CRYPTO) {
transport_version = version.transport_version;
break;
}
}
ASSERT_NE(transport_version, QUIC_VERSION_UNSUPPORTED);
MockClock clock;
quiche::QuicheReferenceCountedPointer<QuicSignedServerConfig> signed_config(
new QuicSignedServerConfig);
std::unique_ptr<ValidateClientHelloResultCallback> done_cb(
new ValidateCallback);
clock.AdvanceTime(QuicTime::Delta::FromSeconds(1100));
config_.ValidateClientHello(client_hello, client_address, server_address,
transport_version, &clock, signed_config,
std::move(done_cb));
test_peer_.CheckConfigs({{"a", false}, {"b", true}});
}
TEST_F(CryptoServerConfigsTest, InvalidConfigs) {
SetConfigs({{"a", 800, 1}, {"b", 900, 1}, {"c", 1100, 1}});
test_peer_.CheckConfigs({{"a", false}, {"b", true}, {"c", false}});
SetConfigs({{"a", 800, 1}, {"c", 1100, 1}, {"INVALID1", 1000, 1}});
test_peer_.CheckConfigs({{"a", false}, {"b", true}, {"c", false}});
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_crypto_server_config.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_crypto_server_config_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
c64344e6-ea88-492c-9734-1b829227dfc1 | cpp | tensorflow/tensorflow | autotune_serialize | tensorflow/core/util/autotune_maps/autotune_serialize.cc | tensorflow/core/util/autotune_maps/autotune_serialize_test.cc | #include "tensorflow/core/util/autotune_maps/autotune_serialize.h"
#include <map>
#include <string>
#include <unordered_map>
#include <vector>
#include "xla/status_macros.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/util/activation_mode.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_autotune_maps.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
namespace tensorflow {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace {
using stream_executor::dnn::AlgorithmConfig;
using stream_executor::dnn::AlgorithmConfigProto;
using stream_executor::dnn::AlgorithmDesc;
using stream_executor::dnn::AlgorithmProto;
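// Serializes an autotune map to a ConvMapProto, ordering entries by the
// deterministic serialization of their keys so the output is byte-stable
// across runs.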
template <typename Op>
StatusOr<ConvMapProto> ConvMapToProto(
const AutotuneMap<ConvParameters, AutotuneEntry<Op>> &autotune_map) {
ConvMapProto proto;
std::map<string, ConvMapProto::Entry> sorted_map;
for (auto const &p : autotune_map.GetMap()) {
const ConvParameters ¶ms = p.first;
const ConvParametersProto ¶ms_proto = params.proto();
VLOG(1) << "Reading: " << params.ToString();
ConvMapProto::Entry kv;
*kv.mutable_key() = params_proto;
if (p.second.is_algorithm_config()) {
*kv.mutable_value() = p.second.GetAlgorithmConfig().ToProto();
} else {
const auto &runners = p.second.GetOpRunners();
*kv.mutable_value()->mutable_algorithm() =
runners.primary->ToAlgorithmDesc().ToProto();
if (runners.no_scratch_fallback) {
*kv.mutable_value()->mutable_algorithm_no_scratch() =
runners.no_scratch_fallback->ToAlgorithmDesc().ToProto();
}
}
std::string serialized_params;
TF_RET_CHECK(
tsl::SerializeToStringDeterministic(params_proto, &serialized_params));
sorted_map.insert(std::make_pair(std::move(serialized_params), kv));
}
for (auto const &p : sorted_map) {
ConvMapProto::Entry *kv = proto.add_kv_pairs();
*kv = p.second;
}
return proto;
}
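// Rebuilds the in-memory autotune map from |m|. Entries whose ConvParameters
// version differs from the runtime's abort the load; entries recorded for a
// device model not visible on this host are skipped (with a warning), and a
// matching entry is inserted once per matching device ordinal.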
template <typename Op>
Status PopulateConvMap(
const ConvMapProto &m,
AutotuneMap<ConvParameters, AutotuneEntry<Op>> *autotune_map) {
if (m.kv_pairs().size() == 0) {
return OkStatus();
}
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
se::PlatformManager::PlatformWithName(se::GpuPlatformName()));
std::vector<std::string> device_descs;
for (int i = 0; i < platform->VisibleDeviceCount(); i++) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::DeviceDescription> device_desc,
platform->DescriptionForDevice(i));
device_descs.push_back(device_desc->model_str());
}
std::set<std::string> unmatched_device_descs;
for (const ConvMapProto::Entry &kv : m.kv_pairs()) {
const ConvParametersProto ¶ms_proto = kv.key();
if (params_proto.version() != ConvParameters::kVersion) {
VLOG(1) << "ConvParametersProto with the incompatible version:"
<< params_proto.DebugString();
return errors::Aborted(
"Aborted because the loaded autotune results for convolution "
"operations have a version different "
"from runtime's version. Expected version: ",
ConvParameters::kVersion,
". Actual version: ", params_proto.version());
}
const AlgorithmConfigProto &algorithm_config_proto = kv.value();
const AlgorithmDesc primary(algorithm_config_proto.algorithm());
const absl::optional<AlgorithmDesc> fallback =
algorithm_config_proto.has_algorithm_no_scratch()
? absl::optional<AlgorithmDesc>(
AlgorithmDesc(algorithm_config_proto.algorithm_no_scratch()))
: absl::nullopt;
bool devices_matched = false;
for (int ordinal = 0; ordinal < device_descs.size(); ordinal++) {
const std::string &desc_str = device_descs[ordinal];
if (desc_str != params_proto.device_identifier()) {
continue;
}
devices_matched = true;
AutotuneEntry<Op> entry;
#if TENSORFLOW_USE_ROCM
entry = AutotuneEntry<Op>(AlgorithmConfig(algorithm_config_proto));
#else
entry = AutotuneEntry<Op>(primary, fallback);
#endif
autotune_map->Insert(ConvParameters(ordinal, params_proto), entry);
}
if (!devices_matched) {
unmatched_device_descs.insert(params_proto.device_identifier());
}
}
if (!unmatched_device_descs.empty()) {
LOG(WARNING) << "Unmatched device id's from AoT autotuning data: "
<< str_util::Join(unmatched_device_descs, ", ")
<< "; existing devices: "
<< str_util::Join(device_descs, ", ");
}
return OkStatus();
}
}
#endif
Status SerializeAutotuneMaps(std::string *output) {
AutotuneMapsProto proto;
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TF_ASSIGN_OR_RETURN(*proto.mutable_conv_map(),
ConvMapToProto(*ConvAutotuneMap::GetInstance()));
TF_ASSIGN_OR_RETURN(*proto.mutable_fused_conv_map(),
ConvMapToProto(*FusedConvAutotuneMap::GetInstance()));
#endif
TF_RET_CHECK(tsl::SerializeToStringDeterministic(proto, output));
return absl::OkStatus();
}
Status LoadSerializedAutotuneMaps(absl::string_view s) {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
AutotuneMapsProto proto;
if (!proto.ParseFromString(string(s))) {
return errors::InvalidArgument(
"Failed to parse the autotune maps from string.");
}
TF_RETURN_IF_ERROR(
PopulateConvMap(proto.conv_map(), ConvAutotuneMap::GetInstance()));
TF_RETURN_IF_ERROR(PopulateConvMap(proto.fused_conv_map(),
FusedConvAutotuneMap::GetInstance()));
#endif
return absl::OkStatus();
}
void ResetAutotuneMaps() {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
ConvAutotuneMap::GetInstance()->ClearMap();
FusedConvAutotuneMap::GetInstance()->ClearMap();
#endif
}
} | #include "xla/stream_executor/platform_manager.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "xla/stream_executor/gpu/gpu_init.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/autotune_maps/autotune_serialize.h"
#include "tensorflow/core/util/autotune_maps/conv_autotune_maps.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
namespace {
using stream_executor::dnn::AlgorithmConfig;
using stream_executor::dnn::AlgorithmDesc;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
se::StreamExecutor* GetStreamExec() {
se::Platform* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
CHECK_GT(platform->VisibleDeviceCount(), 0);
return platform->ExecutorForDevice(0).value();
}
TEST(AutotuneSerializeTest, Empty) {
ResetAutotuneMaps();
std::string output;
TF_CHECK_OK(SerializeAutotuneMaps(&output));
TF_CHECK_OK(LoadSerializedAutotuneMaps(output));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 0);
}
TEST(AutotuneSerializeTest, Consistency) {
ResetAutotuneMaps();
ConvParameters conv_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1};
ConvParameters fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kNone,
false},
};
ConvParameters contrib_fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kRelu,
true}};
AlgorithmDesc algorithm(1, true);
AlgorithmDesc algorithm_no_scratch(1, true);
AutotuneEntry<se::dnn::ConvOp> example_a(algorithm, algorithm_no_scratch);
ConvAutotuneMap::GetInstance()->Insert(conv_params_example_a, example_a);
ConvAutotuneMap::GetInstance()->Insert(fused_params_example_a, example_a);
ConvAutotuneMap::GetInstance()->Insert(contrib_fused_params_example_a,
example_a);
std::string serialized_string;
TF_CHECK_OK(SerializeAutotuneMaps(&serialized_string));
ResetAutotuneMaps();
TF_CHECK_OK(LoadSerializedAutotuneMaps(serialized_string));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 3);
AutotuneEntry<se::dnn::ConvOp> entry;
EXPECT_TRUE(
ConvAutotuneMap::GetInstance()->Find(conv_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
EXPECT_TRUE(
ConvAutotuneMap::GetInstance()->Find(fused_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
EXPECT_TRUE(ConvAutotuneMap::GetInstance()->Find(
contrib_fused_params_example_a, &entry));
EXPECT_EQ(entry, example_a);
}
TEST(AutotuneSerializeTest, VersionControl) {
ResetAutotuneMaps();
ConvParameters fused_params_example_a = {
GetStreamExec(),
1,
1,
{{1, 1}},
TensorFormat::FORMAT_NCHW,
1,
{{1, 1}},
{{1, 1}},
{{1, 1}},
{{1, 1}},
DataType::DT_INT8,
1,
ConvParameters::FusionInfo{1.0, 0., 0.,
se::dnn::ActivationMode::kNone,
false},
ConvParameters::kVersion - 1};
AlgorithmDesc algorithm(1, true);
AlgorithmDesc algorithm_no_scratch(1, true);
AlgorithmConfig algorithm_config_example_a(algorithm, 1,
algorithm_no_scratch);
ConvAutotuneMap::GetInstance()->Insert(
fused_params_example_a,
AutotuneEntry<se::dnn::ConvOp>(algorithm_config_example_a));
std::string serialized_string;
TF_CHECK_OK(SerializeAutotuneMaps(&serialized_string));
ResetAutotuneMaps();
EXPECT_THAT(
LoadSerializedAutotuneMaps(serialized_string),
StatusIs(error::ABORTED,
HasSubstr("Aborted because the loaded autotune results")));
EXPECT_EQ(ConvAutotuneMap::GetInstance()->GetMap().size(), 0);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/autotune_serialize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/autotune_serialize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51490e3e-50c8-4a6d-a209-b93fee91cf59 | cpp | tensorflow/tensorflow | execute_node | tensorflow/core/common_runtime/eager/execute_node.cc | tensorflow/core/common_runtime/eager/execute_node_test.cc | #include "tensorflow/core/common_runtime/eager/execute_node.h"
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
#if !defined(IS_MOBILE_PLATFORM)
bool ExecuteNodeArgs::IsRemote(EagerContext* ctx, Device* input_device,
TensorHandle* handle) {
uint64 context_view_id = ctx->GetContextViewId();
if (handle->Type() == TensorHandle::REMOTE ||
handle->HasRemoteMirror(input_device, context_view_id)) {
if (!has_remote_inputs_) {
has_remote_inputs_ = true;
}
return true;
}
return false;
}
#endif
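// Expands the PACKED TensorHandle at input position |index| into one
// TensorValue per packed component. Remote components are tolerated here
// (they are serialized via the remote-handle path instead); nested packed
// handles are rejected.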
Status ExecuteNodeArgs::InitPackedHandle(const int index, EagerContext* ctx,
Device* input_device,
TensorHandle* packed_handle) {
int num_handles = packed_handle->NumPackedHandles();
packed_args_.emplace(index,
absl::InlinedVector<TensorValue, 4UL>(num_handles));
TensorValue* packed_arg_flat = &(packed_args_[index][0]);
for (int i = 0; i < num_handles; ++i) {
TensorHandle* h = nullptr;
TF_RETURN_IF_ERROR(packed_handle->ExtractPackedHandle(i, &h));
const Status status = h->TensorValue(h->device(), &packed_arg_flat[i]);
if (!status.ok()) {
#if !defined(IS_MOBILE_PLATFORM)
if (IsRemote(ctx, input_device, h)) {
continue;
}
#endif
if (h->Type() == TensorHandle::PACKED) {
return errors::InvalidArgument(
"Nested packed handles are not supported");
}
return status;
}
}
return absl::OkStatus();
}
Status ExecuteNodeArgs::Init(
EagerContext* ctx, const absl::InlinedVector<TensorHandle*, 4UL>& op_inputs,
const core::RefCountPtr<KernelAndDevice>& kernel) {
const int n_inputs = op_inputs.size();
if (n_inputs > 0) {
TensorHandle* const* op_inputs_flat = &op_inputs[0];
TensorValue* tensor_args_flat = &tensor_args_[0];
for (int i = 0; i < n_inputs; ++i) {
TensorHandle* in = op_inputs_flat[i];
Device* d = kernel->InputDevice(i);
Status s = in->TensorValue(ctx->CanonicalDevice(d), &tensor_args_flat[i]);
if (!s.ok()) {
#if !defined(IS_MOBILE_PLATFORM)
if (IsRemote(ctx, d, in)) {
continue;
}
#endif
if (in->Type() != TensorHandle::PACKED) {
return s;
}
if (!has_packed_inputs_) {
has_packed_inputs_ = true;
}
TF_RETURN_IF_ERROR(InitPackedHandle(i, ctx, d, in));
}
}
}
#if !defined(IS_MOBILE_PLATFORM)
if (has_remote_inputs_) {
const bool is_function = kernel->IsFunction();
serialize_remote_handle_ =
[ctx, &op_inputs, is_function](
const FunctionArgIndex& index,
eager::RemoteTensorHandle* handle) -> Status {
TensorHandle* h = op_inputs[index.index];
if (op_inputs[index.index]->Type() == TensorHandle::PACKED) {
TF_RETURN_IF_ERROR(
op_inputs[index.index]->ExtractPackedHandle(index.sub_index, &h));
}
Device* device = h->device();
bool wait_until_ready = SkipRemoteHandleWaitReady() ? false : is_function;
return ctx->RemoteMgr()->SerializeRemoteTensorHandle(h, wait_until_ready,
handle, device);
};
}
#endif
return absl::OkStatus();
}
Status ExecuteNodeArgs::GetLocalArg(const FunctionArgIndex& index,
Tensor* val) const {
Status s = EagerKernelArgs::GetLocalArg(index, val);
if (s.ok()) {
return absl::OkStatus();
}
if (packed_args_.contains(index.index)) {
Tensor* arg = packed_args_.at(index.index).at(index.sub_index).tensor;
if (arg) {
*val = *arg;
return absl::OkStatus();
} else {
return errors::NotFound("Argument (", index.index, ",", index.sub_index,
") has no local tensor.");
}
} else {
return s;
}
}
} | #include "tensorflow/core/common_runtime/eager/execute_node.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/common_runtime/eager/kernel_and_device.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
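// Test double for KernelAndDeviceFunc: only InputDevice() is exercised, so
// the base-class constructor is passed null/default placeholder arguments.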
class TestKernelAndDeviceFunc final : public KernelAndDeviceFunc {
public:
TestKernelAndDeviceFunc(std::vector<Device*> input_devices,
Device* host_cpu_device)
: KernelAndDeviceFunc(
nullptr, nullptr, {},
{}, {},
nullptr, nullptr,
host_cpu_device, "", false,
false,
false,
true,
false,
std::nullopt,
false,
Rendezvous::Factory(),
nullptr),
test_input_devices_(std::move(input_devices)) {}
Device* InputDevice(int i) const override { return test_input_devices_[i]; }
private:
std::vector<Device*> test_input_devices_;
};
TEST(ExecuteNodeTest, ExecuteNodeArgs) {
StaticDeviceMgr device_mgr(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
Device* device0 = device_mgr.ListDevices().at(0);
auto remote_device_mgr = std::make_unique<DynamicDeviceMgr>();
std::vector<std::unique_ptr<Device>> remote_devices;
remote_devices.emplace_back(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:1"));
TF_ASSERT_OK(remote_device_mgr->AddDevices(std::move(remote_devices)));
Device* device1 = remote_device_mgr->ListDevices().at(0);
Status s;
std::unique_ptr<CompositeDevice> composite_device =
CompositeDevice::MakeDevice({device0->name(), device1->name()},
                                  /*unique_device_id=*/0,
device_mgr.HostCPU()->parsed_name(), &s);
TF_ASSERT_OK(s);
auto ctx = new EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT, false,
&device_mgr, false, nullptr, nullptr, nullptr,
true);
  auto remote_mgr = std::make_unique<eager::RemoteMgr>(
      /*is_master=*/true, ctx);
TF_ASSERT_OK(ctx->InitializeRemoteMaster(
nullptr, nullptr,
nullptr, nullptr,
std::move(remote_device_mgr), {},
EagerContext::NewContextId(),
nullptr, &device_mgr, 600,
nullptr, std::move(remote_mgr)));
DataType dtype = DT_FLOAT;
Tensor t0(dtype, TensorShape({}));
t0.scalar<float>()() = {1.0f};
TensorHandle* h0 =
TensorHandle::CreateLocalHandle(std::move(t0), device0, device0, ctx);
Tensor t1(dtype, TensorShape({}));
t1.scalar<float>()() = {2.0f};
TensorHandle* h1 =
TensorHandle::CreateLocalHandle(std::move(t1), device0, device0, ctx);
  TensorHandle* h2 = TensorHandle::CreateLazyRemoteHandle(
      /*op_id=*/1, /*output_num=*/0, dtype, device1, /*is_ready=*/true, ctx);
  TensorHandle* h3 = TensorHandle::CreateLazyRemoteHandle(
      /*op_id=*/2, /*output_num=*/1, dtype, device1, /*is_ready=*/true, ctx);
TensorHandle* packed_h = nullptr;
TF_ASSERT_OK(TensorHandle::CreatePackedHandle({h1, h2}, ctx, &packed_h));
absl::InlinedVector<TensorHandle*, 4> inputs = {h0, packed_h, h3};
std::vector<Device*> input_devices;
for (auto* h : inputs) {
input_devices.push_back(h->DeviceOrHostCPU(*ctx));
}
const core::RefCountPtr<KernelAndDevice> kernel(
new TestKernelAndDeviceFunc(std::move(input_devices), device0));
ExecuteNodeArgs args(inputs.size());
TF_EXPECT_OK(args.Init(ctx, inputs, kernel));
EXPECT_TRUE(args.HasRemoteOrPackedInputs());
Tensor local0;
TF_EXPECT_OK(args.GetLocalArg(FunctionArgIndex(0), &local0));
EXPECT_EQ(local0.flat<float>().size(), 1);
EXPECT_EQ(local0.flat<float>()(0), 1.0);
Tensor local1;
TF_EXPECT_OK(args.GetLocalArg(FunctionArgIndex(1, 0), &local1));
EXPECT_EQ(local1.flat<float>().size(), 1);
EXPECT_EQ(local1.flat<float>()(0), 2.0);
eager::RemoteTensorHandle remote0;
TF_EXPECT_OK(args.GetRemoteArg(FunctionArgIndex(1, 1), &remote0));
EXPECT_EQ(remote0.op_id(), 1);
EXPECT_EQ(remote0.output_num(), 0);
eager::RemoteTensorHandle remote1;
TF_EXPECT_OK(args.GetRemoteArg(FunctionArgIndex(2), &remote1));
EXPECT_EQ(remote1.op_id(), 2);
EXPECT_EQ(remote1.output_num(), 1);
h0->Unref();
h1->Unref();
h2->Unref();
h3->Unref();
packed_h->Unref();
ctx->Unref();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/execute_node.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/execute_node_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
957c48a5-fd83-42db-b8cb-de8f81d5dac2 | cpp | tensorflow/tensorflow | compiler | tensorflow/lite/delegates/gpu/gl/compiler.cc | third_party/xla/xla/service/compiler_test.cc | #include "tensorflow/lite/delegates/gpu/gl/compiler.h"
#include <algorithm>
#include <any>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_inline.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_inplace.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.h"
#include "tensorflow/lite/delegates/gpu/gl/float16_conversions.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
namespace tflite {
namespace gpu {
namespace gl {
namespace {
struct ExceedSizeChecker {
bool operator()(uint32_t v) const { return v > max_size.x; }
bool operator()(const uint2& v) const {
return v.x > max_size.x || v.y > max_size.y;
}
bool operator()(const uint3& v) const {
return v.x > max_size.x || v.y > max_size.y || v.z > max_z_size;
}
int2 max_size;
int max_z_size;
};
bool ExceedsMaxSize(const Object& object, const GpuInfo& gpu_info) {
ExceedSizeChecker size_checker;
size_checker.max_size =
int2(gpu_info.GetMaxImage2DWidth(), gpu_info.GetMaxImage2DHeight());
size_checker.max_z_size = gpu_info.GetMaxImage2DArrayLayers();
return std::visit(size_checker, object.size);
}
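// Heuristic: textures tend to be faster than buffers on Adreno GPUs, with a
// carve-out below for reference objects on non-630 Adrenos when precision
// loss is not allowed.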
ObjectType ChooseFastestObjectType(const GpuInfo& gpu_info) {
return gpu_info.IsAdreno() ? ObjectType::TEXTURE : ObjectType::BUFFER;
}
ObjectType ChooseFastestRefObjectType(const GpuInfo& gpu_info,
const CompilationOptions& options) {
if (!gpu_info.IsAdreno()) {
return ObjectType::BUFFER;
}
if (gpu_info.adreno_info.adreno_gpu == AdrenoGpu::kAdreno630) {
return ObjectType::TEXTURE;
} else {
return options.allow_precision_loss ? ObjectType::TEXTURE
: ObjectType::BUFFER;
}
}
class CompilerImpl : public Compiler {
public:
CompilerImpl(const NodeShader* node_shader, const GpuInfo* gpu_info,
const CompilationOptions& options)
: node_shader_(*node_shader), gpu_info_(*gpu_info), options_(options) {
if (options_.preferred_obj_type == ObjectType::UNKNOWN) {
options_.preferred_obj_type = ChooseFastestObjectType(*gpu_info);
}
if (options_.ref_obj_type == ObjectType::UNKNOWN) {
options_.ref_obj_type = ChooseFastestRefObjectType(*gpu_info, options);
}
#ifdef __ANDROID__
if (gpu_info_.IsAdreno() &&
gpu_info_.adreno_info.adreno_gpu == AdrenoGpu::kAdreno660) {
char sdk_version[PROP_VALUE_MAX];
__system_property_get("ro.build.version.sdk", sdk_version);
if (!strcmp(sdk_version, "30")) options_.allow_precision_loss = false;
}
#endif
}
absl::Status Compile(
const GraphFloat32& graph,
const std::unordered_set<int>& tflite_graph_io,
const ShaderCodeCallback& callback) final {
RETURN_IF_ERROR(graph.MakeExactCopy(&compiled_graph_));
if (options_.dynamic_batch) {
for (auto value : compiled_graph_.values()) {
value->tensor.shape.b = 1;
}
}
for (auto node : compiled_graph_.nodes()) {
CompiledNodeAttributes attr;
attr.node_indices.push_back(node->id);
NodeShader::GenerationContext ctx = {&gpu_info_, options_,
node->operation.type,
node->operation.attributes};
for (const auto& tensor : graph.FindInputs(node->id)) {
const auto& shape = tensor->tensor.shape;
ctx.input_shapes.push_back({shape.b, shape.h, shape.w, shape.c});
}
for (const auto& tensor : graph.FindOutputs(node->id)) {
const auto& shape = tensor->tensor.shape;
ctx.output_shapes.push_back({shape.b, shape.h, shape.w, shape.c});
}
RETURN_IF_ERROR(node_shader_.GenerateCode(ctx, &attr.code));
node->operation.attributes = std::move(attr);
}
ModelTransformer transformer(&compiled_graph_);
if (options_.fuse_operations) {
FuseAutoOutputWithInline fuse_inline;
if (!transformer.Apply("fuse_auto_with_inline", &fuse_inline)) {
return absl::InternalError("fuse_auto_with_inline failed");
}
FuseInplaceUpdate fuse_inplace;
if (!transformer.Apply("fuse_inplace_update", &fuse_inplace)) {
return absl::InternalError("fuse_inplace failed");
}
if (options_.auto_input_fusion) {
FuseAutoInput fuse_auto_input;
if (!transformer.Apply("fuse_auto_input", &fuse_auto_input)) {
return absl::InternalError("fuse_auto_input failed");
}
}
}
RemoveUnusedInplaceUpdates remove_inplace_updates;
if (!transformer.Apply("remove_inplace_updates", &remove_inplace_updates)) {
return absl::InternalError("remove_inplace_updates failed");
}
absl::flat_hash_map<ValueId, Object> objects;
for (auto value : compiled_graph_.values()) {
Object object = MakePHWC4Ref(value->id, value->tensor.shape);
object.data_type = value->tensor.type;
const bool is_external =
graph.IsGraphInput(value->id) || graph.IsGraphOutput(value->id) ||
tflite_graph_io.find(value->tensor.ref) != tflite_graph_io.end();
if (is_external) {
object.object_type = ObjectType::BUFFER;
} else if (options_.allow_precision_loss) {
MaybeConvertToFloat16(&object);
}
objects[value->id] = std::move(object);
}
for (auto node : compiled_graph_.nodes()) {
auto& attr =
std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
if (attr.code.workload == uint3()) {
auto outputs = compiled_graph_.FindOutputs(node->id);
auto shape = outputs[0]->tensor.shape;
for (auto output : outputs) {
if (shape != output->tensor.shape) {
return absl::FailedPreconditionError(
"Workload uint3() requires all output sizes to match");
}
}
attr.code.workload = uint3(shape.w, shape.h, DivideRoundUp(shape.c, 4));
}
int num_textures = 0;
auto set_object_type = [&](Object* object) {
if (object->object_type == ObjectType::BUFFER) {
return;
}
bool is_ref = IsRef(*object);
if (num_textures < gpu_info_.GetMaxImageArguments() &&
!ExceedsMaxSize(*object, gpu_info_) &&
(object->object_type == ObjectType::TEXTURE ||
(is_ref && options_.ref_obj_type == ObjectType::TEXTURE) ||
(!is_ref && options_.preferred_obj_type == ObjectType::TEXTURE))) {
object->object_type = ObjectType::TEXTURE;
num_textures++;
} else {
object->object_type = ObjectType::BUFFER;
}
};
for (auto& object : attr.code.objects) {
if (options_.allow_precision_loss) {
MaybeConvertToFloat16(&object.second);
}
set_object_type(&object.second);
}
for (auto ref : compiled_graph_.FindInputs(node->id)) {
set_object_type(&objects[ref->id]);
}
for (auto ref : compiled_graph_.FindOutputs(node->id)) {
set_object_type(&objects[ref->id]);
}
}
ShaderCodegen codegen(options_, gpu_info_);
for (auto node : compiled_graph_.nodes()) {
auto& attr =
std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
if (attr.code.source_code.empty()) {
continue;
}
for (auto ref : compiled_graph_.FindInputs(node->id)) {
auto object = objects[ref->id];
object.access = AccessType::READ;
attr.inputs.push_back(object);
}
for (auto ref : compiled_graph_.FindOutputs(node->id)) {
auto object = objects[ref->id];
object.access = AccessType::WRITE;
attr.outputs.push_back(object);
}
uint32_t binding = 0;
auto set_binding = [&](ObjectType type, Object& object) {
if (object.object_type == type) {
object.binding = binding++;
}
};
for (auto& object : attr.inputs) {
set_binding(ObjectType::TEXTURE, object);
}
for (auto& object : attr.outputs) {
set_binding(ObjectType::TEXTURE, object);
}
for (auto& object : attr.code.objects) {
set_binding(ObjectType::TEXTURE, object.second);
}
for (auto& object : attr.inputs) {
set_binding(ObjectType::BUFFER, object);
}
for (auto& object : attr.outputs) {
set_binding(ObjectType::BUFFER, object);
}
for (auto& object : attr.code.objects) {
set_binding(ObjectType::BUFFER, object.second);
}
ShaderCode shader_code;
RETURN_IF_ERROR(codegen.Build(std::move(attr), &shader_code));
RETURN_IF_ERROR(callback(std::move(shader_code)));
}
return absl::OkStatus();
}
private:
const NodeShader& node_shader_;
const GpuInfo& gpu_info_;
CompilationOptions options_;
GraphFloat32 compiled_graph_;
};
}
std::unique_ptr<Compiler> NewCompiler(const NodeShader* node_shader,
const GpuInfo* gpu_info,
const CompilationOptions& options) {
return std::make_unique<CompilerImpl>(node_shader, gpu_info, options);
}
}
}
} | #include "xla/service/compiler.h"
#include <gtest/gtest.h>
#include "xla/autotune_results.pb.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(TargetConfigTest, DISABLED_ON_CPU(ExecutorConstructorFillsAllFields)) {
TF_ASSERT_OK(stream_executor::ValidateGPUMachineManager());
TF_ASSERT_OK_AND_ASSIGN(
stream_executor::StreamExecutor * executor,
stream_executor::GPUMachineManager()->ExecutorForDevice(0));
Compiler::TargetConfig config(executor);
stream_executor::GpuTargetConfigProto target = config.ToProto();
EXPECT_GT(target.dnn_version_info().major(), 0) << target.DebugString();
EXPECT_GT(target.gpu_device_info().threads_per_block_limit(), 0)
<< target.DebugString();
EXPECT_NE(target.device_description_str(), "") << target.DebugString();
EXPECT_NE(target.platform_name(), "") << target.DebugString();
EXPECT_EQ(target.autotune_results().version(), 0);
EXPECT_EQ(5,
stream_executor::GpuTargetConfigProto::descriptor()->field_count())
<< "Make sure all the fields in GpuTargetConfigProto are set and "
"validated!";
}
TEST(TargetConfigTest, ProtoConstructorFillsAllFields) {
stream_executor::GpuTargetConfigProto config_proto;
config_proto.set_platform_name("platform");
config_proto.mutable_dnn_version_info()->set_major(2);
config_proto.mutable_gpu_device_info()->set_threads_per_block_limit(5);
config_proto.set_device_description_str("foo");
Compiler::TargetConfig config(config_proto);
stream_executor::GpuTargetConfigProto target = config.ToProto();
EXPECT_EQ(target.dnn_version_info().major(),
config_proto.dnn_version_info().major())
<< target.DebugString();
EXPECT_EQ(target.gpu_device_info().threads_per_block_limit(), 5)
<< target.DebugString();
EXPECT_EQ(target.device_description_str(), "foo") << target.DebugString();
EXPECT_EQ(target.platform_name(), "platform") << target.DebugString();
EXPECT_EQ(target.autotune_results().version(), 0);
EXPECT_EQ(5,
stream_executor::GpuTargetConfigProto::descriptor()->field_count())
<< "Make sure all the fields in GpuTargetConfigProto are set and "
"validated!";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b804e38d-6c4c-4a92-943d-a3e71a5cf3d9 | cpp | google/cel-cpp | number | internal/number.h | internal/number_test.cc | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_NUMBER_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_NUMBER_H_
#include <cmath>
#include <cstdint>
#include <limits>
#include "absl/types/variant.h"
namespace cel::internal {
constexpr int64_t kInt64Max = std::numeric_limits<int64_t>::max();
constexpr int64_t kInt64Min = std::numeric_limits<int64_t>::lowest();
constexpr uint64_t kUint64Max = std::numeric_limits<uint64_t>::max();
constexpr uint64_t kUintToIntMax = static_cast<uint64_t>(kInt64Max);
constexpr double kDoubleToIntMax = static_cast<double>(kInt64Max);
constexpr double kDoubleToIntMin = static_cast<double>(kInt64Min);
constexpr double kDoubleToUintMax = static_cast<double>(kUint64Max);
template <typename T>
constexpr int RoundingError() {
return 1 << (std::numeric_limits<T>::digits -
std::numeric_limits<double>::digits - 1);
}
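// The max int64/uint64 values are not exactly representable as doubles (the
// mantissa is narrower than the integer value bits), so the "largest double
// safely convertible back to the integer type" backs off from the max by the
// rounding error of the conversion.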
constexpr double kMaxDoubleRepresentableAsInt =
static_cast<double>(kInt64Max - RoundingError<int64_t>());
constexpr double kMaxDoubleRepresentableAsUint =
static_cast<double>(kUint64Max - RoundingError<uint64_t>());
#define CEL_ABSL_VISIT_CONSTEXPR
using NumberVariant = absl::variant<double, uint64_t, int64_t>;
enum class ComparisonResult {
kLesser,
kEqual,
kGreater,
kNanInequal
};
constexpr ComparisonResult Invert(ComparisonResult result) {
switch (result) {
case ComparisonResult::kLesser:
return ComparisonResult::kGreater;
case ComparisonResult::kGreater:
return ComparisonResult::kLesser;
case ComparisonResult::kEqual:
return ComparisonResult::kEqual;
case ComparisonResult::kNanInequal:
return ComparisonResult::kNanInequal;
}
}
template <typename OutType>
struct ConversionVisitor {
template <typename InType>
constexpr OutType operator()(InType v) {
return static_cast<OutType>(v);
}
};
template <typename T>
constexpr ComparisonResult Compare(T a, T b) {
return (a > b) ? ComparisonResult::kGreater
: (a == b) ? ComparisonResult::kEqual
: ComparisonResult::kLesser;
}
constexpr ComparisonResult DoubleCompare(double a, double b) {
if (!(a == a) || !(b == b)) {
return ComparisonResult::kNanInequal;
}
return Compare(a, b);
}
struct DoubleCompareVisitor {
constexpr explicit DoubleCompareVisitor(double v) : v(v) {}
constexpr ComparisonResult operator()(double other) const {
return DoubleCompare(v, other);
}
constexpr ComparisonResult operator()(uint64_t other) const {
if (v > kDoubleToUintMax) {
return ComparisonResult::kGreater;
} else if (v < 0) {
return ComparisonResult::kLesser;
} else {
return DoubleCompare(v, static_cast<double>(other));
}
}
constexpr ComparisonResult operator()(int64_t other) const {
if (v > kDoubleToIntMax) {
return ComparisonResult::kGreater;
} else if (v < kDoubleToIntMin) {
return ComparisonResult::kLesser;
} else {
return DoubleCompare(v, static_cast<double>(other));
}
}
double v;
};
struct UintCompareVisitor {
constexpr explicit UintCompareVisitor(uint64_t v) : v(v) {}
constexpr ComparisonResult operator()(double other) const {
return Invert(DoubleCompareVisitor(other)(v));
}
constexpr ComparisonResult operator()(uint64_t other) const {
return Compare(v, other);
}
constexpr ComparisonResult operator()(int64_t other) const {
if (v > kUintToIntMax || other < 0) {
return ComparisonResult::kGreater;
} else {
return Compare(v, static_cast<uint64_t>(other));
}
}
uint64_t v;
};
struct IntCompareVisitor {
constexpr explicit IntCompareVisitor(int64_t v) : v(v) {}
constexpr ComparisonResult operator()(double other) {
return Invert(DoubleCompareVisitor(other)(v));
}
constexpr ComparisonResult operator()(uint64_t other) {
return Invert(UintCompareVisitor(other)(v));
}
constexpr ComparisonResult operator()(int64_t other) {
return Compare(v, other);
}
int64_t v;
};
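// Dispatches on the left-hand alternative, then re-dispatches on the
// right-hand one, funneling every (lhs, rhs) type pairing into one of the
// typed visitors above.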
struct CompareVisitor {
explicit constexpr CompareVisitor(NumberVariant rhs) : rhs(rhs) {}
CEL_ABSL_VISIT_CONSTEXPR ComparisonResult operator()(double v) {
return absl::visit(DoubleCompareVisitor(v), rhs);
}
CEL_ABSL_VISIT_CONSTEXPR ComparisonResult operator()(uint64_t v) {
return absl::visit(UintCompareVisitor(v), rhs);
}
CEL_ABSL_VISIT_CONSTEXPR ComparisonResult operator()(int64_t v) {
return absl::visit(IntCompareVisitor(v), rhs);
}
NumberVariant rhs;
};
struct LosslessConvertibleToIntVisitor {
constexpr bool operator()(double value) const {
return value >= kDoubleToIntMin && value <= kMaxDoubleRepresentableAsInt &&
value == static_cast<double>(static_cast<int64_t>(value));
}
constexpr bool operator()(uint64_t value) const {
return value <= kUintToIntMax;
}
constexpr bool operator()(int64_t value) const { return true; }
};
struct LosslessConvertibleToUintVisitor {
constexpr bool operator()(double value) const {
return value >= 0 && value <= kMaxDoubleRepresentableAsUint &&
value == static_cast<double>(static_cast<uint64_t>(value));
}
constexpr bool operator()(uint64_t value) const { return true; }
constexpr bool operator()(int64_t value) const { return value >= 0; }
};
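// Value type wrapping the double/uint64/int64 variant so heterogeneous
// numeric comparisons and lossless-conversion checks share one interface.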
class Number {
public:
static constexpr Number FromInt64(int64_t value) { return Number(value); }
static constexpr Number FromUint64(uint64_t value) { return Number(value); }
static constexpr Number FromDouble(double value) { return Number(value); }
constexpr explicit Number(double double_value) : value_(double_value) {}
constexpr explicit Number(int64_t int_value) : value_(int_value) {}
constexpr explicit Number(uint64_t uint_value) : value_(uint_value) {}
CEL_ABSL_VISIT_CONSTEXPR double AsDouble() const {
return absl::visit(internal::ConversionVisitor<double>(), value_);
}
CEL_ABSL_VISIT_CONSTEXPR int64_t AsInt() const {
return absl::visit(internal::ConversionVisitor<int64_t>(), value_);
}
CEL_ABSL_VISIT_CONSTEXPR uint64_t AsUint() const {
return absl::visit(internal::ConversionVisitor<uint64_t>(), value_);
}
CEL_ABSL_VISIT_CONSTEXPR bool LosslessConvertibleToInt() const {
return absl::visit(internal::LosslessConvertibleToIntVisitor(), value_);
}
CEL_ABSL_VISIT_CONSTEXPR bool LosslessConvertibleToUint() const {
return absl::visit(internal::LosslessConvertibleToUintVisitor(), value_);
}
CEL_ABSL_VISIT_CONSTEXPR bool operator<(Number other) const {
return Compare(other) == internal::ComparisonResult::kLesser;
}
CEL_ABSL_VISIT_CONSTEXPR bool operator<=(Number other) const {
internal::ComparisonResult cmp = Compare(other);
return cmp != internal::ComparisonResult::kGreater &&
cmp != internal::ComparisonResult::kNanInequal;
}
CEL_ABSL_VISIT_CONSTEXPR bool operator>(Number other) const {
return Compare(other) == internal::ComparisonResult::kGreater;
}
CEL_ABSL_VISIT_CONSTEXPR bool operator>=(Number other) const {
internal::ComparisonResult cmp = Compare(other);
return cmp != internal::ComparisonResult::kLesser &&
cmp != internal::ComparisonResult::kNanInequal;
}
CEL_ABSL_VISIT_CONSTEXPR bool operator==(Number other) const {
return Compare(other) == internal::ComparisonResult::kEqual;
}
CEL_ABSL_VISIT_CONSTEXPR bool operator!=(Number other) const {
return Compare(other) != internal::ComparisonResult::kEqual;
}
template <typename T, typename Op>
T visit(Op&& op) const {
return absl::visit(std::forward<Op>(op), value_);
}
private:
internal::NumberVariant value_;
CEL_ABSL_VISIT_CONSTEXPR internal::ComparisonResult Compare(
Number other) const {
return absl::visit(internal::CompareVisitor(other.value_), value_);
}
};
}
#endif | #include "internal/number.h"
#include <cstdint>
#include <limits>
#include "internal/testing.h"
namespace cel::internal {
namespace {
constexpr double kNan = std::numeric_limits<double>::quiet_NaN();
constexpr double kInfinity = std::numeric_limits<double>::infinity();
TEST(Number, Basic) {
EXPECT_GT(Number(1.1), Number::FromInt64(1));
EXPECT_LT(Number::FromUint64(1), Number(1.1));
EXPECT_EQ(Number(1.1), Number(1.1));
EXPECT_EQ(Number::FromUint64(1), Number::FromUint64(1));
EXPECT_EQ(Number::FromInt64(1), Number::FromUint64(1));
EXPECT_GT(Number::FromUint64(1), Number::FromInt64(-1));
EXPECT_EQ(Number::FromInt64(-1), Number::FromInt64(-1));
}
TEST(Number, Conversions) {
EXPECT_TRUE(Number::FromDouble(1.0).LosslessConvertibleToInt());
EXPECT_TRUE(Number::FromDouble(1.0).LosslessConvertibleToUint());
EXPECT_FALSE(Number::FromDouble(1.1).LosslessConvertibleToInt());
EXPECT_FALSE(Number::FromDouble(1.1).LosslessConvertibleToUint());
EXPECT_TRUE(Number::FromDouble(-1.0).LosslessConvertibleToInt());
EXPECT_FALSE(Number::FromDouble(-1.0).LosslessConvertibleToUint());
EXPECT_TRUE(Number::FromDouble(kDoubleToIntMin).LosslessConvertibleToInt());
EXPECT_FALSE(Number::FromDouble(kMaxDoubleRepresentableAsUint +
RoundingError<uint64_t>())
.LosslessConvertibleToUint());
EXPECT_FALSE(Number::FromDouble(kMaxDoubleRepresentableAsInt +
RoundingError<int64_t>())
.LosslessConvertibleToInt());
EXPECT_FALSE(
Number::FromDouble(kDoubleToIntMin - 1025).LosslessConvertibleToInt());
EXPECT_EQ(Number::FromInt64(1).AsUint(), 1u);
EXPECT_EQ(Number::FromUint64(1).AsInt(), 1);
EXPECT_EQ(Number::FromDouble(1.0).AsUint(), 1);
EXPECT_EQ(Number::FromDouble(1.0).AsInt(), 1);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/number.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/number_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3b0b5c9a-b9f3-4305-a773-0ced3e5c0dfb | cpp | google/cel-cpp | struct_value | common/values/struct_value.cc | common/values/struct_value_test.cc | #include <cstddef>
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/casting.h"
#include "common/optional_ref.h"
#include "common/type.h"
#include "common/value.h"
#include "internal/status_macros.h"
namespace cel {
StructType StructValue::GetRuntimeType() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> StructType {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
ABSL_UNREACHABLE();
} else {
return alternative.GetRuntimeType();
}
},
variant_);
}
absl::string_view StructValue::GetTypeName() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> absl::string_view {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return absl::string_view{};
} else {
return alternative.GetTypeName();
}
},
variant_);
}
std::string StructValue::DebugString() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> std::string {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return std::string{};
} else {
return alternative.DebugString();
}
},
variant_);
}
absl::Status StructValue::SerializeTo(AnyToJsonConverter& converter,
absl::Cord& value) const {
AssertIsValid();
return absl::visit(
[&converter, &value](const auto& alternative) -> absl::Status {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.SerializeTo(converter, value);
}
},
variant_);
}
absl::StatusOr<Json> StructValue::ConvertToJson(
AnyToJsonConverter& converter) const {
AssertIsValid();
return absl::visit(
[&converter](const auto& alternative) -> absl::StatusOr<Json> {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.ConvertToJson(converter);
}
},
variant_);
}
bool StructValue::IsZeroValue() const {
AssertIsValid();
return absl::visit(
[](const auto& alternative) -> bool {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return false;
} else {
return alternative.IsZeroValue();
}
},
variant_);
}
absl::StatusOr<bool> StructValue::HasFieldByName(absl::string_view name) const {
AssertIsValid();
return absl::visit(
[name](const auto& alternative) -> absl::StatusOr<bool> {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.HasFieldByName(name);
}
},
variant_);
}
absl::StatusOr<bool> StructValue::HasFieldByNumber(int64_t number) const {
AssertIsValid();
return absl::visit(
[number](const auto& alternative) -> absl::StatusOr<bool> {
if constexpr (std::is_same_v<
absl::monostate,
absl::remove_cvref_t<decltype(alternative)>>) {
return absl::InternalError("use of invalid StructValue");
} else {
return alternative.HasFieldByNumber(number);
}
},
variant_);
}
namespace common_internal {
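// Field-wise equality: snapshot the lhs fields into a map, walk the rhs
// fields checking that each exists on the lhs with an equal value, then
// require matching field counts so neither side has extras.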
absl::Status StructValueEqual(ValueManager& value_manager,
const StructValue& lhs, const StructValue& rhs,
Value& result) {
if (lhs.GetTypeName() != rhs.GetTypeName()) {
result = BoolValue{false};
return absl::OkStatus();
}
absl::flat_hash_map<std::string, Value> lhs_fields;
CEL_RETURN_IF_ERROR(lhs.ForEachField(
value_manager,
[&lhs_fields](absl::string_view name,
const Value& lhs_value) -> absl::StatusOr<bool> {
lhs_fields.insert_or_assign(std::string(name), Value(lhs_value));
return true;
}));
bool equal = true;
size_t rhs_fields_count = 0;
CEL_RETURN_IF_ERROR(rhs.ForEachField(
value_manager,
[&value_manager, &result, &lhs_fields, &equal, &rhs_fields_count](
absl::string_view name,
const Value& rhs_value) -> absl::StatusOr<bool> {
auto lhs_field = lhs_fields.find(name);
if (lhs_field == lhs_fields.end()) {
equal = false;
return false;
}
CEL_RETURN_IF_ERROR(
lhs_field->second.Equal(value_manager, rhs_value, result));
if (auto bool_value = As<BoolValue>(result);
bool_value.has_value() && !bool_value->NativeValue()) {
equal = false;
return false;
}
++rhs_fields_count;
return true;
}));
if (!equal || rhs_fields_count != lhs_fields.size()) {
result = BoolValue{false};
return absl::OkStatus();
}
result = BoolValue{true};
return absl::OkStatus();
}
absl::Status StructValueEqual(ValueManager& value_manager,
const ParsedStructValueInterface& lhs,
const StructValue& rhs, Value& result) {
if (lhs.GetTypeName() != rhs.GetTypeName()) {
result = BoolValue{false};
return absl::OkStatus();
}
absl::flat_hash_map<std::string, Value> lhs_fields;
CEL_RETURN_IF_ERROR(lhs.ForEachField(
value_manager,
[&lhs_fields](absl::string_view name,
const Value& lhs_value) -> absl::StatusOr<bool> {
lhs_fields.insert_or_assign(std::string(name), Value(lhs_value));
return true;
}));
bool equal = true;
size_t rhs_fields_count = 0;
CEL_RETURN_IF_ERROR(rhs.ForEachField(
value_manager,
[&value_manager, &result, &lhs_fields, &equal, &rhs_fields_count](
absl::string_view name,
const Value& rhs_value) -> absl::StatusOr<bool> {
auto lhs_field = lhs_fields.find(name);
if (lhs_field == lhs_fields.end()) {
equal = false;
return false;
}
CEL_RETURN_IF_ERROR(
lhs_field->second.Equal(value_manager, rhs_value, result));
if (auto bool_value = As<BoolValue>(result);
bool_value.has_value() && !bool_value->NativeValue()) {
equal = false;
return false;
}
++rhs_fields_count;
return true;
}));
if (!equal || rhs_fields_count != lhs_fields.size()) {
result = BoolValue{false};
return absl::OkStatus();
}
result = BoolValue{true};
return absl::OkStatus();
}
}
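// The reference-qualified overloads below let callers move out of rvalue
// StructValues instead of copying the underlying message value.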
absl::optional<MessageValue> StructValue::AsMessage() & {
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<MessageValue> StructValue::AsMessage() const& {
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<MessageValue> StructValue::AsMessage() && {
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<MessageValue> StructValue::AsMessage() const&& {
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
optional_ref<const ParsedMessageValue> StructValue::AsParsedMessage() & {
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
optional_ref<const ParsedMessageValue> StructValue::AsParsedMessage() const& {
if (const auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return *alternative;
}
return absl::nullopt;
}
absl::optional<ParsedMessageValue> StructValue::AsParsedMessage() && {
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
absl::optional<ParsedMessageValue> StructValue::AsParsedMessage() const&& {
if (auto* alternative = absl::get_if<ParsedMessageValue>(&variant_);
alternative != nullptr) {
return std::move(*alternative);
}
return absl::nullopt;
}
StructValue::operator MessageValue() & {
ABSL_DCHECK(IsMessage()) << *this;
return absl::get<ParsedMessageValue>(variant_);
}
StructValue::operator MessageValue() const& {
ABSL_DCHECK(IsMessage()) << *this;
return absl::get<ParsedMessageValue>(variant_);
}
StructValue::operator MessageValue() && {
ABSL_DCHECK(IsMessage()) << *this;
return absl::get<ParsedMessageValue>(std::move(variant_));
}
StructValue::operator MessageValue() const&& {
ABSL_DCHECK(IsMessage()) << *this;
return absl::get<ParsedMessageValue>(std::move(variant_));
}
StructValue::operator const ParsedMessageValue&() & {
ABSL_DCHECK(IsParsedMessage()) << *this;
return absl::get<ParsedMessageValue>(variant_);
}
StructValue::operator const ParsedMessageValue&() const& {
ABSL_DCHECK(IsParsedMessage()) << *this;
return absl::get<ParsedMessageValue>(variant_);
}
StructValue::operator ParsedMessageValue() && {
ABSL_DCHECK(IsParsedMessage()) << *this;
return absl::get<ParsedMessageValue>(std::move(variant_));
}
StructValue::operator ParsedMessageValue() const&& {
ABSL_DCHECK(IsParsedMessage()) << *this;
return absl::get<ParsedMessageValue>(std::move(variant_));
}
common_internal::ValueVariant StructValue::ToValueVariant() const& {
return absl::visit(
[](const auto& alternative) -> common_internal::ValueVariant {
return alternative;
},
variant_);
}
common_internal::ValueVariant StructValue::ToValueVariant() && {
return absl::visit(
[](auto&& alternative) -> common_internal::ValueVariant {
return std::move(alternative);
},
std::move(variant_));
}
} | #include "absl/base/attributes.h"
#include "common/value.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
using ::cel::internal::DynamicParseTextProto;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::testing::An;
using ::testing::Optional;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
TEST(StructValue, Is) {
EXPECT_TRUE(StructValue(ParsedMessageValue()).Is<MessageValue>());
EXPECT_TRUE(StructValue(ParsedMessageValue()).Is<ParsedMessageValue>());
}
template <typename T>
constexpr T& AsLValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return t;
}
template <typename T>
constexpr const T& AsConstLValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return t;
}
template <typename T>
constexpr T&& AsRValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return static_cast<T&&>(t);
}
template <typename T>
constexpr const T&& AsConstRValueRef(T& t ABSL_ATTRIBUTE_LIFETIME_BOUND) {
return static_cast<const T&&>(t);
}
TEST(StructValue, As) {
google::protobuf::Arena arena;
{
StructValue value(
ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
StructValue other_value = value;
EXPECT_THAT(AsLValueRef<StructValue>(value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(AsConstLValueRef<StructValue>(value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(AsRValueRef<StructValue>(value).As<MessageValue>(),
Optional(An<MessageValue>()));
EXPECT_THAT(AsConstRValueRef<StructValue>(other_value).As<MessageValue>(),
Optional(An<MessageValue>()));
}
{
StructValue value(
ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
StructValue other_value = value;
EXPECT_THAT(AsLValueRef<StructValue>(value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
EXPECT_THAT(AsConstLValueRef<StructValue>(value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
EXPECT_THAT(AsRValueRef<StructValue>(value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
EXPECT_THAT(
AsConstRValueRef<StructValue>(other_value).As<ParsedMessageValue>(),
Optional(An<ParsedMessageValue>()));
}
}
TEST(StructValue, Cast) {
google::protobuf::Arena arena;
{
StructValue value(
ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
StructValue other_value = value;
EXPECT_THAT(static_cast<MessageValue>(AsLValueRef<StructValue>(value)),
An<MessageValue>());
EXPECT_THAT(static_cast<MessageValue>(AsConstLValueRef<StructValue>(value)),
An<MessageValue>());
EXPECT_THAT(static_cast<MessageValue>(AsRValueRef<StructValue>(value)),
An<MessageValue>());
EXPECT_THAT(
static_cast<MessageValue>(AsConstRValueRef<StructValue>(other_value)),
An<MessageValue>());
}
{
StructValue value(
ParsedMessageValue{DynamicParseTextProto<TestAllTypesProto3>(
&arena, R"pb()pb", GetTestingDescriptorPool(),
GetTestingMessageFactory())});
StructValue other_value = value;
EXPECT_THAT(
static_cast<ParsedMessageValue>(AsLValueRef<StructValue>(value)),
An<ParsedMessageValue>());
EXPECT_THAT(
static_cast<ParsedMessageValue>(AsConstLValueRef<StructValue>(value)),
An<ParsedMessageValue>());
EXPECT_THAT(
static_cast<ParsedMessageValue>(AsRValueRef<StructValue>(value)),
An<ParsedMessageValue>());
EXPECT_THAT(static_cast<ParsedMessageValue>(
AsConstRValueRef<StructValue>(other_value)),
An<ParsedMessageValue>());
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/struct_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/struct_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
90a3ea44-24b4-466c-848c-48043121493f | cpp | tensorflow/tensorflow | pjrt_executable | third_party/xla/xla/python/pjrt_ifrt/pjrt_executable.cc | third_party/xla/xla/pjrt/pjrt_executable_test.cc | #include "xla/python/pjrt_ifrt/pjrt_executable.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/primitive_util.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/pjrt_array.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/python/pjrt_ifrt/pjrt_dtype.h"
#include "xla/python/pjrt_ifrt/pjrt_host_callback.h"
#include "xla/python/pjrt_ifrt/pjrt_memory.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
absl::StatusOr<const xla::HloInstructionProto*> FindRootInstruction(
const HloModuleProto& proto) {
for (const auto& computation : proto.computations()) {
if (computation.id() == proto.entry_computation_id()) {
for (const auto& instruction : computation.instructions()) {
if (instruction.id() == computation.root_id()) {
return &instruction;
}
}
}
}
return InvalidArgument("Entry computation not found");
}
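// PjRt reports output metadata per partitioned module; only the first
// module's entry is consulted here, on the assumption that all modules
// produce outputs of the same shape.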
absl::StatusOr<std::vector<xla::PrimitiveType>>
GetFirstModuleOutputElementTypes(
xla::PjRtLoadedExecutable* pjrt_loaded_executable) {
auto element_types = pjrt_loaded_executable->GetOutputElementTypes();
TF_RETURN_IF_ERROR(element_types.status());
if (element_types->empty()) {
return FailedPrecondition("No output element types found");
}
return element_types->front();
}
absl::StatusOr<std::vector<xla::DimensionVector>>
GetFirstModuleOutputDimensions(
xla::PjRtLoadedExecutable* pjrt_loaded_executable) {
auto dimensions = pjrt_loaded_executable->GetOutputDimensions();
TF_RETURN_IF_ERROR(dimensions.status());
if (dimensions->empty()) {
return FailedPrecondition("No output dimensions found");
}
return dimensions->front();
}
absl::StatusOr<std::optional<HloSharding>> GetFirstModuleOutputSharding(
xla::PjRtLoadedExecutable* pjrt_loaded_executable,
const xla::Shape& shape) {
auto output_shardings = pjrt_loaded_executable->GetOutputShardings();
std::optional<xla::HloSharding> result_hlo_sharding;
if (output_shardings.has_value()) {
std::vector<HloSharding> hlo_shardings;
hlo_shardings.reserve(output_shardings->size());
for (const auto& sharding : *output_shardings) {
TF_ASSIGN_OR_RETURN(auto hlo_sharding, HloSharding::FromProto(sharding));
hlo_shardings.push_back(hlo_sharding);
}
if (shape.IsTuple()) {
return HloSharding::Tuple(shape, hlo_shardings);
} else {
return hlo_shardings.front();
}
}
return std::nullopt;
}
absl::StatusOr<std::optional<std::vector<absl::string_view>>>
GetFirstModuleOutputMemoryKinds(
xla::PjRtLoadedExecutable* pjrt_loaded_executable) {
auto output_memory_kinds = pjrt_loaded_executable->GetOutputMemoryKinds();
if (absl::IsUnimplemented(output_memory_kinds.status())) {
return std::nullopt;
}
TF_RETURN_IF_ERROR(output_memory_kinds.status());
if (output_memory_kinds->empty()) {
return FailedPrecondition("No output memory kinds found");
}
return std::move(output_memory_kinds)->front();
}
struct ShapePartialInfo {
std::vector<xla::PrimitiveType> element_types;
std::vector<xla::DimensionVector> dimensions;
};
absl::StatusOr<ShapePartialInfo> CreateShapePartialInfo(
absl::Span<const xla::Shape> shapes) {
ShapePartialInfo partial_info;
partial_info.element_types.reserve(shapes.size());
partial_info.dimensions.reserve(shapes.size());
for (const auto& shape : shapes) {
if (shape.IsTuple()) {
return FailedPrecondition(
"Tupled shape is not supported in `CreateShapePartialInfo`.");
}
partial_info.element_types.push_back(shape.element_type());
partial_info.dimensions.push_back(
xla::ShapeUtil::CreateDimensionVectorFromShape(shape));
}
return partial_info;
}
}
char PjRtCompatibleExecutable::ID = 0;
char PjRtCompatibleLoadedExecutable::ID = 0;
char PjRtExecutable::ID = 0;
char PjRtLoadedExecutable::ID = 0;
absl::StatusOr<std::unique_ptr<Executable>> PjRtExecutable::Create(
std::shared_ptr<xla::PjRtExecutable> pjrt_executable,
std::unique_ptr<XlaCompileOptions> compile_options) {
return std::unique_ptr<Executable>(new PjRtExecutable(
std::move(pjrt_executable), std::move(compile_options)));
}
absl::StatusOr<std::optional<std::string>> PjRtExecutable::Fingerprint() const {
DCHECK(this);
return pjrt_executable_->FingerprintExecutable();
}
absl::StatusOr<std::string> PjRtExecutable::Serialize() const {
DCHECK(this);
return pjrt_executable_->SerializeExecutable();
}
absl::StatusOr<std::unique_ptr<LoadedExecutable>> PjRtLoadedExecutable::Create(
PjRtCompatibleClient* client,
std::shared_ptr<xla::PjRtLoadedExecutable> pjrt_loaded_executable,
std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks) {
VLOG(3) << "PjRtLoadedExecutable::Create";
VLOG(3) << "Using per-shard shape";
TF_ASSIGN_OR_RETURN(
auto result_element_types,
GetFirstModuleOutputElementTypes(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_dimensions,
GetFirstModuleOutputDimensions(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_memory_kinds,
GetFirstModuleOutputMemoryKinds(pjrt_loaded_executable.get()));
return CreateInternal(client, std::move(pjrt_loaded_executable),
result_element_types, result_dimensions,
std::nullopt,
result_memory_kinds, loaded_host_callbacks);
}
static absl::StatusOr<std::vector<xla::Shape>> ResultShapesOfModule(
mlir::ModuleOp module) {
auto main = module.lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return InvalidArgument("MLIR module has no main function");
}
auto type = main.getFunctionType();
std::vector<xla::Shape> result_shapes;
result_shapes.reserve(type.getNumResults());
for (unsigned i = 0; i < type.getNumResults(); ++i) {
auto result_type = type.getResult(i);
result_shapes.push_back(xla::TypeToShape(result_type));
}
return result_shapes;
}
absl::StatusOr<std::unique_ptr<LoadedExecutable>> PjRtLoadedExecutable::Create(
PjRtCompatibleClient* client, mlir::ModuleOp module,
xla::CompileOptions compile_options,
std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks) {
VLOG(3) << "PjRtLoadedExecutable::Create";
if (VLOG_IS_ON(3)) {
module.dump();
}
VLOG(3) << compile_options.ToProto()->DebugString();
const auto& build_options = compile_options.executable_build_options;
const bool auto_spmd_partitioning =
build_options.use_spmd_partitioning() &&
build_options.num_partitions() > 1 &&
(build_options.use_auto_spmd_partitioning() ||
build_options.any_allow_spmd_sharding_propagation_to_parameters() ||
build_options.any_allow_spmd_sharding_propagation_to_output());
TF_ASSIGN_OR_RETURN(
auto pjrt_loaded_executable,
client->pjrt_client()->Compile(module, std::move(compile_options)));
if (auto_spmd_partitioning) {
VLOG(3) << "Using per-shard shape";
TF_ASSIGN_OR_RETURN(
auto result_element_types,
GetFirstModuleOutputElementTypes(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_dimensions,
GetFirstModuleOutputDimensions(pjrt_loaded_executable.get()));
TF_ASSIGN_OR_RETURN(
auto result_memory_kinds,
GetFirstModuleOutputMemoryKinds(pjrt_loaded_executable.get()));
return CreateInternal(client, std::move(pjrt_loaded_executable),
result_element_types, result_dimensions,
std::nullopt,
result_memory_kinds,
std::move(loaded_host_callbacks));
} else {
VLOG(3) << "Using full shape";
TF_ASSIGN_OR_RETURN(auto result_shapes, ResultShapesOfModule(module));
bool tuple_output = result_shapes.size() != 1;
xla::Shape result_shape;
std::vector<xla::Shape> output_shapes;
if (tuple_output) {
result_shape = xla::ShapeUtil::MakeTupleShape(result_shapes);
output_shapes = std::move(result_shapes);
} else {
result_shape = result_shapes.front();
output_shapes = result_shape.IsTuple()
? result_shape.tuple_shapes()
: std::vector<xla::Shape>{result_shape};
}
TF_ASSIGN_OR_RETURN(auto shape_partial_info,
CreateShapePartialInfo(output_shapes));
TF_ASSIGN_OR_RETURN(auto result_hlo_sharding,
GetFirstModuleOutputSharding(
pjrt_loaded_executable.get(), result_shape));
TF_ASSIGN_OR_RETURN(
auto result_memory_kinds,
GetFirstModuleOutputMemoryKinds(pjrt_loaded_executable.get()));
return CreateInternal(client, std::move(pjrt_loaded_executable),
shape_partial_info.element_types,
shape_partial_info.dimensions, result_hlo_sharding,
result_memory_kinds,
std::move(loaded_host_callbacks));
}
}
absl::StatusOr<std::unique_ptr<LoadedExecutable>>
PjRtLoadedExecutable::CreateInternal(
PjRtCompatibleClient* client,
std::shared_ptr<xla::PjRtLoadedExecutable> pjrt_loaded_executable,
absl::Span<const xla::PrimitiveType> result_element_types,
absl::Span<const xla::DimensionVector> result_dimensions,
const std::optional<xla::HloSharding>& result_hlo_sharding,
const std::optional<std::vector<absl::string_view>>& result_memory_kinds,
std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks) {
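  // Map each addressable PjRt device to its IFRT counterpart so the output
  // shardings below can be expressed in IFRT terms.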
BasicDeviceList::Devices ds;
ds.reserve(pjrt_loaded_executable->addressable_devices().size());
for (xla::PjRtDevice* device :
pjrt_loaded_executable->addressable_devices()) {
TF_ASSIGN_OR_RETURN(Device * ifrt_device, client->LookupPjRtDevice(device));
ds.push_back(ifrt_device);
}
tsl::RCReference<DeviceList> devices = BasicDeviceList::Create(std::move(ds));
std::optional<tsl::RCReference<DeviceList>> sharding_devices;
if (devices->devices().empty()) {
sharding_devices =
BasicDeviceList::Create({client->addressable_devices().front()});
} else {
sharding_devices = devices;
}
std::vector<DType> output_dtypes;
std::vector<Shape> output_shapes;
std::vector<std::shared_ptr<const Sharding>> output_shardings;
auto append_arg = [&](const xla::PrimitiveType& element_type,
const xla::DimensionVector& dimensions,
const xla::HloSharding* sharding,
MemoryKind memory_kind) -> absl::Status {
TF_ASSIGN_OR_RETURN(auto dtype, ToDType(element_type));
output_dtypes.push_back(dtype);
output_shapes.push_back(Shape(dimensions));
CHECK(xla::primitive_util::IsArrayType(element_type));
xla::DimensionVector tile_shape_dimensions = dimensions;
if (sharding != nullptr) {
CHECK(!sharding->IsTuple());
tile_shape_dimensions =
xla::ShapeUtil::CreateDimensionVectorFromShape(sharding->TileShape(
xla::ShapeUtil::MakeShape(element_type, dimensions)));
}
output_shardings.push_back(ifrt::ConcreteEvenSharding::Create(
*sharding_devices, memory_kind,
ifrt::Shape(dimensions),
ifrt::Shape(tile_shape_dimensions)));
return absl::OkStatus();
};
auto append_token = [&](MemoryKind memory_kind) {
output_dtypes.push_back(DType(DType::kToken));
output_shapes.push_back(Shape({}));
output_shardings.push_back(
ifrt::ConcreteEvenSharding::Create(*sharding_devices, memory_kind,
ifrt::Shape({}),
ifrt::Shape({})));
};
auto check_output_sharding_condition =
[](absl::Span<const xla::PrimitiveType> element_types,
const xla::HloSharding& sharding) {
if (sharding.IsTuple()) {
return element_types.size() == sharding.tuple_elements().size() ||
(element_types.empty() &&
sharding.tuple_elements().size() == 1);
}
return element_types.size() == 1;
};
if (result_memory_kinds.has_value() &&
result_memory_kinds->size() != result_element_types.size()) {
return FailedPrecondition(
"Output memory kinds are inconsistent with the output shape");
}
if (result_hlo_sharding.has_value() &&
!check_output_sharding_condition(result_element_types,
*result_hlo_sharding)) {
return FailedPrecondition(
"Output sharding is inconsistent with the output shape");
}
CHECK_EQ(result_element_types.size(), result_dimensions.size());
output_dtypes.reserve(result_element_types.size());
output_shapes.reserve(result_element_types.size());
output_shardings.reserve(result_element_types.size());
for (int i = 0; i < result_element_types.size(); ++i) {
const auto& element_type = result_element_types[i];
MemoryKind element_memory_kind;
if (result_memory_kinds.has_value()) {
element_memory_kind = MemoryKind((*result_memory_kinds)[i]);
}
if (xla::primitive_util::IsArrayType(element_type)) {
const xla::HloSharding* element_hlo_sharding = nullptr;
if (result_hlo_sharding.has_value()) {
element_hlo_sharding = result_hlo_sharding->IsTuple()
? &result_hlo_sharding->tuple_elements()[i]
: &*result_hlo_sharding;
if (element_hlo_sharding->IsTuple()) {
return FailedPrecondition(
"Nested-tupled output sharding is not supported");
}
}
TF_RETURN_IF_ERROR(append_arg(element_type, result_dimensions[i],
element_hlo_sharding, element_memory_kind));
} else if (element_type == TOKEN) {
append_token(element_memory_kind);
} else {
return FailedPrecondition(
"The element type is not a supported type (array, token)");
}
}
std::vector<PjRtHostSendAndRecvLoadedHostCallback*>
host_send_and_recv_callbacks;
host_send_and_recv_callbacks.reserve(loaded_host_callbacks.size());
for (auto& loaded_host_callback : loaded_host_callbacks) {
auto* host_send_and_recv_callback =
llvm::dyn_cast<PjRtHostSendAndRecvLoadedHostCallback>(
loaded_host_callback.get());
if (host_send_and_recv_callback != nullptr) {
host_send_and_recv_callbacks.push_back(host_send_and_recv_callback);
}
}
std::vector<Device*> addressable_devices;
addressable_devices.reserve(
pjrt_loaded_executable->addressable_devices().size());
for (xla::PjRtDevice* device :
pjrt_loaded_executable->addressable_devices()) {
TF_ASSIGN_OR_RETURN(Device * ifrt_device, client->LookupPjRtDevice(device));
addressable_devices.push_back(ifrt_device);
}
return std::unique_ptr<LoadedExecutable>(new PjRtLoadedExecutable(
client, std::move(pjrt_loaded_executable), std::move(devices),
std::move(addressable_devices), std::move(loaded_host_callbacks),
std::move(host_send_and_recv_callbacks), std::move(output_dtypes),
std::move(output_shapes), std::move(output_shardings)));
}
PjRtLoadedExecutable::PjRtLoadedExecutable(
PjRtCompatibleClient* client,
std::shared_ptr<xla::PjRtLoadedExecutable> pjrt_loaded_executable,
tsl::RCReference<DeviceList> devices,
std::vector<Device*> addressable_devices,
std::vector<tsl::RCReference<LoadedHostCallback>> all_loaded_host_callbacks,
std::vector<PjRtHostSendAndRecvLoadedHostCallback*>
host_send_recv_callbacks,
std::vector<DType> output_dtypes, std::vector<Shape> output_shapes,
std::vector<std::shared_ptr<const Sharding>> output_shardings)
: client_(client),
pjrt_loaded_executable_(std::move(pjrt_loaded_executable)),
devices_(std::move(devices)),
addressable_devices_(std::move(addressable_devices)),
all_loaded_host_callbacks_(
std::make_shared<std::vector<tsl::RCReference<LoadedHostCallback>>>(
std::move(all_loaded_host_callbacks))),
host_send_recv_callbacks_(std::move(host_send_recv_callbacks)),
output_dtypes_(std::move(output_dtypes)),
output_shapes_(std::move(output_shapes)),
output_shardings_(std::move(output_shardings)) {}
PjRtLoadedExecutable::~PjRtLoadedExecutable() = default;
absl::StatusOr<PjRtLoadedExecutable::ExecuteResult>
PjRtLoadedExecutable::Execute(
absl::Span<tsl::RCReference<Array>> args, const ExecuteOptions& options,
std::optional<tsl::RCReference<DeviceList>> devices) {
DCHECK(this);
std::vector<std::vector<PjRtBuffer*>> argument_handles;
std::vector<std::unique_ptr<PjRtBuffer>> owned_buffers;
int num_computations;
const bool portable_execution = devices.has_value();
PjRtCompatibleDevice* portable_execution_device = nullptr;
if (portable_execution) {
if ((*devices)->size() != 1) {
return InvalidArgument(
"Only single-shard portable execution is supported");
}
num_computations = 1;
portable_execution_device =
static_cast<PjRtDevice*>((*devices)->devices().front());
} else {
if (devices_->devices().empty()) {
return InvalidArgument("No devices provided for portable executable");
}
num_computations = devices_->size();
}
argument_handles.resize(num_computations);
for (int i = 0; i < num_computations; ++i) {
argument_handles[i].reserve(args.size());
}
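  // Transpose the per-argument, per-shard buffers into per-computation
  // argument lists, the layout PjRt's Execute expects.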
for (int i = 0; i < args.size(); ++i) {
auto* pjrt_array =
llvm::dyn_cast_or_null<PjRtCompatibleArray>(args[i].get());
if (!pjrt_array) {
return InvalidArgument(
"Only PjRtCompatibleArray is supported, but argument %d is %s", i,
          args[i]->DebugString());
}
int j = 0;
for (const auto& pjrt_buffer : pjrt_array->pjrt_buffers()) {
argument_handles[j].push_back(pjrt_buffer.get());
++j;
}
}
const bool returned_future_supported =
pjrt_loaded_executable_->IsReturnedFutureSupported();
xla::ExecuteOptions opts;
opts.untuple_result = true;
opts.launch_id = options.launch_id;
opts.use_major_to_minor_data_layout_for_callbacks = true;
opts.non_donatable_input_indices = options.non_donatable_input_indices;
if (!all_loaded_host_callbacks_->empty() && !returned_future_supported) {
return Internal(
"Host callback not supported without returned future support in "
"runtime: %s",
client_->runtime_type());
}
std::unique_ptr<HostCallbackStates> host_callback_states;
if (!host_send_recv_callbacks_.empty()) {
host_callback_states = std::make_unique<HostCallbackStates>();
for (int i = 0; i < num_computations; ++i) {
auto& contexts = host_callback_states->contexts.emplace_back();
auto& send_callbacks =
host_callback_states->send_callbacks.emplace_back();
auto& recv_callbacks =
host_callback_states->recv_callbacks.emplace_back();
for (const auto& host_send_recv_callback : host_send_recv_callbacks_) {
contexts.push_back(CreateHostCallbackStateAndAppendSendRecvCallbacks(
host_send_recv_callback->host_callback(),
nullptr, send_callbacks,
recv_callbacks, opts.use_major_to_minor_data_layout_for_callbacks));
}
}
opts.send_callbacks = host_callback_states->send_callbacks;
opts.recv_callbacks = host_callback_states->recv_callbacks;
}
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> pjrt_outputs;
xla::ifrt::Future<> status;
if (portable_execution) {
std::optional<PjRtFuture<>> returned_pjrt_future;
TF_RET_CHECK(portable_execution_device->IsAddressable());
TF_ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<PjRtBuffer>> single_device_pjrt_results,
pjrt_loaded_executable_->ExecutePortable(
argument_handles.front(), portable_execution_device->pjrt_device(),
opts, returned_pjrt_future,
returned_future_supported));
pjrt_outputs.push_back(std::move(single_device_pjrt_results));
if (returned_future_supported) {
status = *std::move(returned_pjrt_future);
} else {
status = Future<>(absl::OkStatus());
}
} else {
std::optional<std::vector<PjRtFuture<>>> returned_pjrt_futures;
if (returned_future_supported) {
returned_pjrt_futures.emplace();
}
TF_ASSIGN_OR_RETURN(
pjrt_outputs, pjrt_loaded_executable_->Execute(argument_handles, opts,
returned_pjrt_futures));
if (returned_future_supported) {
status = JoinFutures(absl::MakeSpan(*returned_pjrt_futures));
} else {
status = Future<>(absl::OkStatus());
}
}
if (!all_loaded_host_callbacks_->empty()) {
status.OnReady([all_loaded_host_callbacks = all_loaded_host_callbacks_,
host_callback_states =
std::move(host_callback_states)](absl::Status) mutable {
all_loaded_host_callbacks.reset();
});
}
std::vector<tsl::RCReference<Array>> outputs;
if (pjrt_outputs.size() != num_computations) {
return FailedPrecondition(
"Unexpected number of computations in outputs: %d vs. %d",
pjrt_outputs.size(), num_computations);
}
const int num_outputs = pjrt_outputs.front().size();
if (num_outputs != output_dtypes_.size()) {
return FailedPrecondition("Unexpected number of outputs: %d vs. %d",
num_outputs, output_dtypes_.size());
}
outputs.reserve(num_outputs);
absl::flat_hash_map<MemoryKind, std::shared_ptr<const Sharding>>
single_device_shardings;
for (int i = 0; i < num_outputs; ++i) {
PjRtArray::PjRtBuffers buffers;
buffers.reserve(num_computations);
const MemoryKind first_memory_kind =
MakeMemoryKindFromPjRtBuffer(pjrt_outputs[0][i].get());
const MemoryKind canonical_first_memory_kind =
CanonicalizeMemoryKindWithPjRtDevice(first_memory_kind,
pjrt_outputs[0][i]->device());
for (int j = 0; j < num_computations; ++j) {
if (j > 0) {
if (auto memory_kind =
MakeMemoryKindFromPjRtBuffer(pjrt_outputs[j][i].get());
canonical_first_memory_kind !=
CanonicalizeMemoryKindWithPjRtDevice(
memory_kind, pjrt_outputs[j][i]->device())) {
return FailedPrecondition(
"Memory kind mismatch between PjRtBuffers. Got one buffer with "
"memory kind '%v' and another with memory_kind '%v'",
first_memory_kind, memory_kind);
}
}
buffers.push_back(
std::shared_ptr<PjRtBuffer>(pjrt_outputs[j][i].release()));
}
std::shared_ptr<const Sharding> sharding;
if (portable_execution) {
if (auto it = single_device_shardings.find(first_memory_kind);
it == single_device_shardings.end()) {
sharding =
single_device_shardings
.insert({first_memory_kind,
SingleDeviceSharding::Create(portable_execution_device,
first_memory_kind)})
.first->second;
} else {
sharding = it->second;
}
} else {
sharding = output_shardings_[i];
}
outputs.push_back(*PjRtArray::Create(client_, output_dtypes_[i],
output_shapes_[i], std::move(sharding),
std::move(buffers)));
}
ExecuteResult result;
if (options.fill_status) {
result.status = status;
}
result.outputs = std::move(outputs);
return result;
}
absl::StatusOr<std::optional<std::string>> PjRtLoadedExecutable::Fingerprint()
const {
DCHECK(this);
absl::StatusOr<std::string> fingerprint =
pjrt_loaded_executable_->FingerprintExecutable();
if (fingerprint.ok()) {
return {fingerprint.value()};
} else if (fingerprint.status().code() == absl::StatusCode::kUnimplemented) {
return std::nullopt;
} else {
return fingerprint.status();
}
}
absl::StatusOr<std::string> PjRtLoadedExecutable::Serialize() const {
DCHECK(this);
return pjrt_loaded_executable_->SerializeExecutable();
}
Future<> PjRtLoadedExecutable::Delete() {
DCHECK(this);
pjrt_loaded_executable_->Delete();
return Future<>(absl::OkStatus());
}
}
} | #include "xla/pjrt/pjrt_executable.h"
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/client/executable_build_options.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status_matchers.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
TEST(CompileOptionsTest, Serialization) {
CompileOptions src;
src.compile_portable_executable = true;
src.parameter_is_tupled_arguments = true;
src.profile_version = 1;
src.argument_layouts = {ShapeUtil::MakeShape(S32, {1})};
ExecutableBuildOptions build_option;
build_option.set_device_assignment(DeviceAssignment(1, 1));
src.executable_build_options = build_option;
TF_ASSERT_OK_AND_ASSIGN(CompileOptionsProto proto, src.ToProto());
TF_ASSERT_OK_AND_ASSIGN(CompileOptions output,
CompileOptions::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(CompileOptionsProto output_proto, src.ToProto());
EXPECT_EQ(proto.SerializeAsString(), output_proto.SerializeAsString());
}
TEST(CompileOptionsTest, MultiSliceConfigNotSupported) {
CompileOptionsProto proto;
*proto.mutable_serialized_multi_slice_config() = "multi_size_config";
auto option = CompileOptions::FromProto(proto);
EXPECT_THAT(
option.status(),
StatusIs(
absl::StatusCode::kUnimplemented,
"multi_slice_config not supported in CompileOptions::FromProto."));
}
TEST(ExecuteOptionsTest, Serialization) {
ExecuteOptions src;
src.arguments_are_tupled = true;
src.untuple_result = false;
src.launch_id = 1234;
src.strict_shape_checking = true;
src.execution_mode = ExecuteOptions::ExecutionMode::kAsynchronous;
src.non_donatable_input_indices = {2, 3};
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptionsProto proto, src.ToProto());
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptions output,
ExecuteOptions::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(ExecuteOptionsProto output_proto, src.ToProto());
EXPECT_EQ(proto.SerializeAsString(), output_proto.SerializeAsString());
}
TEST(ExecuteOptionsTest, SendRecvNotSupported) {
ExecuteOptions options;
std::vector<std::vector<SendCallback>> send_callbacks(1);
options.send_callbacks = send_callbacks;
std::vector<std::vector<RecvCallback>> recv_callbacks(1);
options.recv_callbacks = recv_callbacks;
EXPECT_THAT(
options.ToProto(),
StatusIs(absl::StatusCode::kUnimplemented,
"ExecuteOptions with send/recv calbacks is not serializable"));
}
TEST(ExecuteOptionsTest, ApplyOptionsCanParseStringsAndEnums) {
using OptionOverride = std::variant<std::string, bool, int64_t, double>;
std::vector<std::pair<std::string, OptionOverride>> env_override_options;
env_override_options = {
{"xla_gpu_use_runtime_fusion", std::string("True")},
{"xla_gpu_graph_min_graph_size", std::string("2")},
{"xla_gpu_disable_async_collectives", std::string("2")},
{"xla_gpu_redzone_scratch_max_megabytes", std::string("3400")},
{"xla_gpu_auto_spmd_partitioning_memory_budget_ratio", 0.9},
{"xla_gpu_pgle_profile_file_or_directory_path", std::string("abc")}};
CompileOptions src;
src.env_option_overrides = env_override_options;
auto s = src.ApplyAllOptionOverrides();
auto& debug_options = src.executable_build_options.debug_options();
EXPECT_EQ(debug_options.xla_gpu_use_runtime_fusion(), true);
EXPECT_EQ(debug_options.xla_gpu_graph_min_graph_size(), 2);
EXPECT_EQ(debug_options.xla_gpu_redzone_scratch_max_megabytes(), 3400);
EXPECT_FLOAT_EQ(
debug_options.xla_gpu_auto_spmd_partitioning_memory_budget_ratio(), 0.9);
EXPECT_EQ(debug_options.xla_gpu_pgle_profile_file_or_directory_path(), "abc");
EXPECT_EQ(debug_options.xla_gpu_disable_async_collectives().size(), 1);
EXPECT_EQ(debug_options.xla_gpu_disable_async_collectives()[0], 2);
}
TEST(CompiledMemoryStatsTest, Serialization) {
CompiledMemoryStats stats;
stats.generated_code_size_in_bytes = 2;
stats.argument_size_in_bytes = 3;
stats.output_size_in_bytes = 5;
stats.alias_size_in_bytes = 7;
stats.temp_size_in_bytes = 11;
stats.host_generated_code_size_in_bytes = 13;
stats.host_argument_size_in_bytes = 17;
stats.host_output_size_in_bytes = 19;
stats.host_alias_size_in_bytes = 23;
stats.host_temp_size_in_bytes = 29;
CompiledMemoryStatsProto serialized = stats.ToProto();
CompiledMemoryStats deserialized = CompiledMemoryStats::FromProto(serialized);
EXPECT_EQ(serialized.SerializeAsString(),
deserialized.ToProto().SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_executable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_executable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa9a5052-ef10-472d-907c-c663fac19c97 | cpp | google/leveldb | filename | db/filename.cc | db/filename_test.cc | #include "db/filename.h"
#include <cassert>
#include <cstdio>
#include <cstring>
#include "db/dbformat.h"
#include "leveldb/env.h"
#include "util/logging.h"
namespace leveldb {
Status WriteStringToFileSync(Env* env, const Slice& data,
const std::string& fname);
static std::string MakeFileName(const std::string& dbname, uint64_t number,
const char* suffix) {
char buf[100];
std::snprintf(buf, sizeof(buf), "/%06llu.%s",
static_cast<unsigned long long>(number), suffix);
return dbname + buf;
}
std::string LogFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "log");
}
std::string TableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "ldb");
}
std::string SSTTableFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "sst");
}
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
char buf[100];
std::snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
static_cast<unsigned long long>(number));
return dbname + buf;
}
std::string CurrentFileName(const std::string& dbname) {
return dbname + "/CURRENT";
}
std::string LockFileName(const std::string& dbname) { return dbname + "/LOCK"; }
std::string TempFileName(const std::string& dbname, uint64_t number) {
assert(number > 0);
return MakeFileName(dbname, number, "dbtmp");
}
std::string InfoLogFileName(const std::string& dbname) {
return dbname + "/LOG";
}
std::string OldInfoLogFileName(const std::string& dbname) {
return dbname + "/LOG.old";
}
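// Owned filenames have the form:
//    dbname/CURRENT
//    dbname/LOCK
//    dbname/LOG
//    dbname/LOG.old
//    dbname/MANIFEST-[0-9]+
//    dbname/[0-9]+.(log|sst|ldb)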
bool ParseFileName(const std::string& filename, uint64_t* number,
FileType* type) {
Slice rest(filename);
if (rest == "CURRENT") {
*number = 0;
*type = kCurrentFile;
} else if (rest == "LOCK") {
*number = 0;
*type = kDBLockFile;
} else if (rest == "LOG" || rest == "LOG.old") {
*number = 0;
*type = kInfoLogFile;
} else if (rest.starts_with("MANIFEST-")) {
rest.remove_prefix(strlen("MANIFEST-"));
uint64_t num;
if (!ConsumeDecimalNumber(&rest, &num)) {
return false;
}
if (!rest.empty()) {
return false;
}
*type = kDescriptorFile;
*number = num;
} else {
uint64_t num;
if (!ConsumeDecimalNumber(&rest, &num)) {
return false;
}
Slice suffix = rest;
if (suffix == Slice(".log")) {
*type = kLogFile;
} else if (suffix == Slice(".sst") || suffix == Slice(".ldb")) {
*type = kTableFile;
} else if (suffix == Slice(".dbtmp")) {
*type = kTempFile;
} else {
return false;
}
*number = num;
}
return true;
}
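// Points CURRENT at the given MANIFEST file by writing the name to a
// synced temp file and renaming it into place, so readers never observe a
// partially written CURRENT.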
Status SetCurrentFile(Env* env, const std::string& dbname,
uint64_t descriptor_number) {
std::string manifest = DescriptorFileName(dbname, descriptor_number);
Slice contents = manifest;
assert(contents.starts_with(dbname + "/"));
contents.remove_prefix(dbname.size() + 1);
std::string tmp = TempFileName(dbname, descriptor_number);
Status s = WriteStringToFileSync(env, contents.ToString() + "\n", tmp);
if (s.ok()) {
s = env->RenameFile(tmp, CurrentFileName(dbname));
}
if (!s.ok()) {
env->RemoveFile(tmp);
}
return s;
}
} | #include "db/filename.h"
#include "gtest/gtest.h"
#include "db/dbformat.h"
#include "port/port.h"
#include "util/logging.h"
namespace leveldb {
TEST(FileNameTest, Parse) {
Slice db;
FileType type;
uint64_t number;
static struct {
const char* fname;
uint64_t number;
FileType type;
} cases[] = {
{"100.log", 100, kLogFile},
{"0.log", 0, kLogFile},
{"0.sst", 0, kTableFile},
{"0.ldb", 0, kTableFile},
{"CURRENT", 0, kCurrentFile},
{"LOCK", 0, kDBLockFile},
{"MANIFEST-2", 2, kDescriptorFile},
{"MANIFEST-7", 7, kDescriptorFile},
{"LOG", 0, kInfoLogFile},
{"LOG.old", 0, kInfoLogFile},
{"18446744073709551615.log", 18446744073709551615ull, kLogFile},
};
for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
std::string f = cases[i].fname;
ASSERT_TRUE(ParseFileName(f, &number, &type)) << f;
ASSERT_EQ(cases[i].type, type) << f;
ASSERT_EQ(cases[i].number, number) << f;
}
static const char* errors[] = {"",
"foo",
"foo-dx-100.log",
".log",
"",
"manifest",
"CURREN",
"CURRENTX",
"MANIFES",
"MANIFEST",
"MANIFEST-",
"XMANIFEST-3",
"MANIFEST-3x",
"LOC",
"LOCKx",
"LO",
"LOGx",
"18446744073709551616.log",
"184467440737095516150.log",
"100",
"100.",
"100.lop"};
for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
std::string f = errors[i];
ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
}
}
TEST(FileNameTest, Construction) {
uint64_t number;
FileType type;
std::string fname;
fname = CurrentFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kCurrentFile, type);
fname = LockFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kDBLockFile, type);
fname = LogFileName("foo", 192);
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(192, number);
ASSERT_EQ(kLogFile, type);
fname = TableFileName("bar", 200);
ASSERT_EQ("bar/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(200, number);
ASSERT_EQ(kTableFile, type);
fname = DescriptorFileName("bar", 100);
ASSERT_EQ("bar/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(100, number);
ASSERT_EQ(kDescriptorFile, type);
fname = TempFileName("tmp", 999);
ASSERT_EQ("tmp/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(999, number);
ASSERT_EQ(kTempFile, type);
fname = InfoLogFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kInfoLogFile, type);
fname = OldInfoLogFileName("foo");
ASSERT_EQ("foo/", std::string(fname.data(), 4));
ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
ASSERT_EQ(0, number);
ASSERT_EQ(kInfoLogFile, type);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/filename.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/db/filename_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
75fb6a2c-6f24-4264-bf11-5f6743f5b5fa | cpp | google/quiche | qbone_client | quiche/quic/qbone/qbone_client.cc | quiche/quic/qbone/qbone_client_test.cc | #include "quiche/quic/qbone/qbone_client.h"
#include <memory>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_default_connection_helper.h"
#include "quiche/quic/platform/api/quic_testvalue.h"
#include "quiche/quic/tools/quic_client_default_network_helper.h"
namespace quic {
namespace {
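// Wraps the default network helper so tests can swap in a replacement via
// AdjustTestValue before the client takes ownership of it.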
std::unique_ptr<QuicClientBase::NetworkHelper> CreateNetworkHelper(
QuicEventLoop* event_loop, QboneClient* client) {
std::unique_ptr<QuicClientBase::NetworkHelper> helper =
std::make_unique<QuicClientDefaultNetworkHelper>(event_loop, client);
quic::AdjustTestValue("QboneClient/network_helper", &helper);
return helper;
}
}
QboneClient::QboneClient(QuicSocketAddress server_address,
const QuicServerId& server_id,
const ParsedQuicVersionVector& supported_versions,
QuicSession::Visitor* session_owner,
const QuicConfig& config, QuicEventLoop* event_loop,
std::unique_ptr<ProofVerifier> proof_verifier,
QbonePacketWriter* qbone_writer,
QboneClientControlStream::Handler* qbone_handler)
: QuicClientBase(server_id, supported_versions, config,
new QuicDefaultConnectionHelper(),
event_loop->CreateAlarmFactory().release(),
CreateNetworkHelper(event_loop, this),
std::move(proof_verifier), nullptr),
qbone_writer_(qbone_writer),
qbone_handler_(qbone_handler),
session_owner_(session_owner),
max_pacing_rate_(QuicBandwidth::Zero()) {
set_server_address(server_address);
crypto_config()->set_alpn("qbone");
}
QboneClient::~QboneClient() { ResetSession(); }
QboneClientSession* QboneClient::qbone_session() {
return static_cast<QboneClientSession*>(QuicClientBase::session());
}
void QboneClient::ProcessPacketFromNetwork(absl::string_view packet) {
qbone_session()->ProcessPacketFromNetwork(packet);
}
bool QboneClient::EarlyDataAccepted() {
return qbone_session()->EarlyDataAccepted();
}
bool QboneClient::ReceivedInchoateReject() {
return qbone_session()->ReceivedInchoateReject();
}
int QboneClient::GetNumSentClientHellosFromSession() {
return qbone_session()->GetNumSentClientHellos();
}
int QboneClient::GetNumReceivedServerConfigUpdatesFromSession() {
return qbone_session()->GetNumReceivedServerConfigUpdates();
}
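// QBONE carries no buffered request data to replay after a handshake
// restart, so these QuicClientBase hooks are intentionally no-ops.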
void QboneClient::ResendSavedData() {
}
void QboneClient::ClearDataToResend() {
}
bool QboneClient::HasActiveRequests() {
return qbone_session()->HasActiveRequests();
}
class QboneClientSessionWithConnection : public QboneClientSession {
public:
using QboneClientSession::QboneClientSession;
~QboneClientSessionWithConnection() override { DeleteConnection(); }
};
std::unique_ptr<QuicSession> QboneClient::CreateQuicClientSession(
const ParsedQuicVersionVector& supported_versions,
QuicConnection* connection) {
if (max_pacing_rate() > quic::QuicBandwidth::Zero()) {
QUIC_LOG(INFO) << "Setting max pacing rate to " << max_pacing_rate();
connection->SetMaxPacingRate(max_pacing_rate());
}
return std::make_unique<QboneClientSessionWithConnection>(
connection, crypto_config(), session_owner(), *config(),
supported_versions, server_id(), qbone_writer_, qbone_handler_);
}
bool QboneClient::use_quarantine_mode() const { return use_quarantine_mode_; }
void QboneClient::set_use_quarantine_mode(bool use_quarantine_mode) {
use_quarantine_mode_ = use_quarantine_mode;
}
} | #include "quiche/quic/qbone/qbone_client.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/io/quic_default_event_loop.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_alarm_factory.h"
#include "quiche/quic/core/quic_default_clock.h"
#include "quiche/quic/core/quic_default_connection_helper.h"
#include "quiche/quic/core/quic_dispatcher.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_loopback.h"
#include "quiche/quic/qbone/qbone_packet_processor_test_tools.h"
#include "quiche/quic/qbone/qbone_server_session.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_dispatcher_peer.h"
#include "quiche/quic/test_tools/quic_server_peer.h"
#include "quiche/quic/test_tools/server_thread.h"
#include "quiche/quic/tools/quic_memory_cache_backend.h"
#include "quiche/quic/tools/quic_server.h"
namespace quic {
namespace test {
namespace {
using ::testing::ElementsAre;
ParsedQuicVersionVector GetTestParams() {
SetQuicReloadableFlag(quic_disable_version_q046, false);
return CurrentSupportedVersionsWithQuicCrypto();
}
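// Test packets get an IPv6 header prepended; the hop limit differs (5 in,
// 4 out), presumably reflecting the decrement applied when forwarding.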
std::string TestPacketIn(const std::string& body) {
return PrependIPv6HeaderForTest(body, 5);
}
std::string TestPacketOut(const std::string& body) {
return PrependIPv6HeaderForTest(body, 4);
}
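// Thread-safe QbonePacketWriter that records every packet written to the
// network so tests can assert on the captured payloads.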
class DataSavingQbonePacketWriter : public QbonePacketWriter {
public:
void WritePacketToNetwork(const char* packet, size_t size) override {
quiche::QuicheWriterMutexLock lock(&mu_);
data_.push_back(std::string(packet, size));
}
std::vector<std::string> data() {
quiche::QuicheWriterMutexLock lock(&mu_);
return data_;
}
private:
quiche::QuicheMutex mu_;
std::vector<std::string> data_;
};
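// Server session that owns the QuicConnection it was constructed with.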
class ConnectionOwningQboneServerSession : public QboneServerSession {
public:
ConnectionOwningQboneServerSession(
const ParsedQuicVersionVector& supported_versions,
QuicConnection* connection, Visitor* owner, const QuicConfig& config,
const QuicCryptoServerConfig* quic_crypto_server_config,
QuicCompressedCertsCache* compressed_certs_cache,
QbonePacketWriter* writer)
: QboneServerSession(supported_versions, connection, owner, config,
quic_crypto_server_config, compressed_certs_cache,
writer, TestLoopback6(), TestLoopback6(), 64,
nullptr),
connection_(connection) {}
private:
std::unique_ptr<QuicConnection> connection_;
};
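// Dispatcher that builds QBONE server sessions and checks that the
// negotiated ALPN is "qbone".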
class QuicQboneDispatcher : public QuicDispatcher {
public:
QuicQboneDispatcher(
const QuicConfig* config, const QuicCryptoServerConfig* crypto_config,
QuicVersionManager* version_manager,
std::unique_ptr<QuicConnectionHelperInterface> helper,
std::unique_ptr<QuicCryptoServerStreamBase::Helper> session_helper,
std::unique_ptr<QuicAlarmFactory> alarm_factory,
QbonePacketWriter* writer, ConnectionIdGeneratorInterface& generator)
: QuicDispatcher(config, crypto_config, version_manager,
std::move(helper), std::move(session_helper),
std::move(alarm_factory), kQuicDefaultConnectionIdLength,
generator),
writer_(writer) {}
std::unique_ptr<QuicSession> CreateQuicSession(
QuicConnectionId id, const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address, absl::string_view alpn,
const ParsedQuicVersion& version,
const ParsedClientHello& ,
ConnectionIdGeneratorInterface& connection_id_generator) override {
QUICHE_CHECK_EQ(alpn, "qbone");
QuicConnection* connection = new QuicConnection(
id, self_address, peer_address, helper(), alarm_factory(), writer(),
false, Perspective::IS_SERVER,
ParsedQuicVersionVector{version}, connection_id_generator);
auto session = std::make_unique<ConnectionOwningQboneServerSession>(
GetSupportedVersions(), connection, this, config(), crypto_config(),
compressed_certs_cache(), writer_);
session->Initialize();
return session;
}
private:
QbonePacketWriter* writer_;
};
class QboneTestServer : public QuicServer {
public:
explicit QboneTestServer(std::unique_ptr<ProofSource> proof_source,
quic::QuicMemoryCacheBackend* response_cache)
: QuicServer(std::move(proof_source), response_cache) {}
QuicDispatcher* CreateQuicDispatcher() override {
return new QuicQboneDispatcher(
&config(), &crypto_config(), version_manager(),
std::make_unique<QuicDefaultConnectionHelper>(),
std::make_unique<QboneCryptoServerStreamHelper>(),
event_loop()->CreateAlarmFactory(), &writer_,
connection_id_generator());
}
std::vector<std::string> data() { return writer_.data(); }
private:
DataSavingQbonePacketWriter writer_;
};
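// Test client that captures outgoing packets in a
// DataSavingQbonePacketWriter and adds event-loop wait helpers.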
class QboneTestClient : public QboneClient {
public:
QboneTestClient(QuicSocketAddress server_address,
const QuicServerId& server_id,
const ParsedQuicVersionVector& supported_versions,
QuicEventLoop* event_loop,
std::unique_ptr<ProofVerifier> proof_verifier)
: QboneClient(server_address, server_id, supported_versions,
nullptr, QuicConfig(), event_loop,
std::move(proof_verifier), &qbone_writer_, nullptr) {}
~QboneTestClient() override {}
void SendData(const std::string& data) {
qbone_session()->ProcessPacketFromNetwork(data);
}
void WaitForWriteToFlush() {
while (connected() && session()->HasDataToWrite()) {
WaitForEvents();
}
}
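// Spins the event loop until at least |n| packets have been captured or
// the deadline (measured on the connection's clock) passes.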
bool WaitForDataSize(int n, QuicTime::Delta timeout) {
const QuicClock* clock =
quic::test::QuicConnectionPeer::GetHelper(session()->connection())
->GetClock();
const QuicTime deadline = clock->Now() + timeout;
while (data().size() < static_cast<size_t>(n)) {
if (clock->Now() > deadline) {
return false;
}
WaitForEvents();
}
return true;
}
std::vector<std::string> data() { return qbone_writer_.data(); }
private:
DataSavingQbonePacketWriter qbone_writer_;
};
class QboneClientTest : public QuicTestWithParam<ParsedQuicVersion> {};
INSTANTIATE_TEST_SUITE_P(Tests, QboneClientTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
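// End-to-end test: the client tunnels two packets to the server, then the
// server session pushes three packets (one short, two 1000-byte) back to
// the client.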
TEST_P(QboneClientTest, SendDataFromClient) {
quic::QuicMemoryCacheBackend server_backend;
auto server = std::make_unique<QboneTestServer>(
crypto_test_utils::ProofSourceForTesting(), &server_backend);
QboneTestServer* server_ptr = server.get();
QuicSocketAddress server_address(TestLoopback(), 0);
ServerThread server_thread(std::move(server), server_address);
server_thread.Initialize();
server_address =
QuicSocketAddress(server_address.host(), server_thread.GetPort());
server_thread.Start();
std::unique_ptr<QuicEventLoop> event_loop =
GetDefaultEventLoop()->Create(quic::QuicDefaultClock::Get());
QboneTestClient client(
server_address, QuicServerId("test.example.com", server_address.port()),
ParsedQuicVersionVector{GetParam()}, event_loop.get(),
crypto_test_utils::ProofVerifierForTesting());
ASSERT_TRUE(client.Initialize());
ASSERT_TRUE(client.Connect());
ASSERT_TRUE(client.WaitForOneRttKeysAvailable());
client.SendData(TestPacketIn("hello"));
client.SendData(TestPacketIn("world"));
client.WaitForWriteToFlush();
ASSERT_TRUE(
server_thread.WaitUntil([&] { return server_ptr->data().size() >= 2; },
QuicTime::Delta::FromSeconds(5)));
std::string long_data(1000, 'A');
server_thread.Schedule([server_ptr, &long_data]() {
EXPECT_THAT(server_ptr->data(),
ElementsAre(TestPacketOut("hello"), TestPacketOut("world")));
auto server_session = static_cast<QboneServerSession*>(
QuicDispatcherPeer::GetFirstSessionIfAny(
QuicServerPeer::GetDispatcher(server_ptr)));
server_session->ProcessPacketFromNetwork(
TestPacketIn("Somethingsomething"));
server_session->ProcessPacketFromNetwork(TestPacketIn(long_data));
server_session->ProcessPacketFromNetwork(TestPacketIn(long_data));
});
EXPECT_TRUE(client.WaitForDataSize(3, QuicTime::Delta::FromSeconds(5)));
EXPECT_THAT(client.data(),
ElementsAre(TestPacketOut("Somethingsomething"),
TestPacketOut(long_data), TestPacketOut(long_data)));
client.Disconnect();
server_thread.Quit();
server_thread.Join();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_client.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_client_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d03b60b5-e83c-4edb-98fa-654bafca637a | cpp | tensorflow/tensorflow | compile_tf_graph | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/monitoring/sampler.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/tpu/tpu_compile.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
using ::tensorflow::tpu::FunctionToHloArgs;
using ::tensorflow::tpu::GuaranteedConsts;
using ::tensorflow::tpu::MlirToHloArgs;
using ::tensorflow::tpu::ShardingAndIndex;
auto* phase2_bridge_compilation_status =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v1/"
"phase2_compilation_status",
"Tracks the compilation status of the non-mlir bridge",
"status" );
auto* phase2_bridge_compilation_time = tsl::monitoring::Sampler<1>::New(
{"/tensorflow/core/tf2xla/api/v1/phase2_compilation_time",
"The wall-clock time spent on executing graphs in milliseconds.",
"configuration"},
{tsl::monitoring::Buckets::Exponential(1, 1.5, 45)});
constexpr char kOldBridgeNoMlirSuccess[] = "kOldBridgeNoMlirSuccess";
constexpr char kOldBridgeNoMlirFailure[] = "kOldBridgeNoMlirFailure";
namespace {
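// Measures elapsed CPU cycles and converts them to milliseconds for the
// compilation-time metric below.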
struct CompilationTimer {
uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64 ElapsedCycles() {
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;
}
int64_t ElapsedCyclesInMilliseconds() {
std::chrono::duration<double> duration =
profile_utils::CpuUtils::ConvertClockCycleToTime(ElapsedCycles());
return std::chrono::duration_cast<std::chrono::milliseconds>(duration)
.count();
}
};
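// Copies tf.aliasing_output attributes from the MLIR entry function's
// arguments into the XLA HLO input/output alias config.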
Status PopulateInputOutputAliasing(
mlir::func::FuncOp main_fn,
XlaCompiler::CompilationResult* compilation_result, bool use_tuple_args) {
constexpr char kAliasingAttr[] = "tf.aliasing_output";
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
unsigned num_arguments = main_fn.getNumArguments();
for (unsigned arg_index = 0; arg_index < num_arguments; ++arg_index) {
if (auto aliasing_output = main_fn.getArgAttrOfType<mlir::IntegerAttr>(
arg_index, kAliasingAttr))
output_to_input_alias[aliasing_output.getInt()] = arg_index;
}
if (output_to_input_alias.empty()) return absl::OkStatus();
xla::HloModuleProto* module_proto =
compilation_result->computation->mutable_proto();
absl::StatusOr<xla::ProgramShape> program_shape_or_status =
compilation_result->computation->GetProgramShape();
TF_RET_CHECK(program_shape_or_status.ok());
xla::ProgramShape& program_shape = program_shape_or_status.value();
if (!program_shape.result().IsTuple())
return errors::Internal("Expect result to have tuple shape");
xla::HloInputOutputAliasConfig config(program_shape.result());
for (auto alias : output_to_input_alias) {
if (use_tuple_args) {
TF_RETURN_IF_ERROR(config.SetUpAlias(
xla::ShapeIndex({alias.first}), 0, xla::ShapeIndex({alias.second}),
xla::HloInputOutputAliasConfig::AliasKind::kMayAlias));
} else {
TF_RETURN_IF_ERROR(config.SetUpAlias(
xla::ShapeIndex({alias.first}), alias.second, xla::ShapeIndex({}),
xla::HloInputOutputAliasConfig::AliasKind::kMayAlias));
}
}
*module_proto->mutable_input_output_alias() = config.ToProto();
return absl::OkStatus();
}
bool failed(const absl::Status& status) { return !status.ok(); }
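// Lowers the module (TPU export prep, functional control flow, shape
// inference, executor dialect) and exports the entry function into
// |flib_def|.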
Status PrepareAndExportToLibrary(mlir::ModuleOp module,
FunctionLibraryDefinition* flib_def) {
mlir::PassManager manager(module.getContext());
applyTensorflowAndCLOptions(manager);
manager.addPass(mlir::TF::CreatePrepareTpuComputationForTfExportPass());
manager.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
manager.addPass(mlir::TF::CreateTFShapeInferencePass());
manager.addNestedPass<mlir::func::FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
manager.addPass(mlir::CreateBreakUpIslandsPass());
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
if (VLOG_IS_ON(2)) {
llvm::StringRef module_name = llvm::StringRef();
constexpr const char* kDebugGroupBridgePhase2 =
"v1_prepare_and_export_to_library";
internal::EnablePassIRPrinting(manager, kDebugGroupBridgePhase2,
module_name);
}
auto prepare_status = manager.run(module);
auto diag_handler_status = diag_handler.ConsumeStatus();
if (failed(prepare_status) || failed(diag_handler_status)) {
return diag_handler_status;
}
GraphExportConfig config;
config.export_entry_func_to_flib = true;
absl::flat_hash_set<Node*> control_ret_nodes;
return tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module, config, nullptr, flib_def, &control_ret_nodes);
}
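// Non-MLIR path: compiles the TF function directly to HLO and bumps the
// phase-2 success/failure counter accordingly.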
absl::Status CompileTFFunctionWithoutMlir(
FunctionToHloArgs function_computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
Status comp_status = CompileTFFunctionToHlo(
*function_computation.flib_def, function_computation.graph_def_version,
shape_determination_funcs, arg_shapes,
function_computation.guaranteed_constants, *function_computation.function,
metadata, client, arg_core_mapping, per_core_arg_shapes, use_tuple_args,
compilation_result);
if (comp_status.ok()) {
phase2_bridge_compilation_status->GetCell(kOldBridgeNoMlirSuccess)
->IncrementBy(1);
} else {
phase2_bridge_compilation_status->GetCell(kOldBridgeNoMlirFailure)
->IncrementBy(1);
}
return comp_status;
}
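// MLIR path: deserializes the module, sets TPU infeed layouts, exports it
// into a function library, compiles the resulting "main" function to HLO,
// and finally restores input/output aliasing.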
absl::Status CompileMLIRTFFunction(
tpu::MlirToHloArgs mlir_computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
TF_RETURN_IF_ERROR(DeserializeMlirModule(mlir_computation.mlir_module,
&context, &mlir_module));
if (!mlir::SetTPUInfeedLayout(mlir_module))
return errors::Internal("Failed to set layouts attribute");
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("legalize_with_old_bridge", mlir_module.get());
}
constexpr char kEntryFuncName[] = "main";
auto main_fn = mlir_module->lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!main_fn) {
return errors::Internal(
"TPU compile op requires module with a entry function main");
}
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
TF_RETURN_IF_ERROR(PrepareAndExportToLibrary(*mlir_module, flib_def.get()));
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("legalize_with_old_bridge_post_transform",
mlir_module.get());
}
VersionDef versions;
if (mlir::failed(ExtractTfVersions(*mlir_module, &versions))) {
return errors::Internal(
"module attribute in _TPUCompileMlir op is missing tf versions.");
}
NameAttrList func;
func.set_name(kEntryFuncName);
GuaranteedConsts consts;
*compilation_result = {};
TF_RETURN_IF_ERROR(CompileTFFunctionToHlo(
*flib_def, versions.producer(), shape_determination_funcs, arg_shapes,
consts, func, metadata, client, arg_core_mapping, per_core_arg_shapes,
use_tuple_args, compilation_result));
return PopulateInputOutputAliasing(main_fn, compilation_result,
use_tuple_args);
}
}
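// Entry point: dispatches on the computation variant (MLIR module vs. TF
// function) and records wall time under the matching streamz label.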
absl::Status CompileTensorflowGraphToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
"old (non-MLIR) tf2xla bridge";
CompilationTimer timer;
*compilation_result = {};
bool has_mlir = computation.index() == 0;
std::string mlir_string = has_mlir ? "has_mlir" : "has_function_to_hlo";
const std::string kBridgePhase2Config =
absl::StrCat("graph_old_bridge_", mlir_string);
if (has_mlir) {
TF_RETURN_IF_ERROR(CompileMLIRTFFunction(
std::get<0>(computation), metadata, use_tuple_args,
shape_determination_funcs, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result));
} else {
FunctionToHloArgs function_computation = std::get<1>(computation);
TF_RETURN_IF_ERROR(CompileTFFunctionWithoutMlir(
function_computation, metadata, use_tuple_args,
shape_determination_funcs, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result));
}
phase2_bridge_compilation_time->GetCell(kBridgePhase2Config)
->Add(timer.ElapsedCyclesInMilliseconds());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/monitoring/test_utils.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::tpu::FunctionToHloArgs;
using ::tensorflow::tpu::MlirToHloArgs;
using ::tensorflow::tpu::ShardingAndIndex;
using ::tsl::monitoring::testing::Histogram;
static constexpr char kCompilationTimeStreamzName[] =
"/tensorflow/core/tf2xla/api/v1/phase2_compilation_time";
static constexpr char kCompilationStatusStreamzName[] =
"/tensorflow/core/tf2xla/api/v1/phase2_compilation_status";
static constexpr char kPlatformName[] = "Host";
constexpr char kEntryFuncName[] = "main";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
func.return
}
})";
MlirToHloArgs CreateTestMlirToHloArgs(const char* module_str = kMlirModuleStr) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
mlir_to_hlo_args.mlir_module = module_str;
return mlir_to_hlo_args;
}
class CompileTFGraphTest : public ::testing::Test {
public:
absl::StatusOr<XlaCompilationResult> CompileWithComputation(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>
computation) {
XlaCompilationResult compilation_result;
se::Platform* platform =
se::PlatformManager::PlatformWithName(kPlatformName).value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
tpu::TPUCompileMetadataProto metadata_proto;
std::vector<TensorShape> arg_shapes;
if (computation.index() == 0) {
TF_RETURN_IF_ERROR(tensorflow::tf2xla::internal::ConfigureMetadata(
std::get<0>(computation).mlir_module, arg_shapes, metadata_proto));
}
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
absl::Status compilation_status =
tensorflow::tf2xla::v1::CompileTensorflowGraphToHlo(
computation, metadata_proto, use_tuple_args,
shape_determination_fns, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes, client, &compilation_result);
if (!compilation_status.ok()) return compilation_status;
return compilation_result;
}
};
TEST_F(CompileTFGraphTest, RecordsStreamzForMlirFallback) {
CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
MlirToHloArgs mlir_to_hlo_args = CreateTestMlirToHloArgs();
TF_EXPECT_OK(CompileWithComputation(mlir_to_hlo_args).status());
Histogram histogram = compilation_time.Delta("graph_old_bridge_has_mlir");
EXPECT_EQ(histogram.num(), 1);
}
TEST_F(CompileTFGraphTest, RecordsStreamzForFunctionToHlo) {
CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
FunctionDef empty_function =
tensorflow::FunctionDefHelper::Create("empty", {}, {}, {}, {}, {});
tensorflow::FunctionDefLibrary fdef;
*(fdef.add_function()) = empty_function;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), fdef);
OpInputList guaranteed_constants;
NameAttrList function;
function.set_name("empty");
FunctionToHloArgs function_to_hlo_args = {&function,
&flib_def,
0,
{&guaranteed_constants}};
TF_EXPECT_OK(CompileWithComputation(function_to_hlo_args).status());
Histogram histogram =
compilation_time.Delta("graph_old_bridge_has_function_to_hlo");
EXPECT_EQ(histogram.num(), 1);
EXPECT_EQ(compilation_status.Delta("kOldBridgeNoMlirSuccess"), 1);
}
TEST_F(CompileTFGraphTest, SuccessfullyCompilesWithManualSharding) {
constexpr char kSupportedManualSharding[] = R"(
module @module___inference_tpu_function_41 attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1617 : i32}} {
func.func @main(%arg0: tensor<2x2xf32>) -> (tensor<2x2xf32> {mhlo.sharding = "\08\03\1A\02\02\01\22\02\00\01"}) {
%0 = tf_executor.graph {
%outputs, %control = tf_executor.island wraps "tf.XlaSharding"(%arg0) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x2xf32>) -> tensor<2x2xf32>
%outputs_0, %control_1 = tf_executor.island wraps "tf.XlaSharding"(%outputs) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
%outputs_2, %control_3 = tf_executor.island wraps "tf.XlaSpmdFullToShardShape"(%outputs_0) {dim = -1 : i64, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<1x2xf32>
%control_4 = tf_executor.island wraps "tf._XlaHostComputeMlir"(%outputs_2) {host_mlir_module = "", manual_sharding = true, recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"} : (tensor<1x2xf32>) -> ()
%outputs_5, %control_6 = tf_executor.island(%control_4) wraps "tf._XlaHostComputeMlir"() {host_mlir_module = "module {\0A func.func @host_func() -> tensor<1x2xf32> {\0A %0 = \22tf.Const\22() {value = dense<0.1> : tensor<1x2xf32>} : () -> tensor<1x2xf32> \0A return %0 : tensor<1x2xf32>}}", manual_sharding = true, recv_key = "host_compute_channel_1_retvals", send_key = "host_compute_channel_1_args"} : () -> tensor<1x2xf32>
%outputs_7, %control_8 = tf_executor.island wraps "tf.XlaSpmdShardToFullShape"(%outputs_5) {dim = -1 : i64, full_shape = #tf_type.shape<2x2>, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<1x2xf32>) -> tensor<2x2xf32>
%outputs_9, %control_10 = tf_executor.island wraps "tf.XlaSharding"(%outputs_7) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
tf_executor.fetch %outputs_9 : tensor<2x2xf32>
}
return %0 : tensor<2x2xf32>
}
}
)";
auto mlir_to_hlo_args = CreateTestMlirToHloArgs(kSupportedManualSharding);
auto result = CompileWithComputation(mlir_to_hlo_args);
EXPECT_TRUE(result.ok());
}
TEST_F(CompileTFGraphTest, DoesNotInlineStatelessRandomOps) {
static constexpr char kHasReturnValues[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<32x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
%cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
%cst_0 = "tf.Const"() {value = dense<[32, 64]> : tensor<2xi32>} : () -> tensor<2xi32>
%0 = "tf.StatelessRandomNormal"(%cst_0, %cst) : (tensor<2xi32>, tensor<2xi32>) -> tensor<32x64xf32>
return %0 : tensor<32x64xf32>
}
})";
auto compilation_result =
CompileWithComputation(CreateTestMlirToHloArgs(kHasReturnValues));
EXPECT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("tf.StatelessRandomNormal"));
}
TEST_F(CompileTFGraphTest, TestRunsShapeInference) {
static constexpr char kShapeInferenceModule[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = "tf.Const"() <{value = dense<-1> : tensor<3360x8xi32>}> : () -> tensor<3360x8xi32>
%cst_33 = "tf.Const"() <{value = dense<[1120, -1]> : tensor<2xi32>}> : () -> tensor<2xi32>
%cst_34 = "tf.Const"() <{value = dense<[3, 1120, -1]> : tensor<3xi32>}> : () -> tensor<3xi32>
%cst_63 = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
%1965:4 = "tf._XlaHostComputeMlir"(%0, %cst_34, %cst_63, %cst_33) <{host_mlir_module = "#loc1 = loc(\22Reshape:\22)\0A#loc2 = loc(\22Reshape_4\22)\0A#loc3 = loc(\22Reshape\22)\0A#loc9 = loc(fused[#loc1, #loc2, #loc3])\0Amodule {\0A func.func @host_func(%arg0: tensor<3360x?xi32> loc(fused[#loc1, #loc2, #loc3]), %arg1: tensor<3xi32> loc(fused[#loc1, #loc2, #loc3]), %arg2: tensor<i32> loc(fused[#loc1, #loc2, #loc3]), %arg3: tensor<2xi32> loc(fused[#loc1, #loc2, #loc3])) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>) {\0A %0 = \22tf.Reshape\22(%arg0, %arg1) {_xla_outside_compilation = \220\22} : (tensor<3360x?xi32>, tensor<3xi32>) -> tensor<3x1120x?xi32> loc(#loc9)\0A %1:3 = \22tf.Split\22(%arg2, %0) {_xla_outside_compilation = \220\22} : (tensor<i32>, tensor<3x1120x?xi32>) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1x1120x?xi32>) loc(#loc10)\0A %2 = \22tf.Reshape\22(%1#0, %arg3) {_xla_outside_compilation = \220\22} : (tensor<1x1120x?xi32>, tensor<2xi32>) -> tensor<1120x?xi32> loc(#loc11)\0A %3 = \22tf.Shape\22(%2) {_xla_outside_compilation = \220\22} : (tensor<1120x?xi32>) -> tensor<2xi32> loc(#loc12)\0A return %1#1, %1#2, %2, %3 : tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32> loc(#loc9)\0A } loc(#loc9)\0A} loc(#loc)\0A#loc = loc(unknown)\0A#loc4 = loc(\22Split:\22)\0A#loc5 = loc(\22split\22)\0A#loc6 = loc(\22Reshape_5\22)\0A#loc7 = loc(\22Shape:\22)\0A#loc8 = loc(\22Shape_4\22)\0A#loc10 = loc(fused[#loc4, #loc5])\0A#loc11 = loc(fused[#loc1, #loc6])\0A#loc12 = loc(fused[#loc7, #loc8])\0A", recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"}> : (tensor<3360x8xi32>, tensor<3xi32>, tensor<i32>, tensor<2xi32>) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>)
return
}
}
)";
auto compilation_result =
CompileWithComputation(CreateTestMlirToHloArgs(kShapeInferenceModule));
EXPECT_TRUE(compilation_result.ok());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
73f81048-c2c7-4461-8373-1ea5fef53afb | cpp | google/cel-cpp | legacy_struct_value | common/values/legacy_struct_value.cc | common/values/legacy_struct_value_test.cc | #include <cstddef>
#include <cstdint>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/call_once.h"
#include "absl/log/absl_check.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "base/internal/message_wrapper.h"
#include "common/json.h"
#include "common/type.h"
#include "common/value.h"
#include "internal/dynamic_loader.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
#if defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wreturn-type-c-linkage"
#endif
namespace cel::common_internal {
namespace {
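// Function-pointer types mirroring the legacy struct value C ABI; the
// implementations live in the legacy runtime and are resolved lazily below.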
using LegacyStructValue_DebugString = std::string (*)(uintptr_t, uintptr_t);
using LegacyStructValue_GetSerializedSize =
absl::StatusOr<size_t> (*)(uintptr_t, uintptr_t);
using LegacyStructValue_SerializeTo = absl::Status (*)(uintptr_t, uintptr_t,
absl::Cord&);
using LegacyStructValue_GetType = std::string (*)(uintptr_t, uintptr_t);
using LegacyStructValue_GetTypeName = absl::string_view (*)(uintptr_t,
uintptr_t);
using LegacyStructValue_ConvertToJsonObject =
absl::StatusOr<JsonObject> (*)(uintptr_t, uintptr_t);
using LegacyStructValue_GetFieldByName =
absl::Status (*)(uintptr_t, uintptr_t, ValueManager&, absl::string_view,
Value&, ProtoWrapperTypeOptions);
using LegacyStructValue_GetFieldByNumber =
absl::Status (*)(uintptr_t, uintptr_t, ValueManager&, int64_t, Value&,
ProtoWrapperTypeOptions);
using LegacyStructValue_HasFieldByName =
absl::StatusOr<bool> (*)(uintptr_t, uintptr_t, absl::string_view);
using LegacyStructValue_HasFieldByNumber = absl::StatusOr<bool> (*)(uintptr_t,
uintptr_t,
int64_t);
using LegacyStructValue_Equal = absl::Status (*)(uintptr_t, uintptr_t,
ValueManager&, const Value&,
Value&);
using LegacyStructValue_IsZeroValue = bool (*)(uintptr_t, uintptr_t);
using LegacyStructValue_ForEachField =
absl::Status (*)(uintptr_t, uintptr_t, ValueManager&,
LegacyStructValue::ForEachFieldCallback);
using LegacyStructValue_Qualify =
absl::StatusOr<int> (*)(uintptr_t, uintptr_t, ValueManager&,
absl::Span<const SelectQualifier>, bool, Value&);
ABSL_CONST_INIT struct {
absl::once_flag init_once;
LegacyStructValue_DebugString debug_string = nullptr;
LegacyStructValue_GetSerializedSize get_serialized_size = nullptr;
LegacyStructValue_SerializeTo serialize_to = nullptr;
LegacyStructValue_GetType get_type = nullptr;
LegacyStructValue_GetTypeName get_type_name = nullptr;
LegacyStructValue_ConvertToJsonObject convert_to_json_object = nullptr;
LegacyStructValue_GetFieldByName get_field_by_name = nullptr;
LegacyStructValue_GetFieldByNumber get_field_by_number = nullptr;
LegacyStructValue_HasFieldByName has_field_by_name = nullptr;
LegacyStructValue_HasFieldByNumber has_field_by_number = nullptr;
LegacyStructValue_Equal equal = nullptr;
LegacyStructValue_IsZeroValue is_zero_value = nullptr;
LegacyStructValue_ForEachField for_each_field = nullptr;
LegacyStructValue_Qualify qualify = nullptr;
} legacy_struct_value_vtable;
#if ABSL_HAVE_ATTRIBUTE_WEAK
extern "C" ABSL_ATTRIBUTE_WEAK std::string
cel_common_internal_LegacyStructValue_DebugString(uintptr_t message_ptr,
uintptr_t type_info);
extern "C" ABSL_ATTRIBUTE_WEAK absl::StatusOr<size_t>
cel_common_internal_LegacyStructValue_GetSerializedSize(uintptr_t message_ptr,
uintptr_t type_info);
extern "C" ABSL_ATTRIBUTE_WEAK absl::Status
cel_common_internal_LegacyStructValue_SerializeTo(uintptr_t message_ptr,
uintptr_t type_info,
absl::Cord& value);
extern "C" ABSL_ATTRIBUTE_WEAK std::string
cel_common_internal_LegacyStructValue_GetType(uintptr_t message_ptr,
uintptr_t type_info);
extern "C" ABSL_ATTRIBUTE_WEAK absl::string_view
cel_common_internal_LegacyStructValue_GetTypeName(uintptr_t message_ptr,
uintptr_t type_info);
extern "C" ABSL_ATTRIBUTE_WEAK absl::StatusOr<JsonObject>
cel_common_internal_LegacyStructValue_ConvertToJsonObject(uintptr_t message_ptr,
uintptr_t type_info);
extern "C" ABSL_ATTRIBUTE_WEAK absl::Status
cel_common_internal_LegacyStructValue_GetFieldByName(
uintptr_t message_ptr, uintptr_t type_info, ValueManager& value_manager,
absl::string_view name, Value& result,
ProtoWrapperTypeOptions unboxing_options);
extern "C" ABSL_ATTRIBUTE_WEAK absl::Status
cel_common_internal_LegacyStructValue_GetFieldByNumber(uintptr_t, uintptr_t,
ValueManager&, int64_t,
Value&,
ProtoWrapperTypeOptions);
extern "C" ABSL_ATTRIBUTE_WEAK absl::StatusOr<bool>
cel_common_internal_LegacyStructValue_HasFieldByName(uintptr_t message_ptr,
uintptr_t type_info,
absl::string_view name);
extern "C" ABSL_ATTRIBUTE_WEAK absl::StatusOr<bool>
cel_common_internal_LegacyStructValue_HasFieldByNumber(uintptr_t, uintptr_t,
int64_t);
extern "C" ABSL_ATTRIBUTE_WEAK absl::Status
cel_common_internal_LegacyStructValue_Equal(uintptr_t message_ptr,
uintptr_t type_info,
ValueManager& value_manager,
const Value& other, Value& result);
extern "C" ABSL_ATTRIBUTE_WEAK bool
cel_common_internal_LegacyStructValue_IsZeroValue(uintptr_t message_ptr,
uintptr_t type_info);
extern "C" ABSL_ATTRIBUTE_WEAK absl::Status
cel_common_internal_LegacyStructValue_ForEachField(
uintptr_t message_ptr, uintptr_t type_info, ValueManager& value_manager,
StructValue::ForEachFieldCallback callback);
extern "C" ABSL_ATTRIBUTE_WEAK absl::StatusOr<int>
cel_common_internal_LegacyStructValue_Qualify(
uintptr_t message_ptr, uintptr_t type_info, ValueManager& value_manager,
absl::Span<const SelectQualifier> qualifiers, bool presence_test,
Value& result);
#endif
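// Resolves the vtable exactly once: with weak symbols the linker supplies
// the functions directly, otherwise they are looked up at runtime through
// the dynamic loader.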
void InitializeLegacyStructValue() {
absl::call_once(legacy_struct_value_vtable.init_once, []() -> void {
#if ABSL_HAVE_ATTRIBUTE_WEAK
legacy_struct_value_vtable.debug_string = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_DebugString);
legacy_struct_value_vtable.get_serialized_size =
ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_GetSerializedSize);
legacy_struct_value_vtable.serialize_to = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_SerializeTo);
legacy_struct_value_vtable.get_type = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_GetType);
legacy_struct_value_vtable.get_type_name = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_GetTypeName);
legacy_struct_value_vtable.convert_to_json_object =
ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_ConvertToJsonObject);
legacy_struct_value_vtable.get_field_by_name =
ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_GetFieldByName);
legacy_struct_value_vtable.get_field_by_number =
ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_GetFieldByNumber);
legacy_struct_value_vtable.has_field_by_name =
ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_HasFieldByName);
legacy_struct_value_vtable.has_field_by_number =
ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_HasFieldByNumber);
legacy_struct_value_vtable.equal = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_Equal);
legacy_struct_value_vtable.is_zero_value = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_IsZeroValue);
legacy_struct_value_vtable.for_each_field = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_ForEachField);
legacy_struct_value_vtable.qualify = ABSL_DIE_IF_NULL(
cel_common_internal_LegacyStructValue_Qualify);
#else
internal::DynamicLoader symbol_finder;
legacy_struct_value_vtable.debug_string = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_DebugString");
legacy_struct_value_vtable.get_serialized_size =
symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_GetSerializedSize");
legacy_struct_value_vtable.serialize_to = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_SerializeTo");
legacy_struct_value_vtable.get_type = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_GetType");
legacy_struct_value_vtable.get_type_name = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_GetTypeName");
legacy_struct_value_vtable.convert_to_json_object =
symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_ConvertToJsonObject");
legacy_struct_value_vtable.get_field_by_name =
symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_GetFieldByName");
legacy_struct_value_vtable.get_field_by_number =
symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_GetFieldByNumber");
legacy_struct_value_vtable.has_field_by_name =
symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_HasFieldByName");
legacy_struct_value_vtable.has_field_by_number =
symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_HasFieldByNumber");
legacy_struct_value_vtable.equal = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_Equal");
legacy_struct_value_vtable.is_zero_value = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_IsZeroValue");
legacy_struct_value_vtable.for_each_field = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_ForEachField");
legacy_struct_value_vtable.qualify = symbol_finder.FindSymbolOrDie(
"cel_common_internal_LegacyStructValue_Qualify");
#endif
});
}
}
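// message_ptr_ is a tagged pointer: judging by the wrapper tag masks, the
// tag says whether it wraps a full proto Message (descriptor available) or
// only a type name.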
StructType LegacyStructValue::GetRuntimeType() const {
InitializeLegacyStructValue();
if ((message_ptr_ & ::cel::base_internal::kMessageWrapperTagMask) ==
::cel::base_internal::kMessageWrapperTagMessageValue) {
return MessageType(
google::protobuf::DownCastMessage<google::protobuf::Message>(
reinterpret_cast<const google::protobuf::MessageLite*>(
message_ptr_ & ::cel::base_internal::kMessageWrapperPtrMask))
->GetDescriptor());
}
return common_internal::MakeBasicStructType(GetTypeName());
}
absl::string_view LegacyStructValue::GetTypeName() const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.get_type_name)(message_ptr_, type_info_);
}
std::string LegacyStructValue::DebugString() const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.debug_string)(message_ptr_, type_info_);
}
absl::Status LegacyStructValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.serialize_to)(message_ptr_, type_info_,
value);
}
absl::StatusOr<Json> LegacyStructValue::ConvertToJson(
AnyToJsonConverter& value_manager) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.convert_to_json_object)(message_ptr_,
type_info_);
}
absl::Status LegacyStructValue::Equal(ValueManager& value_manager,
const Value& other, Value& result) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.equal)(message_ptr_, type_info_,
value_manager, other, result);
}
bool LegacyStructValue::IsZeroValue() const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.is_zero_value)(message_ptr_, type_info_);
}
absl::Status LegacyStructValue::GetFieldByName(
ValueManager& value_manager, absl::string_view name, Value& result,
ProtoWrapperTypeOptions unboxing_options) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.get_field_by_name)(
message_ptr_, type_info_, value_manager, name, result, unboxing_options);
}
absl::Status LegacyStructValue::GetFieldByNumber(
ValueManager& value_manager, int64_t number, Value& result,
ProtoWrapperTypeOptions unboxing_options) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.get_field_by_number)(
message_ptr_, type_info_, value_manager, number, result,
unboxing_options);
}
absl::StatusOr<bool> LegacyStructValue::HasFieldByName(
absl::string_view name) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.has_field_by_name)(message_ptr_,
type_info_, name);
}
absl::StatusOr<bool> LegacyStructValue::HasFieldByNumber(int64_t number) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.has_field_by_number)(message_ptr_,
type_info_, number);
}
absl::Status LegacyStructValue::ForEachField(
ValueManager& value_manager, ForEachFieldCallback callback) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.for_each_field)(message_ptr_, type_info_,
value_manager, callback);
}
absl::StatusOr<int> LegacyStructValue::Qualify(
ValueManager& value_manager, absl::Span<const SelectQualifier> qualifiers,
bool presence_test, Value& result) const {
InitializeLegacyStructValue();
return (*legacy_struct_value_vtable.qualify)(message_ptr_, type_info_,
value_manager, qualifiers,
presence_test, result);
}
bool IsLegacyStructValue(const Value& value) {
return absl::holds_alternative<LegacyStructValue>(value.variant_);
}
LegacyStructValue GetLegacyStructValue(const Value& value) {
ABSL_DCHECK(IsLegacyStructValue(value));
return absl::get<LegacyStructValue>(value.variant_);
}
absl::optional<LegacyStructValue> AsLegacyStructValue(const Value& value) {
if (IsLegacyStructValue(value)) {
return GetLegacyStructValue(value);
}
return absl::nullopt;
}
}
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif | #include "common/values/legacy_struct_value.h"
#include "common/memory.h"
#include "common/value_kind.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel::common_internal {
namespace {
using ::testing::_;
class LegacyStructValueTest : public ThreadCompatibleValueTest<> {};
TEST_P(LegacyStructValueTest, Kind) {
EXPECT_EQ(LegacyStructValue(0, 0).kind(), ValueKind::kStruct);
}
TEST_P(LegacyStructValueTest, GetTypeName) {
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).GetTypeName()), _);
}
TEST_P(LegacyStructValueTest, DebugString) {
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).DebugString()), _);
}
TEST_P(LegacyStructValueTest, SerializeTo) {
absl::Cord serialize_value;
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).SerializeTo(
value_manager(), serialize_value)),
_);
}
TEST_P(LegacyStructValueTest, ConvertToJson) {
EXPECT_DEATH(
static_cast<void>(LegacyStructValue(0, 0).ConvertToJson(value_manager())),
_);
}
TEST_P(LegacyStructValueTest, GetFieldByName) {
Value scratch;
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).GetFieldByName(
value_manager(), "", scratch)),
_);
}
TEST_P(LegacyStructValueTest, GetFieldByNumber) {
Value scratch;
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).GetFieldByNumber(
value_manager(), 0, scratch)),
_);
}
TEST_P(LegacyStructValueTest, HasFieldByName) {
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).HasFieldByName("")),
_);
}
TEST_P(LegacyStructValueTest, HasFieldByNumber) {
EXPECT_DEATH(static_cast<void>(LegacyStructValue(0, 0).HasFieldByNumber(0)),
_);
}
INSTANTIATE_TEST_SUITE_P(
LegacyStructValueTest, LegacyStructValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
LegacyStructValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/legacy_struct_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/legacy_struct_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
e94eef46-9cde-4093-b44d-8e89d991ead3 | cpp | tensorflow/tensorflow | gpu_kernel | third_party/xla/xla/stream_executor/gpu/gpu_kernel.h | third_party/xla/xla/stream_executor/gpu/gpu_kernel_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_GPU_KERNEL_H_
#define XLA_STREAM_EXECUTOR_GPU_GPU_KERNEL_H_
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
namespace stream_executor::gpu {
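// Thin interface over a loaded GPU kernel exposing the platform function
// handle; AsGpuKernel performs an unchecked downcast from Kernel.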
class GpuKernel : public Kernel {
public:
virtual GpuFunctionHandle gpu_function() const = 0;
};
inline const GpuKernel* AsGpuKernel(const Kernel* kernel) {
return static_cast<const GpuKernel*>(kernel);
}
inline GpuKernel* AsGpuKernel(Kernel* kernel) {
return static_cast<GpuKernel*>(kernel);
}
}
#endif | #include <cstdint>
#include <memory>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/gpu/gpu_test_kernels_fatbin.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
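// Factory alias for a kernel taking three int32 device buffers (a, b, out).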
using AddI32Kernel =
TypedKernelFactory<DeviceMemory<int32_t>, DeviceMemory<int32_t>,
DeviceMemory<int32_t>>;
class GpuKernelTest : public ::testing::Test {
public:
void SetUp() override {
auto name = absl::AsciiStrToUpper(
xla::PlatformUtil::CanonicalPlatformName("gpu").value());
Platform* platform = PlatformManager::PlatformWithName(name).value();
executor_ = platform->ExecutorForDevice(0).value();
}
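// Launches the AddI32 kernel over four elements with a=1 and b=2 and
// expects every element of the output buffer to be 3.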
void RunAddI32Kernel(const MultiKernelLoaderSpec& spec) {
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor_->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto add, AddI32Kernel::Create(executor_, spec));
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
DeviceMemory<int32_t> a = executor_->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> b = executor_->AllocateArray<int32_t>(length, 0);
DeviceMemory<int32_t> c = executor_->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, 2, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
ASSERT_TRUE(
stream->ThenLaunch(ThreadDim(), BlockDim(4), add, a, b, c).ok());
std::vector<int32_t> dst(4, 42);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<int32_t> expected = {3, 3, 3, 3};
ASSERT_EQ(dst, expected);
}
StreamExecutor* executor_;
};
TEST_F(GpuKernelTest, LoadAndRunKernelFromPtx) {
if (executor_->GetPlatform()->id() ==
stream_executor::rocm::kROCmPlatformId) {
GTEST_SKIP() << "There is no PTX or any equivalent abstraction for ROCm.";
}
MultiKernelLoaderSpec spec(3);
spec.AddCudaPtxInMemory(internal::kAddI32KernelPtx, "AddI32");
RunAddI32Kernel(spec);
}
TEST_F(GpuKernelTest, LoadAndRunKernelFromCubin) {
MultiKernelLoaderSpec spec(3);
TF_ASSERT_OK_AND_ASSIGN(auto binary, GetGpuTestKernelsFatbin());
spec.AddCudaCubinInMemory(binary, "AddI32");
RunAddI32Kernel(spec);
}
TEST_F(GpuKernelTest, LoadAndRunKernelFromSymbol) {
MultiKernelLoaderSpec spec(3);
spec.AddInProcessSymbol(internal::GetAddI32Kernel(), "AddI32");
RunAddI32Kernel(spec);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_kernel.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/gpu_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d944fc09-2809-4955-8b7e-fb42464a0952 | cpp | google/quiche | balsa_headers | quiche/balsa/balsa_headers.cc | quiche/balsa/balsa_headers_test.cc | #include "quiche/balsa/balsa_headers.h"
#include <sys/types.h>
#include <cstdint>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "quiche/balsa/balsa_enums.h"
#include "quiche/balsa/header_properties.h"
#include "quiche/common/platform/api/quiche_header_policy.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace {
constexpr absl::string_view kContentLength("Content-Length");
constexpr absl::string_view kCookie("Cookie");
constexpr absl::string_view kHost("Host");
constexpr absl::string_view kTransferEncoding("Transfer-Encoding");
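// Headers Envoy treats as potentially multivalued; used to build the set
// returned by multivalued_envoy_headers() below.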
#define ALL_ENVOY_HEADERS(HEADER_FUNC) \
HEADER_FUNC("Accept") \
HEADER_FUNC("Accept-Encoding") \
HEADER_FUNC("Access-Control-Request-Headers") \
HEADER_FUNC("Access-Control-Request-Method") \
HEADER_FUNC("Access-Control-Allow-Origin") \
HEADER_FUNC("Access-Control-Allow-Headers") \
HEADER_FUNC("Access-Control-Allow-Methods") \
HEADER_FUNC("Access-Control-Allow-Credentials") \
HEADER_FUNC("Access-Control-Expose-Headers") \
HEADER_FUNC("Access-Control-Max-Age") \
HEADER_FUNC("Authorization") \
HEADER_FUNC("Cache-Control") \
HEADER_FUNC("X-Client-Trace-Id") \
HEADER_FUNC("Connection") \
HEADER_FUNC("Content-Encoding") \
HEADER_FUNC("Content-Length") \
HEADER_FUNC("Content-Type") \
\
HEADER_FUNC("Envoy-Attempt-Count") \
HEADER_FUNC("Envoy-Degraded") \
HEADER_FUNC("Envoy-Decorator-Operation") \
HEADER_FUNC("Envoy-Downstream-Service-Cluster") \
HEADER_FUNC("Envoy-Downstream-Service-Node") \
HEADER_FUNC("Envoy-Expected-Request-Timeout-Ms") \
HEADER_FUNC("Envoy-External-Address") \
HEADER_FUNC("Envoy-Force-Trace") \
HEADER_FUNC("Envoy-Hedge-On-Per-Try-Timeout") \
HEADER_FUNC("Envoy-Immediate-Health-Check-Fail") \
HEADER_FUNC("Envoy-Internal-Request") \
HEADER_FUNC("Envoy-Ip-Tags") \
HEADER_FUNC("Envoy-Max-Retries") \
HEADER_FUNC("Envoy-Original-Path") \
HEADER_FUNC("Envoy-Original-Url") \
HEADER_FUNC("Envoy-Overloaded") \
HEADER_FUNC("Envoy-Rate-Limited") \
HEADER_FUNC("Envoy-Retry-On") \
HEADER_FUNC("Envoy-Retry-Grpc-On") \
HEADER_FUNC("Envoy-Retriable-StatusCodes") \
HEADER_FUNC("Envoy-Retriable-HeaderNames") \
HEADER_FUNC("Envoy-Upstream-AltStatName") \
HEADER_FUNC("Envoy-Upstream-Canary") \
HEADER_FUNC("Envoy-Upstream-HealthCheckedCluster") \
HEADER_FUNC("Envoy-Upstream-RequestPerTryTimeoutMs") \
HEADER_FUNC("Envoy-Upstream-RequestTimeoutAltResponse") \
HEADER_FUNC("Envoy-Upstream-RequestTimeoutMs") \
HEADER_FUNC("Envoy-Upstream-ServiceTime") \
HEADER_FUNC("Etag") \
HEADER_FUNC("Expect") \
HEADER_FUNC("X-Forwarded-Client-Cert") \
HEADER_FUNC("X-Forwarded-For") \
HEADER_FUNC("X-Forwarded-Proto") \
HEADER_FUNC("Grpc-Accept-Encoding") \
HEADER_FUNC("Grpc-Message") \
HEADER_FUNC("Grpc-Status") \
HEADER_FUNC("Grpc-Timeout") \
HEADER_FUNC("Host") \
HEADER_FUNC("Keep-Alive") \
\
\
HEADER_FUNC("Method") \
HEADER_FUNC("No-Chunks") \
HEADER_FUNC("Origin") \
HEADER_FUNC("X-Ot-Span-Context") \
HEADER_FUNC("Path") \
HEADER_FUNC("Protocol") \
HEADER_FUNC("Proxy-Connection") \
HEADER_FUNC("Referer") \
HEADER_FUNC("X-Request-Id") \
HEADER_FUNC("Scheme") \
HEADER_FUNC("Server") \
HEADER_FUNC("Status") \
HEADER_FUNC("TE") \
HEADER_FUNC("Transfer-Encoding") \
HEADER_FUNC("Upgrade") \
HEADER_FUNC("User-Agent") \
HEADER_FUNC("Vary") \
HEADER_FUNC("Via")
#define MULTIVALUE_ENVOY_HEADER(name) {name},
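// Case-insensitive substring search: returns the offset of |needle| within
// |haystack|, or absl::string_view::npos if absent.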
absl::string_view::difference_type FindIgnoreCase(absl::string_view haystack,
absl::string_view needle) {
absl::string_view::difference_type pos = 0;
while (haystack.size() >= needle.size()) {
if (absl::StartsWithIgnoreCase(haystack, needle)) {
return pos;
}
++pos;
haystack.remove_prefix(1);
}
return absl::string_view::npos;
}
absl::string_view::difference_type RemoveLeadingWhitespace(
absl::string_view* text) {
size_t count = 0;
const char* ptr = text->data();
while (count < text->size() && absl::ascii_isspace(*ptr)) {
count++;
ptr++;
}
text->remove_prefix(count);
return count;
}
absl::string_view::difference_type RemoveTrailingWhitespace(
absl::string_view* text) {
size_t count = 0;
const char* ptr = text->data() + text->size() - 1;
while (count < text->size() && absl::ascii_isspace(*ptr)) {
++count;
--ptr;
}
text->remove_suffix(count);
return count;
}
absl::string_view::difference_type RemoveWhitespaceContext(
absl::string_view* text) {
return RemoveLeadingWhitespace(text) + RemoveTrailingWhitespace(text);
}
}
namespace quiche {
const size_t BalsaBuffer::kDefaultBlocksize;
const BalsaHeaders::MultivaluedHeadersSet&
BalsaHeaders::multivalued_envoy_headers() {
static const MultivaluedHeadersSet* multivalued_envoy_headers =
new MultivaluedHeadersSet({ALL_ENVOY_HEADERS(MULTIVALUE_ENVOY_HEADER)});
return *multivalued_envoy_headers;
}
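// Splits a comma/whitespace-separated header value into string_view tokens
// that point into the original buffer.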
void BalsaHeaders::ParseTokenList(absl::string_view header_value,
HeaderTokenList* tokens) {
if (header_value.empty()) {
return;
}
const char* start = header_value.data();
const char* end = header_value.data() + header_value.size();
while (true) {
while (*start == ',' || static_cast<unsigned char>(*start) <= ' ') {
++start;
if (start == end) {
return;
}
}
const char* nws = start;
while (*start != ',' && static_cast<unsigned char>(*start) > ' ') {
++start;
if (start == end) {
if (nws != start) {
tokens->push_back(absl::string_view(nws, start - nws));
}
return;
}
}
tokens->push_back(absl::string_view(nws, start - nws));
}
}
void BalsaHeaders::Clear() {
balsa_buffer_.Clear();
transfer_encoding_is_chunked_ = false;
content_length_ = 0;
content_length_status_ = BalsaHeadersEnums::NO_CONTENT_LENGTH;
parsed_response_code_ = 0;
firstline_buffer_base_idx_ = 0;
whitespace_1_idx_ = 0;
non_whitespace_1_idx_ = 0;
whitespace_2_idx_ = 0;
non_whitespace_2_idx_ = 0;
whitespace_3_idx_ = 0;
non_whitespace_3_idx_ = 0;
whitespace_4_idx_ = 0;
header_lines_.clear();
header_lines_.shrink_to_fit();
}
void BalsaHeaders::CopyFrom(const BalsaHeaders& other) {
if (this == &other) {
return;
}
balsa_buffer_.CopyFrom(other.balsa_buffer_);
transfer_encoding_is_chunked_ = other.transfer_encoding_is_chunked_;
content_length_ = other.content_length_;
content_length_status_ = other.content_length_status_;
parsed_response_code_ = other.parsed_response_code_;
firstline_buffer_base_idx_ = other.firstline_buffer_base_idx_;
whitespace_1_idx_ = other.whitespace_1_idx_;
non_whitespace_1_idx_ = other.non_whitespace_1_idx_;
whitespace_2_idx_ = other.whitespace_2_idx_;
non_whitespace_2_idx_ = other.non_whitespace_2_idx_;
whitespace_3_idx_ = other.whitespace_3_idx_;
non_whitespace_3_idx_ = other.non_whitespace_3_idx_;
whitespace_4_idx_ = other.whitespace_4_idx_;
header_lines_ = other.header_lines_;
}
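// Writes "key: value" into the balsa buffer and fills |d| with the offsets
// of the key, the value, and the end of the line within that block.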
void BalsaHeaders::AddAndMakeDescription(absl::string_view key,
absl::string_view value,
HeaderLineDescription* d) {
QUICHE_CHECK(d != nullptr);
if (enforce_header_policy_) {
QuicheHandleHeaderPolicy(key);
}
size_t line_size = key.size() + 2 + value.size();
BalsaBuffer::Blocks::size_type block_buffer_idx = 0;
char* storage = balsa_buffer_.Reserve(line_size, &block_buffer_idx);
size_t base_idx = storage - GetPtr(block_buffer_idx);
char* cur_loc = storage;
memcpy(cur_loc, key.data(), key.size());
cur_loc += key.size();
*cur_loc = ':';
++cur_loc;
*cur_loc = ' ';
++cur_loc;
memcpy(cur_loc, value.data(), value.size());
*d = HeaderLineDescription(
base_idx, base_idx + key.size(), base_idx + key.size() + 2,
base_idx + key.size() + 2 + value.size(), block_buffer_idx);
}
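// Rewrites the line described by `*d` as "key: old_value,value" in newly
// reserved storage; repeated appends therefore re-copy the existing value
// each time. Falls back to AddAndMakeDescription() when there is no old
// value.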
void BalsaHeaders::AppendAndMakeDescription(absl::string_view key,
absl::string_view value,
HeaderLineDescription* d) {
size_t old_value_size = d->last_char_idx - d->value_begin_idx;
if (old_value_size == 0) {
AddAndMakeDescription(key, value, d);
return;
}
absl::string_view old_value(GetPtr(d->buffer_base_idx) + d->value_begin_idx,
old_value_size);
BalsaBuffer::Blocks::size_type block_buffer_idx = 0;
size_t new_size = key.size() + 3 + old_value_size + value.size();
char* storage = balsa_buffer_.Reserve(new_size, &block_buffer_idx);
size_t base_idx = storage - GetPtr(block_buffer_idx);
absl::string_view first_value = old_value;
absl::string_view second_value = value;
char* cur_loc = storage;
memcpy(cur_loc, key.data(), key.size());
cur_loc += key.size();
*cur_loc = ':';
++cur_loc;
*cur_loc = ' ';
++cur_loc;
memcpy(cur_loc, first_value.data(), first_value.size());
cur_loc += first_value.size();
*cur_loc = ',';
++cur_loc;
memcpy(cur_loc, second_value.data(), second_value.size());
*d = HeaderLineDescription(base_idx, base_idx + key.size(),
base_idx + key.size() + 2, base_idx + new_size,
block_buffer_idx);
}
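// Keeps the cached Content-Length and Transfer-Encoding state consistent
// when a header with special meaning is about to be removed: resets the
// content-length fields (unless the message is chunked) or clears the
// chunked flag.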
void BalsaHeaders::MaybeClearSpecialHeaderValues(absl::string_view key) {
if (absl::EqualsIgnoreCase(key, kContentLength)) {
if (transfer_encoding_is_chunked_) {
return;
}
content_length_status_ = BalsaHeadersEnums::NO_CONTENT_LENGTH;
content_length_ = 0;
return;
}
if (absl::EqualsIgnoreCase(key, kTransferEncoding)) {
transfer_encoding_is_chunked_ = false;
}
}
void BalsaHeaders::RemoveAllOfHeaderStartingAt(absl::string_view key,
HeaderLines::iterator start) {
MaybeClearSpecialHeaderValues(key);
while (start != header_lines_.end()) {
start->skip = true;
++start;
start = GetHeaderLinesIterator(key, start);
}
}
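// Replaces the value of `key` in place, reusing the first matching line and
// leaving any later duplicates marked as skipped; appends a new line when
// the key is absent.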
void BalsaHeaders::ReplaceOrAppendHeader(absl::string_view key,
absl::string_view value) {
const HeaderLines::iterator end = header_lines_.end();
const HeaderLines::iterator begin = header_lines_.begin();
HeaderLines::iterator i = GetHeaderLinesIterator(key, begin);
if (i != end) {
RemoveAllOfHeaderStartingAt(key, i);
AddAndMakeDescription(key, value, &(*i));
return;
}
AppendHeader(key, value);
}
void BalsaHeaders::AppendHeader(absl::string_view key,
absl::string_view value) {
HeaderLineDescription hld;
AddAndMakeDescription(key, value, &hld);
header_lines_.push_back(hld);
}
void BalsaHeaders::AppendToHeader(absl::string_view key,
absl::string_view value) {
HeaderLines::iterator i = GetHeaderLinesIterator(key, header_lines_.begin());
if (i == header_lines_.end()) {
AppendHeader(key, value);
return;
}
HeaderLineDescription hld = *i;
AppendAndMakeDescription(key, value, &hld);
i->skip = true;
header_lines_.push_back(hld);
}
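// Like AppendToHeader(), but joins the new value onto the last line of a
// multivalued header with ", ", e.g. building
// "X-Forwarded-For: 1.1.1.1, 2.2.2.2" across successive calls.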
void BalsaHeaders::AppendToHeaderWithCommaAndSpace(absl::string_view key,
absl::string_view value) {
HeaderLines::iterator i = GetHeaderLinesIteratorForLastMultivaluedHeader(key);
if (i == header_lines_.end()) {
AppendHeader(key, value);
return;
}
std::string space_and_value = absl::StrCat(" ", value);
HeaderLineDescription hld = *i;
AppendAndMakeDescription(key, space_and_value, &hld);
i->skip = true;
header_lines_.push_back(hld);
}
absl::string_view BalsaHeaders::GetValueFromHeaderLineDescription(
const HeaderLineDescription& line) const {
QUICHE_DCHECK_GE(line.last_char_idx, line.value_begin_idx);
return absl::string_view(GetPtr(line.buffer_base_idx) + line.value_begin_idx,
line.last_char_idx - line.value_begin_idx);
}
absl::string_view BalsaHeaders::GetHeader(absl::string_view key) const {
QUICHE_DCHECK(!header_properties::IsMultivaluedHeader(key))
<< "Header '" << key << "' may consist of multiple lines. Do not "
<< "use BalsaHeaders::GetHeader() or you may be missing some of its "
<< "values.";
const HeaderLines::const_iterator end = header_lines_.end();
HeaderLines::const_iterator i = GetConstHeaderLinesIterator(key);
if (i == end) {
return absl::string_view();
}
return GetValueFromHeaderLineDescription(*i);
}
BalsaHeaders::const_header_lines_iterator BalsaHeaders::GetHeaderPosition(
absl::string_view key) const {
const HeaderLines::const_iterator end = header_lines_.end();
HeaderLines::const_iterator i = GetConstHeaderLinesIterator(key);
if (i == end) {
return lines().end();
}
return const_header_lines_iterator(this, (i - header_lines_.begin()));
}
BalsaHeaders::const_header_lines_key_iterator BalsaHeaders::GetIteratorForKey(
absl::string_view key) const {
HeaderLines::const_iterator i = GetConstHeaderLinesIterator(key);
if (i == header_lines_.end()) {
return header_lines_key_end();
}
return const_header_lines_key_iterator(this, (i - header_lines_.begin()),
key);
}
BalsaHeaders::HeaderLines::const_iterator
BalsaHeaders::GetConstHeaderLinesIterator(absl::string_view key) const {
const HeaderLines::const_iterator end = header_lines_.end();
for (HeaderLines::const_iterator i = header_lines_.begin(); i != end; ++i) {
const HeaderLineDescription& line = *i;
if (line.skip) {
continue;
}
const absl::string_view current_key(
GetPtr(line.buffer_base_idx) + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
if (absl::EqualsIgnoreCase(current_key, key)) {
QUICHE_DCHECK_GE(line.last_char_idx, line.value_begin_idx);
return i;
}
}
return end;
}
BalsaHeaders::HeaderLines::iterator BalsaHeaders::GetHeaderLinesIterator(
absl::string_view key, BalsaHeaders::HeaderLines::iterator start) {
const HeaderLines::iterator end = header_lines_.end();
for (HeaderLines::iterator i = start; i != end; ++i) {
const HeaderLineDescription& line = *i;
if (line.skip) {
continue;
}
const absl::string_view current_key(
GetPtr(line.buffer_base_idx) + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
if (absl::EqualsIgnoreCase(current_key, key)) {
QUICHE_DCHECK_GE(line.last_char_idx, line.value_begin_idx);
return i;
}
}
return end;
}
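// Linear scan that returns the last non-skipped line whose key matches, or
// end() when no line matches.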
BalsaHeaders::HeaderLines::iterator
BalsaHeaders::GetHeaderLinesIteratorForLastMultivaluedHeader(
absl::string_view key) {
const HeaderLines::iterator end = header_lines_.end();
HeaderLines::iterator last_found_match;
bool found_a_match = false;
for (HeaderLines::iterator i = header_lines_.begin(); i != end; ++i) {
const HeaderLineDescription& line = *i;
if (line.skip) {
continue;
}
const absl::string_view current_key(
GetPtr(line.buffer_base_idx) + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
if (absl::EqualsIgnoreCase(current_key, key)) {
QUICHE_DCHECK_GE(line.last_char_idx, line.value_begin_idx);
last_found_match = i;
found_a_match = true;
}
}
return (found_a_match ? last_found_match : end);
}
void BalsaHeaders::GetAllOfHeader(absl::string_view key,
std::vector<absl::string_view>* out) const {
for (const_header_lines_key_iterator it = GetIteratorForKey(key);
it != lines().end(); ++it) {
out->push_back(it->second);
}
}
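// Collects the values for `key` in two passes: live lines first, then lines
// that have been marked as skipped, so removed values sort after current
// ones in `*out`.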
void BalsaHeaders::GetAllOfHeaderIncludeRemoved(
absl::string_view key, std::vector<absl::string_view>* out) const {
const HeaderLines::const_iterator begin = header_lines_.begin();
const HeaderLines::const_iterator end = header_lines_.end();
for (bool add_removed : {false, true}) {
for (HeaderLines::const_iterator i = begin; i != end; ++i) {
const HeaderLineDescription& line = *i;
if ((!add_removed && line.skip) || (add_removed && !line.skip)) {
continue;
}
const absl::string_view current_key(
GetPtr(line.buffer_base_idx) + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
if (absl::EqualsIgnoreCase(current_key, key)) {
QUICHE_DCHECK_GE(line.last_char_idx, line.value_begin_idx);
out->push_back(GetValueFromHeaderLineDescription(line));
}
}
}
}
namespace {
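// Returns true when line[idx, end_idx) is bounded on both sides only by
// spaces and commas (or the ends of the line), i.e. when it forms a complete
// comma-separated token rather than a substring of one.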
bool SurroundedOnlyBySpacesAndCommas(absl::string_view::difference_type idx,
absl::string_view::difference_type end_idx,
absl::string_view line) {
for (idx = idx - 1; idx >= 0; --idx) {
if (line[idx] == ',') {
break;
}
if (line[idx] != ' ') {
return false;
}
}
for (; end_idx < static_cast<int64_t>(line.size()); ++end_idx) {
if (line[end_idx] == ',') {
break;
}
if (line[end_idx] != ' ') {
return false;
}
}
return true;
}
}  // namespace
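// Returns true if any comma-separated token of the named header equals
// `value` (optionally ignoring case); tokens may be padded with spaces.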
bool BalsaHeaders::HeaderHasValueHelper(absl::string_view key,
absl::string_view value,
bool case_sensitive) const {
for (const_header_lines_key_iterator it = GetIteratorForKey(key);
it != lines().end(); ++it) {
absl::string_view line = it->second;
absl::string_view::size_type idx =
case_sensitive ? line.find(value, 0) : FindIgnoreCase(line, value);
while (idx != absl::string_view::npos) {
absl::string_view::difference_type end_idx = idx + value.size();
if (SurroundedOnlyBySpacesAndCommas(idx, end_idx, line)) {
return true;
}
idx = line.find(value, idx + 1);
}
}
return false;
}
bool BalsaHeaders::HasNonEmptyHeader(absl::string_view key) const {
for (const_header_lines_key_iterator it = GetIteratorForKey(key);
it != header_lines_key_end(); ++it) {
if (!it->second.empty()) {
return true;
}
}
return false;
}
std::string BalsaHeaders::GetAllOfHeaderAsString(absl::string_view key) const {
auto formatter = [](std::string* out,
std::pair<absl::string_view, absl::string_view> header) {
return absl::AlphaNumFormatter()(out, header.second);
};
return absl::StrJoin(GetIteratorForKey(key), header_lines_key_end(), ",",
formatter);
}
void BalsaHeaders::RemoveAllOfHeaderInList(const HeaderTokenList& keys) {
if (keys.empty()) {
return;
}
absl::flat_hash_set<std::string> lowercase_keys;
lowercase_keys.reserve(keys.size());
for (const auto& key : keys) {
MaybeClearSpecialHeaderValues(key);
lowercase_keys.insert(absl::AsciiStrToLower(key));
}
for (HeaderLineDescription& line : header_lines_) {
if (line.skip) {
continue;
}
const size_t key_len = line.key_end_idx - line.first_char_idx;
absl::string_view key(GetPtr(line.buffer_base_idx) + line.first_char_idx,
key_len);
std::string lowercase_key = absl::AsciiStrToLower(key);
if (lowercase_keys.count(lowercase_key) != 0) {
line.skip = true;
}
}
}
void BalsaHeaders::RemoveAllOfHeader(absl::string_view key) {
HeaderLines::iterator it = GetHeaderLinesIterator(key, header_lines_.begin());
RemoveAllOfHeaderStartingAt(key, it);
}
void BalsaHeaders::RemoveAllHeadersWithPrefix(absl::string_view prefix) {
for (HeaderLines::size_type i = 0; i < header_lines_.size(); ++i) {
if (header_lines_[i].skip) {
continue;
}
HeaderLineDescription& line = header_lines_[i];
const size_t key_len = line.key_end_idx - line.first_char_idx;
if (key_len < prefix.size()) {
continue;
}
const absl::string_view current_key_prefix(
GetPtr(line.buffer_base_idx) + line.first_char_idx, prefix.size());
if (absl::EqualsIgnoreCase(current_key_prefix, prefix)) {
const absl::string_view current_key(
GetPtr(line.buffer_base_idx) + line.first_char_idx, key_len);
MaybeClearSpecialHeaderValues(current_key);
line.skip = true;
}
}
}
bool BalsaHeaders::HasHeadersWithPrefix(absl::string_view prefix) const {
for (HeaderLines::size_type i = 0; i < header_lines_.size(); ++i) {
if (header_lines_[i].skip) {
continue;
}
const HeaderLineDescription& line = header_lines_[i];
if (line.key_end_idx - line.first_char_idx < prefix.size()) {
continue;
}
const absl::string_view current_key_prefix(
GetPtr(line.buffer_base_idx) + line.first_char_idx, prefix.size());
if (absl::EqualsIgnoreCase(current_key_prefix, prefix)) {
return true;
}
}
return false;
}
void BalsaHeaders::GetAllOfHeaderWithPrefix(
absl::string_view prefix,
std::vector<std::pair<absl::string_view, absl::string_view>>* out) const {
for (HeaderLines::size_type i = 0; i < header_lines_.size(); ++i) {
if (header_lines_[i].skip) {
continue;
}
const HeaderLineDescription& line = header_lines_[i];
absl::string_view key(GetPtr(line.buffer_base_idx) + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
if (absl::StartsWithIgnoreCase(key, prefix)) {
out->push_back(std::make_pair(
key,
absl::string_view(GetPtr(line.buffer_base_idx) + line.value_begin_idx,
line.last_char_idx - line.value_begin_idx)));
}
}
}
void BalsaHeaders::GetAllHeadersWithLimit(
std::vector<std::pair<absl::string_view, absl::string_view>>* out,
int limit) const {
for (HeaderLines::size_type i = 0; i < header_lines_.size(); ++i) {
if (limit >= 0 && out->size() >= static_cast<size_t>(limit)) {
return;
}
if (header_lines_[i].skip) {
continue;
}
const HeaderLineDescription& line = header_lines_[i];
absl::string_view key(GetPtr(line.buffer_base_idx) + line.first_char_idx,
line.key_end_idx - line.first_char_idx);
out->push_back(std::make_pair(
key,
absl::string_view(GetPtr(line.buffer_base_idx) + line.value_begin_idx,
line.last_char_idx - line.value_begin_idx)));
}
}
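// Removes every comma-separated instance of `search_value` from the named
// header, compacting the surviving tokens in place, and returns the number
// of instances removed. For example, given "key: a, b, a",
// RemoveValue("key", "a") returns 2 and leaves "key: b". `search_value`
// must not be padded with whitespace (QUICHE_BUG otherwise), and an empty
// value removes nothing.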
size_t BalsaHeaders::RemoveValue(absl::string_view key,
absl::string_view search_value) {
absl::string_view needle = search_value;
RemoveWhitespaceContext(&needle);
QUICHE_BUG_IF(bug_22783_2, needle != search_value)
<< "Search value should not be surrounded by spaces.";
if (needle.empty()) {
return 0;
}
size_t removals = 0;
for (HeaderLines::iterator it =
GetHeaderLinesIterator(key, header_lines_.begin());
it != header_lines_.end(); it = GetHeaderLinesIterator(key, ++it)) {
HeaderLineDescription* line = &(*it);
if (line->ValuesLength() < needle.size()) {
continue;
}
char* buf = GetPtr(line->buffer_base_idx);
char* value_begin = buf + line->value_begin_idx;
absl::string_view values(value_begin, line->ValuesLength());
RemoveWhitespaceContext(&values);
if (values.size() == needle.size()) {
if (values == needle) {
line->skip = true;
removals++;
}
continue;
}
char* insertion = value_begin;
while (values.size() >= needle.size()) {
ssize_t cur_leading_whitespace = RemoveLeadingWhitespace(&values);
bool found = absl::StartsWith(values, needle);
const size_t next_comma =
values.find(',', (found ? needle.size() : 0));
const bool comma_found = next_comma != absl::string_view::npos;
const size_t cur_size = (comma_found ? next_comma + 1 : values.size());
if (found && cur_size != needle.size()) {
absl::string_view cur(values.data(), cur_size);
if (comma_found) {
cur.remove_suffix(1);
}
RemoveTrailingWhitespace(&cur);
found = (cur.size() == needle.size());
}
if (found) {
removals++;
if (!comma_found) {
insertion--;
}
} else {
if (insertion + cur_leading_whitespace != values.data()) {
memmove(insertion, values.data(), cur_size);
insertion += cur_size;
} else {
insertion += cur_leading_whitespace + cur_size;
}
}
values.remove_prefix(cur_size);
}
if (!values.empty()) {
if (insertion != values.data()) {
memmove(insertion, values.data(), values.size());
}
insertion += values.size();
}
if (insertion <= value_begin) {
line->skip = true;
} else {
line->last_char_idx = insertion - buf;
}
}
return removals;
}
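// Returns an upper bound on the bytes needed to serialize the headers: the
// first line plus CRLF, "key: value\r\n" for every live header line, and the
// final blank line.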
size_t BalsaHeaders::GetSizeForWriteBuffer() const {
size_t write_buf_size = whitespace_4_idx_ - non_whitespace_1_idx_ + 2;
const HeaderLines::size_type end = header_lines_.size();
for (HeaderLines::size_type i = 0; i < end; ++i) {
const HeaderLineDescription& line = header_lines_[i];
if (!line.skip) {
write_buf_size += line.key_end_idx - line.first_char_idx + 2;
write_buf_size += line.last_char_idx - line.value_begin_idx + 2;
}
}
return write_buf_size + 2;
}
void BalsaHeaders::DumpToString(std::string* str) const {
DumpToPrefixedString(" ", str);
}
std::string BalsaHeaders::DebugString() const {
std::string s;
DumpToString(&s);
return s;
}
bool BalsaHeaders::ForEachHeader(
quiche::UnretainedCallback<bool(const absl::string_view key,
const absl::string_view value)>
fn) const {
int s = header_lines_.size();
for (int i = 0; i < s; ++i) {
const HeaderLineDescription& desc = header_lines_[i];
if (!desc.skip && desc.KeyLength() > 0) {
const char* stream_begin = GetPtr(desc.buffer_base_idx);
if (!fn(absl::string_view(stream_begin + desc.first_char_idx,
desc.KeyLength()),
absl::string_view(stream_begin + desc.value_begin_idx,
desc.ValuesLength()))) {
return false;
}
}
}
return true;
}
void BalsaHeaders::DumpToPrefixedString(const char* spaces,
std::string* str) const {
const absl::string_view firstline = first_line();
const int buffer_length = GetReadableBytesFromHeaderStream();
if (firstline.empty() && buffer_length == 0) {
absl::StrAppend(str, "\n", spaces, "<empty header>\n");
return;
}
if (!FramerIsDoneWriting()) {
absl::StrAppendFormat(str, "\n%s<incomplete header len: %d>\n%s%.*s\n",
spaces, buffer_length, spaces, buffer_length,
OriginalHeaderStreamBegin());
return;
}
str->reserve(str->size() + GetSizeForWriteBuffer());
absl::StrAppend(str, "\n", spaces, firstline, "\n");
for (const auto& line : lines()) {
absl::StrAppend(str, spaces, line.first, ": ", line.second, "\n");
}
}
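// Updates the cached content-length state and the Content-Length header,
// removing a stale Content-Length or a conflicting "Transfer-Encoding:
// chunked" first. A no-op when the same valid length is already set.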
void BalsaHeaders::SetContentLength(size_t length) {
if (content_length_status_ == BalsaHeadersEnums::VALID_CONTENT_LENGTH &&
content_length_ == length) {
return;
}
if (content_length_status_ != BalsaHeadersEnums::NO_CONTENT_LENGTH) {
RemoveAllOfHeader(kContentLength);
} else if (transfer_encoding_is_chunked_) {
RemoveAllOfHeader(kTransferEncoding);
}
content_length_status_ = BalsaHeadersEnums::VALID_CONTENT_LENGTH;
content_length_ = length;
AppendHeader(kContentLength, absl::StrCat(length));
}
void BalsaHeaders::SetTransferEncodingToChunkedAndClearContentLength() {
if (transfer_encoding_is_chunked_) {
return;
}
if (content_length_status_ != BalsaHeadersEnums::NO_CONTENT_LENGTH) {
ClearContentLength();
}
ReplaceOrAppendHeader(kTransferEncoding, "chunked");
transfer_encoding_is_chunked_ = true;
}
void BalsaHeaders::SetNoTransferEncoding() {
if (transfer_encoding_is_chunked_) {
RemoveAllOfHeader(kTransferEncoding);
}
}
void BalsaHeaders::ClearContentLength() { RemoveAllOfHeader(kContentLength); }
bool BalsaHeaders::IsEmpty() const {
return balsa_buffer_.GetTotalBytesUsed() == 0;
}
absl::string_view BalsaHeaders::Authority() const { return GetHeader(kHost); }
void BalsaHeaders::ReplaceOrAppendAuthority(absl::string_view value) {
ReplaceOrAppendHeader(kHost, value);
}
void BalsaHeaders::RemoveAuthority() { RemoveAllOfHeader(kHost); }
void BalsaHeaders::ApplyToCookie(
quiche::UnretainedCallback<void(absl::string_view cookie)> f) const {
f(GetHeader(kCookie));
}
void BalsaHeaders::SetResponseFirstline(absl::string_view version,
size_t parsed_response_code,
absl::string_view reason_phrase) {
SetFirstlineFromStringPieces(version, absl::StrCat(parsed_response_code),
reason_phrase);
parsed_response_code_ = parsed_response_code;
}
void BalsaHeaders::SetFirstlineFromStringPieces(absl::string_view firstline_a,
absl::string_view firstline_b,
absl::string_view firstline_c) {
size_t line_size =
(firstline_a.size() + firstline_b.size() + firstline_c.size() + 2);
char* storage = balsa_buffer_.Reserve(line_size, &firstline_buffer_base_idx_);
char* cur_loc = storage;
memcpy(cur_loc, firstline_a.data(), firstline_a.size());
cur_loc += firstline_a.size();
*cur_loc = ' ';
++cur_loc;
memcpy(cur_loc, firstline_b.data(), firstline_b.size());
cur_loc += firstline_b.size();
*cur_loc = ' ';
++cur_loc;
memcpy(cur_loc, firstline_c.data(), firstline_c.size());
whitespace_1_idx_ = storage - BeginningOfFirstLine();
non_whitespace_1_idx_ = whitespace_1_idx_;
whitespace_2_idx_ = non_whitespace_1_idx_ + firstline_a.size();
non_whitespace_2_idx_ = whitespace_2_idx_ + 1;
whitespace_3_idx_ = non_whitespace_2_idx_ + firstline_b.size();
non_whitespace_3_idx_ = whitespace_3_idx_ + 1;
whitespace_4_idx_ = non_whitespace_3_idx_ + firstline_c.size();
}
void BalsaHeaders::SetRequestMethod(absl::string_view method) {
if (method.size() <= (whitespace_2_idx_ - non_whitespace_1_idx_)) {
non_whitespace_1_idx_ = whitespace_2_idx_ - method.size();
if (!method.empty()) {
char* stream_begin = BeginningOfFirstLine();
memcpy(stream_begin + non_whitespace_1_idx_, method.data(),
method.size());
}
} else {
SetRequestFirstlineFromStringPieces(method, request_uri(),
request_version());
}
}
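// In a response first line the version, status code, and reason phrase
// occupy the same three slots that the method, URI, and version occupy in a
// request, so the response setters below delegate to the positional request
// setters.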
void BalsaHeaders::SetResponseVersion(absl::string_view version) {
SetRequestMethod(version);
}
void BalsaHeaders::SetRequestUri(absl::string_view uri) {
SetRequestFirstlineFromStringPieces(request_method(), uri, request_version());
}
void BalsaHeaders::SetResponseCode(absl::string_view code) {
SetRequestUri(code);
}
void BalsaHeaders::SetParsedResponseCodeAndUpdateFirstline(
size_t parsed_response_code) {
parsed_response_code_ = parsed_response_code;
SetResponseCode(absl::StrCat(parsed_response_code));
}
void BalsaHeaders::SetRequestVersion(absl::string_view version) {
bool fits_in_space_allowed =
version.size() + 1 <= whitespace_4_idx_ - whitespace_3_idx_;
if (!fits_in_space_allowed) {
SetRequestFirstlineFromStringPieces(request_method(), request_uri(),
version);
return;
}
char* stream_begin = BeginningOfFirstLine();
*(stream_begin + whitespace_3_idx_) = ' ';
non_whitespace_3_idx_ = whitespace_3_idx_ + 1;
whitespace_4_idx_ = non_whitespace_3_idx_ + version.size();
memcpy(stream_begin + non_whitespace_3_idx_, version.data(), version.size());
}
void BalsaHeaders::SetResponseReasonPhrase(absl::string_view reason) {
SetRequestVersion(reason);
}
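// Truncates the last comma-separated token from the final header line for
// `key`; if the value is empty or holds a single token, the whole line is
// skipped instead.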
void BalsaHeaders::RemoveLastTokenFromHeaderValue(absl::string_view key) {
BalsaHeaders::HeaderLines::iterator it =
GetHeaderLinesIterator(key, header_lines_.begin());
if (it == header_lines_.end()) {
QUICHE_DLOG(WARNING)
<< "Attempting to remove last token from a non-existent "
<< "header \"" << key << "\"";
return;
}
BalsaHeaders::HeaderLines::iterator header_line;
do {
header_line = it;
it = GetHeaderLinesIterator(key, it + 1);
} while (it != header_lines_.end());
BalsaHeaders::HeaderTokenList tokens;
absl::string_view value(
GetPtr(header_line->buffer_base_idx) + header_line->value_begin_idx,
header_line->last_char_idx - header_line->value_begin_idx);
ParseTokenList(value, &tokens);
if (tokens.empty()) {
QUICHE_DLOG(WARNING)
<< "Attempting to remove a token from an empty header value "
<< "for header \"" << key << "\"";
header_line->skip = true;
} else if (tokens.size() == 1) {
header_line->skip = true;
} else {
absl::string_view new_last_token = tokens[tokens.size() - 2];
const char* last_char_address =
new_last_token.data() + new_last_token.size() - 1;
const char* const stream_begin = GetPtr(header_line->buffer_base_idx);
header_line->last_char_idx = last_char_address - stream_begin + 1;
}
}
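// Informational (1xx), 204 (No Content), and 304 (Not Modified) responses
// must not carry a message body.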
bool BalsaHeaders::ResponseCanHaveBody(int response_code) {
if (response_code >= 100 && response_code < 200) {
return false;
}
return (response_code != 204) && (response_code != 304);
}
}  // namespace quiche | #include "quiche/balsa/balsa_headers.h"
#include <cstring>
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "quiche/balsa/balsa_enums.h"
#include "quiche/balsa/balsa_frame.h"
#include "quiche/balsa/simple_buffer.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using absl::make_unique;
using testing::AnyOf;
using testing::Combine;
using testing::ElementsAre;
using testing::Eq;
using testing::StrEq;
using testing::ValuesIn;
namespace quiche {
namespace test {
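// Exposes BalsaHeaders::WriteFromFramer() to the tests below.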
class BalsaHeadersTestPeer {
public:
static void WriteFromFramer(BalsaHeaders* headers, const char* ptr,
size_t size) {
headers->WriteFromFramer(ptr, size);
}
};
namespace {
class BalsaBufferTest : public QuicheTest {
public:
void CreateBuffer(size_t blocksize) {
buffer_ = std::make_unique<BalsaBuffer>(blocksize);
}
void CreateBuffer() { buffer_ = std::make_unique<BalsaBuffer>(); }
static std::unique_ptr<BalsaBuffer> CreateUnmanagedBuffer(size_t blocksize) {
return std::make_unique<BalsaBuffer>(blocksize);
}
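  // Copies `sp` into freshly reserved buffer space and returns a view over
  // the stored bytes. A null `block_buffer_idx` is tolerated, as exercised by
  // the Write(data, nullptr) calls below.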
absl::string_view Write(absl::string_view sp, size_t* block_buffer_idx) {
if (sp.empty()) {
return sp;
}
char* storage = buffer_->Reserve(sp.size(), block_buffer_idx);
memcpy(storage, sp.data(), sp.size());
return absl::string_view(storage, sp.size());
}
protected:
std::unique_ptr<BalsaBuffer> buffer_;
};
using BufferBlock = BalsaBuffer::BufferBlock;
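// Builds a BufferBlock over a copy of `s`. Note that buffer_size is reported
// as 2 * s.size() while only s.size() bytes are allocated; bytes_free is set
// so that the bytes-used count matches the allocation.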
BufferBlock MakeBufferBlock(const std::string& s) {
BufferBlock block{make_unique<char[]>(s.size()), s.size() * 2, s.size()};
std::memcpy(block.buffer.get(), s.data(), s.size());
return block;
}
BalsaHeaders CreateHTTPHeaders(bool request, absl::string_view s) {
BalsaHeaders headers;
BalsaFrame framer;
framer.set_is_request(request);
framer.set_balsa_headers(&headers);
QUICHE_CHECK_EQ(s.size(), framer.ProcessInput(s.data(), s.size()));
QUICHE_CHECK(framer.MessageFullyRead());
return headers;
}
class BufferBlockTest
: public QuicheTestWithParam<std::tuple<const char*, const char*>> {};
TEST_P(BufferBlockTest, CopyFrom) {
const std::string s1 = std::get<0>(GetParam());
const std::string s2 = std::get<1>(GetParam());
BufferBlock block;
block.CopyFrom(MakeBufferBlock(s1));
EXPECT_EQ(s1.size(), block.bytes_free);
ASSERT_EQ(2 * s1.size(), block.buffer_size);
EXPECT_EQ(0, memcmp(s1.data(), block.buffer.get(), s1.size()));
block.CopyFrom(MakeBufferBlock(s2));
EXPECT_EQ(s2.size(), block.bytes_free);
ASSERT_EQ(2 * s2.size(), block.buffer_size);
EXPECT_EQ(0, memcmp(s2.data(), block.buffer.get(), s2.size()));
}
const char* block_strings[] = {"short string", "longer than the other string"};
INSTANTIATE_TEST_SUITE_P(VariousSizes, BufferBlockTest,
Combine(ValuesIn(block_strings),
ValuesIn(block_strings)));
TEST_F(BalsaBufferTest, BlocksizeSet) {
CreateBuffer();
EXPECT_EQ(BalsaBuffer::kDefaultBlocksize, buffer_->blocksize());
CreateBuffer(1024);
EXPECT_EQ(1024u, buffer_->blocksize());
}
TEST_F(BalsaBufferTest, GetMemorySize) {
CreateBuffer(10);
EXPECT_EQ(0u, buffer_->GetTotalBytesUsed());
EXPECT_EQ(0u, buffer_->GetTotalBufferBlockSize());
BalsaBuffer::Blocks::size_type index;
buffer_->Reserve(1024, &index);
EXPECT_EQ(10u + 1024u, buffer_->GetTotalBufferBlockSize());
EXPECT_EQ(1024u, buffer_->GetTotalBytesUsed());
}
TEST_F(BalsaBufferTest, ManyWritesToContiguousBuffer) {
CreateBuffer(0);
std::string data = "0123456789";
for (int i = 0; i < 120 * 1000; ++i) {
buffer_->WriteToContiguousBuffer(data);
}
}
TEST_F(BalsaBufferTest, CopyFrom) {
CreateBuffer(10);
std::unique_ptr<BalsaBuffer> ptr = CreateUnmanagedBuffer(1024);
ASSERT_EQ(1024u, ptr->blocksize());
EXPECT_EQ(0u, ptr->num_blocks());
std::string data1 = "foobarbaz01";
buffer_->WriteToContiguousBuffer(data1);
buffer_->NoMoreWriteToContiguousBuffer();
std::string data2 = "12345";
Write(data2, nullptr);
std::string data3 = "6789";
Write(data3, nullptr);
std::string data4 = "123456789012345";
Write(data4, nullptr);
ptr->CopyFrom(*buffer_);
EXPECT_EQ(ptr->can_write_to_contiguous_buffer(),
buffer_->can_write_to_contiguous_buffer());
ASSERT_EQ(ptr->num_blocks(), buffer_->num_blocks());
for (size_t i = 0; i < buffer_->num_blocks(); ++i) {
ASSERT_EQ(ptr->bytes_used(i), buffer_->bytes_used(i));
ASSERT_EQ(ptr->buffer_size(i), buffer_->buffer_size(i));
EXPECT_EQ(0,
memcmp(ptr->GetPtr(i), buffer_->GetPtr(i), ptr->bytes_used(i)));
}
}
TEST_F(BalsaBufferTest, ClearWorks) {
CreateBuffer(10);
std::string data1 = "foobarbaz01";
buffer_->WriteToContiguousBuffer(data1);
buffer_->NoMoreWriteToContiguousBuffer();
std::string data2 = "12345";
Write(data2, nullptr);
std::string data3 = "6789";
Write(data3, nullptr);
std::string data4 = "123456789012345";
Write(data4, nullptr);
buffer_->Clear();
EXPECT_TRUE(buffer_->can_write_to_contiguous_buffer());
EXPECT_EQ(10u, buffer_->blocksize());
EXPECT_EQ(0u, buffer_->num_blocks());
}
TEST_F(BalsaBufferTest, ClearWorksWhenLargerThanBlocksize) {
CreateBuffer(10);
std::string data1 = "foobarbaz01lkjasdlkjasdlkjasd";
buffer_->WriteToContiguousBuffer(data1);
buffer_->NoMoreWriteToContiguousBuffer();
std::string data2 = "12345";
Write(data2, nullptr);
std::string data3 = "6789";
Write(data3, nullptr);
std::string data4 = "123456789012345";
Write(data4, nullptr);
buffer_->Clear();
EXPECT_TRUE(buffer_->can_write_to_contiguous_buffer());
EXPECT_EQ(10u, buffer_->blocksize());
EXPECT_EQ(0u, buffer_->num_blocks());
}
TEST_F(BalsaBufferTest, ContiguousWriteSmallerThanBlocksize) {
CreateBuffer(1024);
std::string data1 = "foo";
buffer_->WriteToContiguousBuffer(data1);
std::string composite = data1;
const char* buf_ptr = buffer_->GetPtr(0);
ASSERT_LE(composite.size(), buffer_->buffer_size(0));
EXPECT_EQ(0, memcmp(composite.data(), buf_ptr, composite.size()));
std::string data2 = "barbaz";
buffer_->WriteToContiguousBuffer(data2);
composite += data2;
buf_ptr = buffer_->GetPtr(0);
ASSERT_LE(composite.size(), buffer_->buffer_size(0));
EXPECT_EQ(0, memcmp(composite.data(), buf_ptr, composite.size()));
}
TEST_F(BalsaBufferTest, SingleContiguousWriteLargerThanBlocksize) {
CreateBuffer(10);
std::string data1 = "abracadabrawords";
buffer_->WriteToContiguousBuffer(data1);
std::string composite = data1;
const char* buf_ptr = buffer_->GetPtr(0);
ASSERT_LE(data1.size(), buffer_->buffer_size(0));
EXPECT_EQ(0, memcmp(composite.data(), buf_ptr, composite.size()))
<< composite << "\n"
<< absl::string_view(buf_ptr, buffer_->bytes_used(0));
}
TEST_F(BalsaBufferTest, ContiguousWriteLargerThanBlocksize) {
CreateBuffer(10);
std::string data1 = "123456789";
buffer_->WriteToContiguousBuffer(data1);
std::string composite = data1;
ASSERT_LE(10u, buffer_->buffer_size(0));
std::string data2 = "0123456789";
buffer_->WriteToContiguousBuffer(data2);
composite += data2;
const char* buf_ptr = buffer_->GetPtr(0);
ASSERT_LE(composite.size(), buffer_->buffer_size(0));
EXPECT_EQ(0, memcmp(composite.data(), buf_ptr, composite.size()))
<< "composite: " << composite << "\n"
<< " actual: " << absl::string_view(buf_ptr, buffer_->bytes_used(0));
}
TEST_F(BalsaBufferTest, TwoContiguousWritesLargerThanBlocksize) {
CreateBuffer(5);
std::string data1 = "123456";
buffer_->WriteToContiguousBuffer(data1);
std::string composite = data1;
ASSERT_LE(composite.size(), buffer_->buffer_size(0));
std::string data2 = "7890123";
buffer_->WriteToContiguousBuffer(data2);
composite += data2;
const char* buf_ptr = buffer_->GetPtr(0);
ASSERT_LE(composite.size(), buffer_->buffer_size(0));
EXPECT_EQ(0, memcmp(composite.data(), buf_ptr, composite.size()))
<< "composite: " << composite << "\n"
<< " actual: " << absl::string_view(buf_ptr, buffer_->bytes_used(0));
}
TEST_F(BalsaBufferTest, WriteSmallerThanBlocksize) {
CreateBuffer(5);
std::string data1 = "1234";
size_t block_idx = 0;
absl::string_view write_result = Write(data1, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data1));
CreateBuffer(5);
data1 = "1234";
block_idx = 0;
write_result = Write(data1, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data1));
}
TEST_F(BalsaBufferTest, TwoWritesSmallerThanBlocksizeThenAnotherWrite) {
CreateBuffer(10);
std::string data1 = "12345";
size_t block_idx = 0;
absl::string_view write_result = Write(data1, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data1));
std::string data2 = "data2";
block_idx = 0;
write_result = Write(data2, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data2));
std::string data3 = "data3";
block_idx = 0;
write_result = Write(data3, &block_idx);
ASSERT_EQ(2u, block_idx);
EXPECT_THAT(write_result, StrEq(data3));
CreateBuffer(10);
buffer_->NoMoreWriteToContiguousBuffer();
data1 = "12345";
block_idx = 0;
write_result = Write(data1, &block_idx);
ASSERT_EQ(0u, block_idx);
EXPECT_THAT(write_result, StrEq(data1));
data2 = "data2";
block_idx = 0;
write_result = Write(data2, &block_idx);
ASSERT_EQ(0u, block_idx);
EXPECT_THAT(write_result, StrEq(data2));
data3 = "data3";
block_idx = 0;
write_result = Write(data3, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data3));
}
TEST_F(BalsaBufferTest, WriteLargerThanBlocksize) {
CreateBuffer(5);
std::string data1 = "123456789";
size_t block_idx = 0;
absl::string_view write_result = Write(data1, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data1));
CreateBuffer(5);
buffer_->NoMoreWriteToContiguousBuffer();
data1 = "123456789";
block_idx = 0;
write_result = Write(data1, &block_idx);
ASSERT_EQ(1u, block_idx);
EXPECT_THAT(write_result, StrEq(data1));
}
TEST_F(BalsaBufferTest, ContiguousThenTwoSmallerThanBlocksize) {
CreateBuffer(5);
std::string data1 = "1234567890";
buffer_->WriteToContiguousBuffer(data1);
size_t block_idx = 0;
std::string data2 = "1234";
absl::string_view write_result = Write(data2, &block_idx);
ASSERT_EQ(1u, block_idx);
std::string data3 = "1234";
write_result = Write(data3, &block_idx);
ASSERT_EQ(2u, block_idx);
}
TEST_F(BalsaBufferTest, AccessFirstBlockUninitialized) {
CreateBuffer(5);
EXPECT_EQ(0u, buffer_->GetReadableBytesOfFirstBlock());
EXPECT_QUICHE_BUG(buffer_->StartOfFirstBlock(),
"First block not allocated yet!");
EXPECT_QUICHE_BUG(buffer_->EndOfFirstBlock(),
"First block not allocated yet!");
}
TEST_F(BalsaBufferTest, AccessFirstBlockInitialized) {
CreateBuffer(5);
std::string data1 = "1234567890";
buffer_->WriteToContiguousBuffer(data1);
const char* start = buffer_->StartOfFirstBlock();
EXPECT_TRUE(start != nullptr);
const char* end = buffer_->EndOfFirstBlock();
EXPECT_TRUE(end != nullptr);
EXPECT_EQ(data1.length(), static_cast<size_t>(end - start));
EXPECT_EQ(data1.length(), buffer_->GetReadableBytesOfFirstBlock());
}
TEST(BalsaHeaders, CanAssignBeginToIterator) {
{
BalsaHeaders header;
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
static_cast<void>(chli);
}
{
const BalsaHeaders header;
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
static_cast<void>(chli);
}
}
TEST(BalsaHeaders, CanAssignEndToIterator) {
{
BalsaHeaders header;
BalsaHeaders::const_header_lines_iterator chli = header.lines().end();
static_cast<void>(chli);
}
{
const BalsaHeaders header;
BalsaHeaders::const_header_lines_iterator chli = header.lines().end();
static_cast<void>(chli);
}
}
TEST(BalsaHeaders, ReplaceOrAppendHeaderTestAppending) {
BalsaHeaders header;
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.ReplaceOrAppendHeader(key_1, value_1);
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
ASSERT_EQ(absl::string_view("key_1"), chli->first);
ASSERT_EQ(absl::string_view("value_1"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, ReplaceOrAppendHeaderTestReplacing) {
BalsaHeaders header;
std::string key_1 = "key_1";
std::string value_1 = "value_1";
std::string key_2 = "key_2";
header.ReplaceOrAppendHeader(key_1, value_1);
header.ReplaceOrAppendHeader(key_2, value_1);
std::string value_2 = "value_2_string";
header.ReplaceOrAppendHeader(key_1, value_2);
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
ASSERT_EQ(key_1, chli->first);
ASSERT_EQ(value_2, chli->second);
++chli;
ASSERT_EQ(key_2, chli->first);
ASSERT_EQ(value_1, chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, ReplaceOrAppendHeaderTestReplacingMultiple) {
BalsaHeaders header;
std::string key_1 = "key_1";
std::string key_2 = "key_2";
std::string value_1 = "val_1";
std::string value_2 = "val_2";
std::string value_3 =
"value_3_is_longer_than_value_1_and_value_2_and_their_keys";
header.AppendHeader(key_1, value_1);
header.AppendHeader(key_1, value_2);
header.AppendHeader(key_2, value_1);
header.ReplaceOrAppendHeader(key_1, value_3);
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
ASSERT_EQ(key_1, chli->first);
ASSERT_EQ(value_3, chli->second);
++chli;
ASSERT_EQ(key_2, chli->first);
ASSERT_EQ(value_1, chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
header.ReplaceOrAppendHeader(key_1, value_1);
chli = header.lines().begin();
ASSERT_EQ(key_1, chli->first);
ASSERT_EQ(value_1, chli->second);
++chli;
ASSERT_EQ(key_2, chli->first);
ASSERT_EQ(value_1, chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, AppendHeaderAndIteratorTest1) {
BalsaHeaders header;
ASSERT_EQ(header.lines().begin(), header.lines().end());
{
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.AppendHeader(key_1, value_1);
key_1 = "garbage";
value_1 = "garbage";
}
ASSERT_NE(header.lines().begin(), header.lines().end());
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
ASSERT_EQ(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_1"), chli->first);
ASSERT_EQ(absl::string_view("value_1"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, AppendHeaderAndIteratorTest2) {
BalsaHeaders header;
ASSERT_EQ(header.lines().begin(), header.lines().end());
{
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.AppendHeader(key_1, value_1);
key_1 = "garbage";
value_1 = "garbage";
}
{
std::string key_2 = "key_2";
std::string value_2 = "value_2";
header.AppendHeader(key_2, value_2);
key_2 = "garbage";
value_2 = "garbage";
}
ASSERT_NE(header.lines().begin(), header.lines().end());
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
ASSERT_EQ(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_1"), chli->first);
ASSERT_EQ(absl::string_view("value_1"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_2"), chli->first);
ASSERT_EQ(absl::string_view("value_2"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, AppendHeaderAndIteratorTest3) {
BalsaHeaders header;
ASSERT_EQ(header.lines().begin(), header.lines().end());
{
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.AppendHeader(key_1, value_1);
key_1 = "garbage";
value_1 = "garbage";
}
{
std::string key_2 = "key_2";
std::string value_2 = "value_2";
header.AppendHeader(key_2, value_2);
key_2 = "garbage";
value_2 = "garbage";
}
{
std::string key_3 = "key_3";
std::string value_3 = "value_3";
header.AppendHeader(key_3, value_3);
key_3 = "garbage";
value_3 = "garbage";
}
ASSERT_NE(header.lines().begin(), header.lines().end());
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
ASSERT_EQ(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_1"), chli->first);
ASSERT_EQ(absl::string_view("value_1"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_2"), chli->first);
ASSERT_EQ(absl::string_view("value_2"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_3"), chli->first);
ASSERT_EQ(absl::string_view("value_3"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, AppendHeaderAndTestEraseWithIterator) {
BalsaHeaders header;
ASSERT_EQ(header.lines().begin(), header.lines().end());
{
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.AppendHeader(key_1, value_1);
key_1 = "garbage";
value_1 = "garbage";
}
{
std::string key_2 = "key_2";
std::string value_2 = "value_2";
header.AppendHeader(key_2, value_2);
key_2 = "garbage";
value_2 = "garbage";
}
{
std::string key_3 = "key_3";
std::string value_3 = "value_3";
header.AppendHeader(key_3, value_3);
key_3 = "garbage";
value_3 = "garbage";
}
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
++chli;
ASSERT_EQ(absl::string_view("key_2"), chli->first);
header.erase(chli);
chli = header.lines().begin();
ASSERT_NE(header.lines().begin(), header.lines().end());
ASSERT_EQ(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_1"), chli->first);
ASSERT_EQ(absl::string_view("value_1"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_NE(header.lines().end(), chli);
ASSERT_EQ(absl::string_view("key_3"), chli->first);
ASSERT_EQ(absl::string_view("value_3"), chli->second);
++chli;
ASSERT_NE(header.lines().begin(), chli);
ASSERT_EQ(header.lines().end(), chli);
}
TEST(BalsaHeaders, TestSetFirstlineInAdditionalBuffer) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET / HTTP/1.0"));
}
TEST(BalsaHeaders, TestSetFirstlineInOriginalBufferAndIsShorterThanOriginal) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET /foobar HTTP/1.0\r\n"
"\r\n");
ASSERT_THAT(headers.first_line(), StrEq("GET /foobar HTTP/1.0"));
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET / HTTP/1.0"));
}
TEST(BalsaHeaders, TestSetFirstlineInOriginalBufferAndIsLongerThanOriginal) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"some_key: some_value\r\n"
"another_key: another_value\r\n"
"\r\n");
ASSERT_THAT(headers.first_line(), StrEq("GET / HTTP/1.0"));
headers.erase(headers.lines().begin());
headers.SetRequestFirstlineFromStringPieces("GET", "/foobar", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET /foobar HTTP/1.0"));
}
TEST(BalsaHeaders, TestSetFirstlineInAdditionalDataAndIsShorterThanOriginal) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/foobar", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET /foobar HTTP/1.0"));
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET / HTTP/1.0"));
}
TEST(BalsaHeaders, TestSetFirstlineInAdditionalDataAndIsLongerThanOriginal) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET / HTTP/1.0"));
headers.SetRequestFirstlineFromStringPieces("GET", "/foobar", "HTTP/1.0");
ASSERT_THAT(headers.first_line(), StrEq("GET /foobar HTTP/1.0"));
}
TEST(BalsaHeaders, TestDeletingSubstring) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key2", "value2");
headers.AppendHeader("key", "value");
headers.AppendHeader("unrelated", "value");
headers.RemoveAllOfHeader("key");
EXPECT_TRUE(headers.HasHeader("key1"));
EXPECT_TRUE(headers.HasHeader("key2"));
EXPECT_TRUE(headers.HasHeader("unrelated"));
EXPECT_FALSE(headers.HasHeader("key"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("key"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("KeY"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("UNREL"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("key3"));
EXPECT_FALSE(headers.GetHeader("key1").empty());
EXPECT_FALSE(headers.GetHeader("KEY1").empty());
EXPECT_FALSE(headers.GetHeader("key2").empty());
EXPECT_FALSE(headers.GetHeader("unrelated").empty());
EXPECT_TRUE(headers.GetHeader("key").empty());
headers.AppendHeader("key", "");
EXPECT_TRUE(headers.HasHeader("key"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("key"));
EXPECT_TRUE(headers.GetHeader("key").empty());
headers.RemoveAllHeadersWithPrefix("key");
EXPECT_FALSE(headers.HasHeader("key1"));
EXPECT_FALSE(headers.HasHeader("key2"));
EXPECT_TRUE(headers.HasHeader("unrelated"));
EXPECT_FALSE(headers.HasHeader("key"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("key"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("key1"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("key2"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("kEy"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("unrelated"));
EXPECT_TRUE(headers.GetHeader("key1").empty());
EXPECT_TRUE(headers.GetHeader("key2").empty());
EXPECT_FALSE(headers.GetHeader("unrelated").empty());
EXPECT_TRUE(headers.GetHeader("key").empty());
}
TEST(BalsaHeaders, TestRemovingValues) {
{
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("hi", "hello");
headers.AppendHeader("key1", "val1");
headers.AppendHeader("key1", "value2");
headers.AppendHeader("key1", "value3");
headers.AppendHeader("key2", "value4");
headers.AppendHeader("unrelated", "value");
EXPECT_EQ(0u, headers.RemoveValue("key1", ""));
EXPECT_EQ(1u, headers.RemoveValue("key1", "value2"));
std::string key1_vals = headers.GetAllOfHeaderAsString("key1");
EXPECT_THAT(key1_vals, StrEq("val1,value3"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "val1"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "value3"));
EXPECT_EQ("value4", headers.GetHeader("key2"));
EXPECT_EQ("hello", headers.GetHeader("hi"));
EXPECT_EQ("value", headers.GetHeader("unrelated"));
EXPECT_FALSE(headers.HeaderHasValue("key1", "value2"));
EXPECT_EQ(1u, headers.RemoveValue("key1", "value3"));
key1_vals = headers.GetAllOfHeaderAsString("key1");
EXPECT_THAT(key1_vals, StrEq("val1"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "val1"));
EXPECT_EQ("value4", headers.GetHeader("key2"));
EXPECT_EQ("hello", headers.GetHeader("hi"));
EXPECT_EQ("value", headers.GetHeader("unrelated"));
EXPECT_FALSE(headers.HeaderHasValue("key1", "value3"));
EXPECT_FALSE(headers.HeaderHasValue("key1", "value2"));
}
{
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key1", "value2, value3,value2");
headers.AppendHeader("key1", "value4 ,value2,value5,val6");
headers.AppendHeader("key1", "value2, value2 , value2");
headers.AppendHeader("key1", " value2 , value2 ");
headers.AppendHeader("key1", " value2 a");
headers.AppendHeader("key1", "");
headers.AppendHeader("key1", ", ,,");
headers.AppendHeader("unrelated", "value");
EXPECT_EQ(8u, headers.RemoveValue("key1", "value2"));
std::string key1_vals = headers.GetAllOfHeaderAsString("key1");
EXPECT_THAT(key1_vals,
StrEq("value1,value3,value4 ,value5,val6,value2 a,,, ,,"));
EXPECT_EQ("value", headers.GetHeader("unrelated"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "value1"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "value3"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "value4"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "value5"));
EXPECT_TRUE(headers.HeaderHasValue("key1", "val6"));
EXPECT_FALSE(headers.HeaderHasValue("key1", "value2"));
}
{
const absl::string_view key("key");
const absl::string_view value1("foo\0bar", 7);
const absl::string_view value2("value2");
const std::string value = absl::StrCat(value1, ",", value2);
{
BalsaHeaders headers;
headers.AppendHeader(key, value);
EXPECT_TRUE(headers.HeaderHasValue(key, value1));
EXPECT_TRUE(headers.HeaderHasValue(key, value2));
EXPECT_EQ(value, headers.GetAllOfHeaderAsString(key));
EXPECT_EQ(1u, headers.RemoveValue(key, value2));
EXPECT_TRUE(headers.HeaderHasValue(key, value1));
EXPECT_FALSE(headers.HeaderHasValue(key, value2));
EXPECT_EQ(value1, headers.GetAllOfHeaderAsString(key));
}
{
BalsaHeaders headers;
headers.AppendHeader(key, value1);
headers.AppendHeader(key, value2);
EXPECT_TRUE(headers.HeaderHasValue(key, value1));
EXPECT_TRUE(headers.HeaderHasValue(key, value2));
EXPECT_EQ(value, headers.GetAllOfHeaderAsString(key));
EXPECT_EQ(1u, headers.RemoveValue(key, value2));
EXPECT_TRUE(headers.HeaderHasValue(key, value1));
EXPECT_FALSE(headers.HeaderHasValue(key, value2));
EXPECT_EQ(value1, headers.GetAllOfHeaderAsString(key));
}
}
}
TEST(BalsaHeaders, ZeroAppendToHeaderWithCommaAndSpace) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"\r\n");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "1.1.1.1");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "2.2.2.2");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "3.3.3.3");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "4.4.4.4");
EXPECT_THAT(headers.GetAllOfHeader("X-Forwarded-For"),
ElementsAre("1.1.1.1, 2.2.2.2, 3.3.3.3, 4.4.4.4"));
}
TEST(BalsaHeaders, SingleAppendToHeaderWithCommaAndSpace) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"X-Forwarded-For: 1.1.1.1\r\n"
"\r\n");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "2.2.2.2");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "3.3.3.3");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "4.4.4.4");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "5.5.5.5");
EXPECT_THAT(headers.GetAllOfHeader("X-Forwarded-For"),
ElementsAre("1.1.1.1, 2.2.2.2, 3.3.3.3, 4.4.4.4, 5.5.5.5"));
}
TEST(BalsaHeaders, MultipleAppendToHeaderWithCommaAndSpace) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"X-Forwarded-For: 1.1.1.1\r\n"
"X-Forwarded-For: 2.2.2.2\r\n"
"\r\n");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "3.3.3.3");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "4.4.4.4");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "5.5.5.5");
headers.AppendToHeaderWithCommaAndSpace("X-Forwarded-For", "6.6.6.6");
EXPECT_THAT(
headers.GetAllOfHeader("X-Forwarded-For"),
ElementsAre("1.1.1.1", "2.2.2.2, 3.3.3.3, 4.4.4.4, 5.5.5.5, 6.6.6.6"));
}
TEST(BalsaHeaders, HeaderHasValues) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key", "val1,val2val2,val2,val3");
headers.AppendHeader("key", "val4val5val6");
headers.AppendHeader("key", "val11 val12");
headers.AppendHeader("key", "v val13");
headers.AppendHeader("key", "val7");
headers.AppendHeader("key", "");
headers.AppendHeader("key", "val8 , val9 , val10");
headers.AppendHeader("key", " val14 ");
headers.AppendHeader("key2", "val15");
headers.AppendHeader("key", "Val16");
headers.AppendHeader("key", "foo, Val17, bar");
EXPECT_TRUE(headers.HeaderHasValue("key", "val1"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val2"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val3"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val7"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val8"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val9"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val10"));
EXPECT_TRUE(headers.HeaderHasValue("key", "val14"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val4"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val5"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val6"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val11"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val12"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val13"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val15"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val16"));
EXPECT_FALSE(headers.HeaderHasValue("key", "val17"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val1"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val2"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val3"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val7"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val8"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val9"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val10"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val14"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val4"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val5"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val6"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val11"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val12"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val13"));
EXPECT_FALSE(headers.HeaderHasValueIgnoreCase("key", "val15"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val16"));
EXPECT_TRUE(headers.HeaderHasValueIgnoreCase("key", "val17"));
}
TEST(BalsaHeaders, TestNotDeletingBeyondString) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.RemoveAllHeadersWithPrefix("key1: value1");
EXPECT_NE(headers.lines().begin(), headers.lines().end());
}
TEST(BalsaHeaders, TestIteratingOverErasedHeaders) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key2", "value2");
headers.AppendHeader("key3", "value3");
headers.AppendHeader("key4", "value4");
headers.AppendHeader("key5", "value5");
headers.AppendHeader("key6", "value6");
headers.RemoveAllOfHeader("key6");
headers.RemoveAllOfHeader("key5");
headers.RemoveAllOfHeader("key4");
BalsaHeaders::const_header_lines_iterator chli = headers.lines().begin();
EXPECT_NE(headers.lines().end(), chli);
EXPECT_EQ(headers.lines().begin(), chli);
EXPECT_THAT(chli->first, StrEq("key1"));
EXPECT_THAT(chli->second, StrEq("value1"));
++chli;
EXPECT_NE(headers.lines().end(), chli);
EXPECT_NE(headers.lines().begin(), chli);
EXPECT_THAT(chli->first, StrEq("key2"));
EXPECT_THAT(chli->second, StrEq("value2"));
++chli;
EXPECT_NE(headers.lines().end(), chli);
EXPECT_NE(headers.lines().begin(), chli);
EXPECT_THAT(chli->first, StrEq("key3"));
EXPECT_THAT(chli->second, StrEq("value3"));
++chli;
EXPECT_EQ(headers.lines().end(), chli);
EXPECT_NE(headers.lines().begin(), chli);
headers.RemoveAllOfHeader("key1");
headers.RemoveAllOfHeader("key2");
chli = headers.lines().begin();
EXPECT_THAT(chli->first, StrEq("key3"));
EXPECT_THAT(chli->second, StrEq("value3"));
EXPECT_NE(headers.lines().end(), chli);
EXPECT_EQ(headers.lines().begin(), chli);
++chli;
EXPECT_EQ(headers.lines().end(), chli);
EXPECT_NE(headers.lines().begin(), chli);
}
TEST(BalsaHeaders, CanCompareIterators) {
BalsaHeaders header;
ASSERT_EQ(header.lines().begin(), header.lines().end());
{
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.AppendHeader(key_1, value_1);
key_1 = "garbage";
value_1 = "garbage";
}
{
std::string key_2 = "key_2";
std::string value_2 = "value_2";
header.AppendHeader(key_2, value_2);
key_2 = "garbage";
value_2 = "garbage";
}
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
BalsaHeaders::const_header_lines_iterator chlj = header.lines().begin();
EXPECT_EQ(chli, chlj);
++chlj;
EXPECT_NE(chli, chlj);
EXPECT_LT(chli, chlj);
EXPECT_LE(chli, chlj);
EXPECT_LE(chli, chli);
EXPECT_GT(chlj, chli);
EXPECT_GE(chlj, chli);
EXPECT_GE(chlj, chlj);
}
TEST(BalsaHeaders, AppendHeaderAndTestThatYouCanEraseEverything) {
BalsaHeaders header;
ASSERT_EQ(header.lines().begin(), header.lines().end());
{
std::string key_1 = "key_1";
std::string value_1 = "value_1";
header.AppendHeader(key_1, value_1);
key_1 = "garbage";
value_1 = "garbage";
}
{
std::string key_2 = "key_2";
std::string value_2 = "value_2";
header.AppendHeader(key_2, value_2);
key_2 = "garbage";
value_2 = "garbage";
}
{
std::string key_3 = "key_3";
std::string value_3 = "value_3";
header.AppendHeader(key_3, value_3);
key_3 = "garbage";
value_3 = "garbage";
}
EXPECT_NE(header.lines().begin(), header.lines().end());
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
while (chli != header.lines().end()) {
header.erase(chli);
chli = header.lines().begin();
}
ASSERT_EQ(header.lines().begin(), header.lines().end());
}
TEST(BalsaHeaders, GetHeaderPositionWorksAsExpectedWithNoHeaderLines) {
BalsaHeaders header;
BalsaHeaders::const_header_lines_iterator i = header.GetHeaderPosition("foo");
EXPECT_EQ(i, header.lines().end());
}
TEST(BalsaHeaders, GetHeaderPositionWorksAsExpectedWithBalsaFrameProcessInput) {
BalsaHeaders headers = CreateHTTPHeaders(
true,
"GET / HTTP/1.0\r\n"
"key1: value_1\r\n"
"key1: value_foo\r\n"
"key2: value_2\r\n"
"key3: value_3\r\n"
"a: value_a\r\n"
"b: value_b\r\n"
"\r\n");
BalsaHeaders::const_header_lines_iterator header_position_b =
headers.GetHeaderPosition("b");
ASSERT_NE(header_position_b, headers.lines().end());
absl::string_view header_key_b_value = header_position_b->second;
ASSERT_FALSE(header_key_b_value.empty());
EXPECT_EQ(std::string("value_b"), header_key_b_value);
BalsaHeaders::const_header_lines_iterator header_position_1 =
headers.GetHeaderPosition("key1");
ASSERT_NE(header_position_1, headers.lines().end());
absl::string_view header_key_1_value = header_position_1->second;
ASSERT_FALSE(header_key_1_value.empty());
EXPECT_EQ(std::string("value_1"), header_key_1_value);
BalsaHeaders::const_header_lines_iterator header_position_3 =
headers.GetHeaderPosition("key3");
ASSERT_NE(header_position_3, headers.lines().end());
absl::string_view header_key_3_value = header_position_3->second;
ASSERT_FALSE(header_key_3_value.empty());
EXPECT_EQ(std::string("value_3"), header_key_3_value);
BalsaHeaders::const_header_lines_iterator header_position_2 =
headers.GetHeaderPosition("key2");
ASSERT_NE(header_position_2, headers.lines().end());
absl::string_view header_key_2_value = header_position_2->second;
ASSERT_FALSE(header_key_2_value.empty());
EXPECT_EQ(std::string("value_2"), header_key_2_value);
BalsaHeaders::const_header_lines_iterator header_position_a =
headers.GetHeaderPosition("a");
ASSERT_NE(header_position_a, headers.lines().end());
absl::string_view header_key_a_value = header_position_a->second;
ASSERT_FALSE(header_key_a_value.empty());
EXPECT_EQ(std::string("value_a"), header_key_a_value);
}
TEST(BalsaHeaders, GetHeaderWorksAsExpectedWithNoHeaderLines) {
BalsaHeaders header;
absl::string_view value = header.GetHeader("foo");
EXPECT_TRUE(value.empty());
value = header.GetHeader("");
EXPECT_TRUE(value.empty());
}
TEST(BalsaHeaders, HasHeaderWorksAsExpectedWithNoHeaderLines) {
BalsaHeaders header;
EXPECT_FALSE(header.HasHeader("foo"));
EXPECT_FALSE(header.HasHeader(""));
EXPECT_FALSE(header.HasHeadersWithPrefix("foo"));
EXPECT_FALSE(header.HasHeadersWithPrefix(""));
}
TEST(BalsaHeaders, HasHeaderWorksAsExpectedWithBalsaFrameProcessInput) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"key1: value_1\r\n"
"key1: value_foo\r\n"
"key2:\r\n"
"\r\n");
EXPECT_FALSE(headers.HasHeader("foo"));
EXPECT_TRUE(headers.HasHeader("key1"));
EXPECT_TRUE(headers.HasHeader("key2"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("foo"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("key"));
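  // Prefix matching is case-insensitive.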
EXPECT_TRUE(headers.HasHeadersWithPrefix("KEY"));
}
TEST(BalsaHeaders, GetHeaderWorksAsExpectedWithBalsaFrameProcessInput) {
BalsaHeaders headers = CreateHTTPHeaders(
true,
"GET / HTTP/1.0\r\n"
"key1: value_1\r\n"
"key1: value_foo\r\n"
"key2: value_2\r\n"
"key3: value_3\r\n"
"key4:\r\n"
"a: value_a\r\n"
"b: value_b\r\n"
"\r\n");
absl::string_view header_key_b_value = headers.GetHeader("b");
ASSERT_FALSE(header_key_b_value.empty());
EXPECT_EQ(std::string("value_b"), header_key_b_value);
absl::string_view header_key_1_value = headers.GetHeader("key1");
ASSERT_FALSE(header_key_1_value.empty());
EXPECT_EQ(std::string("value_1"), header_key_1_value);
absl::string_view header_key_3_value = headers.GetHeader("key3");
ASSERT_FALSE(header_key_3_value.empty());
EXPECT_EQ(std::string("value_3"), header_key_3_value);
absl::string_view header_key_2_value = headers.GetHeader("key2");
ASSERT_FALSE(header_key_2_value.empty());
EXPECT_EQ(std::string("value_2"), header_key_2_value);
absl::string_view header_key_a_value = headers.GetHeader("a");
ASSERT_FALSE(header_key_a_value.empty());
EXPECT_EQ(std::string("value_a"), header_key_a_value);
EXPECT_TRUE(headers.GetHeader("key4").empty());
}
TEST(BalsaHeaders, GetHeaderWorksAsExpectedWithAppendHeader) {
BalsaHeaders header;
header.AppendHeader("key1", "value_1");
header.AppendHeader("key1", "value_2");
header.AppendHeader("key2", "value_2");
header.AppendHeader("key3", "value_3");
header.AppendHeader("a", "value_a");
header.AppendHeader("b", "value_b");
absl::string_view header_key_b_value = header.GetHeader("b");
absl::string_view header_key_1_value = header.GetHeader("key1");
absl::string_view header_key_3_value = header.GetHeader("key3");
absl::string_view header_key_2_value = header.GetHeader("key2");
absl::string_view header_key_a_value = header.GetHeader("a");
ASSERT_FALSE(header_key_1_value.empty());
ASSERT_FALSE(header_key_2_value.empty());
ASSERT_FALSE(header_key_3_value.empty());
ASSERT_FALSE(header_key_a_value.empty());
ASSERT_FALSE(header_key_b_value.empty());
EXPECT_TRUE(header.HasHeader("key1"));
EXPECT_TRUE(header.HasHeader("key2"));
EXPECT_TRUE(header.HasHeader("key3"));
EXPECT_TRUE(header.HasHeader("a"));
EXPECT_TRUE(header.HasHeader("b"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key1"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key2"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key3"));
EXPECT_TRUE(header.HasHeadersWithPrefix("a"));
EXPECT_TRUE(header.HasHeadersWithPrefix("b"));
EXPECT_EQ(std::string("value_1"), header_key_1_value);
EXPECT_EQ(std::string("value_2"), header_key_2_value);
EXPECT_EQ(std::string("value_3"), header_key_3_value);
EXPECT_EQ(std::string("value_a"), header_key_a_value);
EXPECT_EQ(std::string("value_b"), header_key_b_value);
}
TEST(BalsaHeaders, HasHeaderWorksAsExpectedWithAppendHeader) {
BalsaHeaders header;
ASSERT_FALSE(header.HasHeader("key1"));
EXPECT_FALSE(header.HasHeadersWithPrefix("K"));
EXPECT_FALSE(header.HasHeadersWithPrefix("ke"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key1"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key2"));
header.AppendHeader("key1", "value_1");
EXPECT_TRUE(header.HasHeader("key1"));
EXPECT_TRUE(header.HasHeadersWithPrefix("K"));
EXPECT_TRUE(header.HasHeadersWithPrefix("ke"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key1"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key2"));
header.AppendHeader("key1", "value_2");
EXPECT_TRUE(header.HasHeader("key1"));
EXPECT_FALSE(header.HasHeader("key2"));
EXPECT_TRUE(header.HasHeadersWithPrefix("k"));
EXPECT_TRUE(header.HasHeadersWithPrefix("ke"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key1"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key2"));
}
TEST(BalsaHeaders, GetHeaderWorksAsExpectedWithHeadersErased) {
BalsaHeaders header;
header.AppendHeader("key1", "value_1");
header.AppendHeader("key1", "value_2");
header.AppendHeader("key2", "value_2");
header.AppendHeader("key3", "value_3");
header.AppendHeader("a", "value_a");
header.AppendHeader("b", "value_b");
header.erase(header.GetHeaderPosition("key2"));
absl::string_view header_key_b_value = header.GetHeader("b");
absl::string_view header_key_1_value = header.GetHeader("key1");
absl::string_view header_key_3_value = header.GetHeader("key3");
absl::string_view header_key_2_value = header.GetHeader("key2");
absl::string_view header_key_a_value = header.GetHeader("a");
ASSERT_FALSE(header_key_1_value.empty());
ASSERT_TRUE(header_key_2_value.empty());
ASSERT_FALSE(header_key_3_value.empty());
ASSERT_FALSE(header_key_a_value.empty());
ASSERT_FALSE(header_key_b_value.empty());
EXPECT_EQ(std::string("value_1"), header_key_1_value);
EXPECT_EQ(std::string("value_3"), header_key_3_value);
EXPECT_EQ(std::string("value_a"), header_key_a_value);
EXPECT_EQ(std::string("value_b"), header_key_b_value);
header.erase(header.GetHeaderPosition("key1"));
header_key_1_value = header.GetHeader("key1");
ASSERT_FALSE(header_key_1_value.empty());
EXPECT_EQ(std::string("value_2"), header_key_1_value);
header.erase(header.GetHeaderPosition("key1"));
ASSERT_TRUE(header.GetHeader("key1").empty());
}
TEST(BalsaHeaders, HasHeaderWorksAsExpectedWithHeadersErased) {
BalsaHeaders header;
header.AppendHeader("key1", "value_1");
header.AppendHeader("key2", "value_2a");
header.AppendHeader("key2", "value_2b");
ASSERT_TRUE(header.HasHeader("key1"));
ASSERT_TRUE(header.HasHeadersWithPrefix("key1"));
ASSERT_TRUE(header.HasHeadersWithPrefix("key2"));
ASSERT_TRUE(header.HasHeadersWithPrefix("kEY"));
header.erase(header.GetHeaderPosition("key1"));
EXPECT_FALSE(header.HasHeader("key1"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key1"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key2"));
EXPECT_TRUE(header.HasHeadersWithPrefix("kEY"));
ASSERT_TRUE(header.HasHeader("key2"));
header.erase(header.GetHeaderPosition("key2"));
ASSERT_TRUE(header.HasHeader("key2"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key1"));
EXPECT_TRUE(header.HasHeadersWithPrefix("key2"));
EXPECT_TRUE(header.HasHeadersWithPrefix("kEY"));
header.erase(header.GetHeaderPosition("key2"));
EXPECT_FALSE(header.HasHeader("key2"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key1"));
EXPECT_FALSE(header.HasHeadersWithPrefix("key2"));
EXPECT_FALSE(header.HasHeadersWithPrefix("kEY"));
}
TEST(BalsaHeaders, HasNonEmptyHeaderWorksAsExpectedWithNoHeaderLines) {
BalsaHeaders header;
EXPECT_FALSE(header.HasNonEmptyHeader("foo"));
EXPECT_FALSE(header.HasNonEmptyHeader(""));
}
TEST(BalsaHeaders, HasNonEmptyHeaderWorksAsExpectedWithAppendHeader) {
BalsaHeaders header;
EXPECT_FALSE(header.HasNonEmptyHeader("key1"));
header.AppendHeader("key1", "");
EXPECT_FALSE(header.HasNonEmptyHeader("key1"));
header.AppendHeader("key1", "value_2");
EXPECT_TRUE(header.HasNonEmptyHeader("key1"));
EXPECT_FALSE(header.HasNonEmptyHeader("key2"));
}
TEST(BalsaHeaders, HasNonEmptyHeaderWorksAsExpectedWithHeadersErased) {
BalsaHeaders header;
header.AppendHeader("key1", "value_1");
header.AppendHeader("key2", "value_2a");
header.AppendHeader("key2", "");
EXPECT_TRUE(header.HasNonEmptyHeader("key1"));
header.erase(header.GetHeaderPosition("key1"));
EXPECT_FALSE(header.HasNonEmptyHeader("key1"));
EXPECT_TRUE(header.HasNonEmptyHeader("key2"));
header.erase(header.GetHeaderPosition("key2"));
EXPECT_FALSE(header.HasNonEmptyHeader("key2"));
header.erase(header.GetHeaderPosition("key2"));
EXPECT_FALSE(header.HasNonEmptyHeader("key2"));
}
TEST(BalsaHeaders, HasNonEmptyHeaderWorksAsExpectedWithBalsaFrameProcessInput) {
BalsaHeaders headers = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"key1: value_1\r\n"
"key2:\r\n"
"key3:\r\n"
"key3: value_3\r\n"
"key4:\r\n"
"key4:\r\n"
"key5: value_5\r\n"
"key5:\r\n"
"\r\n");
EXPECT_FALSE(headers.HasNonEmptyHeader("foo"));
EXPECT_TRUE(headers.HasNonEmptyHeader("key1"));
EXPECT_FALSE(headers.HasNonEmptyHeader("key2"));
EXPECT_TRUE(headers.HasNonEmptyHeader("key3"));
EXPECT_FALSE(headers.HasNonEmptyHeader("key4"));
EXPECT_TRUE(headers.HasNonEmptyHeader("key5"));
headers.erase(headers.GetHeaderPosition("key5"));
EXPECT_FALSE(headers.HasNonEmptyHeader("key5"));
}
TEST(BalsaHeaders, GetAllOfHeader) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("Key", "value_2,value_3");
header.AppendHeader("key", "");
header.AppendHeader("KEY", "value_4");
std::vector<absl::string_view> result;
header.GetAllOfHeader("key", &result);
ASSERT_EQ(4u, result.size());
EXPECT_EQ("value_1", result[0]);
EXPECT_EQ("value_2,value_3", result[1]);
EXPECT_EQ("", result[2]);
EXPECT_EQ("value_4", result[3]);
EXPECT_EQ(header.GetAllOfHeader("key"), result);
}
TEST(BalsaHeaders, GetAllOfHeaderDoesWhatItSays) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
header.AppendHeader("key", "");
header.AppendHeader("key", "value_1");
ASSERT_NE(header.lines().begin(), header.lines().end());
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
ASSERT_EQ(4u, out.size());
EXPECT_EQ("value_1", out[0]);
EXPECT_EQ("value_2", out[1]);
EXPECT_EQ("", out[2]);
EXPECT_EQ("value_1", out[3]);
EXPECT_EQ(header.GetAllOfHeader("key"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderWithPrefix) {
BalsaHeaders header;
header.AppendHeader("foo-Foo", "value_1");
header.AppendHeader("Foo-bar", "value_2,value_3");
header.AppendHeader("foo-Foo", "");
header.AppendHeader("bar", "value_not");
header.AppendHeader("fOO-fOO", "value_4");
std::vector<std::pair<absl::string_view, absl::string_view>> result;
header.GetAllOfHeaderWithPrefix("abc", &result);
ASSERT_EQ(0u, result.size());
header.GetAllOfHeaderWithPrefix("foo", &result);
ASSERT_EQ(4u, result.size());
EXPECT_EQ("foo-Foo", result[0].first);
EXPECT_EQ("value_1", result[0].second);
EXPECT_EQ("Foo-bar", result[1].first);
EXPECT_EQ("value_2,value_3", result[1].second);
EXPECT_EQ("", result[2].second);
EXPECT_EQ("value_4", result[3].second);
std::vector<std::pair<absl::string_view, absl::string_view>> result2;
header.GetAllOfHeaderWithPrefix("FoO", &result2);
ASSERT_EQ(4u, result2.size());
}
TEST(BalsaHeaders, GetAllHeadersWithLimit) {
BalsaHeaders header;
header.AppendHeader("foo-Foo", "value_1");
header.AppendHeader("Foo-bar", "value_2,value_3");
header.AppendHeader("foo-Foo", "");
header.AppendHeader("bar", "value_4");
header.AppendHeader("fOO-fOO", "value_5");
std::vector<std::pair<absl::string_view, absl::string_view>> result;
header.GetAllHeadersWithLimit(&result, 4);
ASSERT_EQ(4u, result.size());
EXPECT_EQ("foo-Foo", result[0].first);
EXPECT_EQ("value_1", result[0].second);
EXPECT_EQ("Foo-bar", result[1].first);
EXPECT_EQ("value_2,value_3", result[1].second);
EXPECT_EQ("", result[2].second);
EXPECT_EQ("value_4", result[3].second);
std::vector<std::pair<absl::string_view, absl::string_view>> result2;
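  // A negative limit means no limit: all five header lines are returned.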
header.GetAllHeadersWithLimit(&result2, -1);
ASSERT_EQ(5u, result2.size());
}
TEST(BalsaHeaders, RangeFor) {
BalsaHeaders header;
header.AppendHeader("key1", "value_1a");
header.AppendHeader("key1", "value_1b");
header.AppendHeader("key2", "");
header.AppendHeader("key3", "value_3");
std::vector<std::pair<absl::string_view, absl::string_view>> out;
for (const auto& line : header.lines()) {
out.push_back(line);
}
const std::vector<std::pair<absl::string_view, absl::string_view>> expected =
{{"key1", "value_1a"},
{"key1", "value_1b"},
{"key2", ""},
{"key3", "value_3"}};
EXPECT_EQ(expected, out);
}
TEST(BalsaHeaders, GetAllOfHeaderWithNonExistentKey) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key_non_existent", &out);
ASSERT_EQ(0u, out.size());
EXPECT_EQ(header.GetAllOfHeader("key_non_existent"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderEmptyValVariation1) {
BalsaHeaders header;
header.AppendHeader("key", "");
header.AppendHeader("key", "");
header.AppendHeader("key", "v1");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
ASSERT_EQ(3u, out.size());
EXPECT_EQ("", out[0]);
EXPECT_EQ("", out[1]);
EXPECT_EQ("v1", out[2]);
EXPECT_EQ(header.GetAllOfHeader("key"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderEmptyValVariation2) {
BalsaHeaders header;
header.AppendHeader("key", "");
header.AppendHeader("key", "v1");
header.AppendHeader("key", "");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
ASSERT_EQ(3u, out.size());
EXPECT_EQ("", out[0]);
EXPECT_EQ("v1", out[1]);
EXPECT_EQ("", out[2]);
EXPECT_EQ(header.GetAllOfHeader("key"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderEmptyValVariation3) {
BalsaHeaders header;
header.AppendHeader("key", "");
header.AppendHeader("key", "v1");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
ASSERT_EQ(2u, out.size());
EXPECT_EQ("", out[0]);
EXPECT_EQ("v1", out[1]);
EXPECT_EQ(header.GetAllOfHeader("key"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderEmptyValVariation4) {
BalsaHeaders header;
header.AppendHeader("key", "v1");
header.AppendHeader("key", "");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
ASSERT_EQ(2u, out.size());
EXPECT_EQ("v1", out[0]);
EXPECT_EQ("", out[1]);
EXPECT_EQ(header.GetAllOfHeader("key"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderWithAppendHeaders) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key_new", &out);
ASSERT_EQ(0u, out.size());
EXPECT_EQ(header.GetAllOfHeader("key_new"), out);
header.AppendHeader("key_new", "value_3");
header.GetAllOfHeader("key_new", &out);
ASSERT_EQ(1u, out.size());
EXPECT_EQ("value_3", out[0]);
EXPECT_EQ(header.GetAllOfHeader("key_new"), out);
header.GetAllOfHeader("key", &out);
ASSERT_EQ(3u, out.size());
EXPECT_EQ("value_1", out[1]);
EXPECT_EQ("value_2", out[2]);
EXPECT_THAT(header.GetAllOfHeader("key"), ElementsAre("value_1", "value_2"));
}
TEST(BalsaHeaders, GetAllOfHeaderWithRemoveHeaders) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
header.AppendHeader("a", "va");
header.RemoveAllOfHeader("key");
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
ASSERT_EQ(0u, out.size());
EXPECT_EQ(header.GetAllOfHeader("key"), out);
header.GetAllOfHeader("a", &out);
ASSERT_EQ(1u, out.size());
EXPECT_EQ(header.GetAllOfHeader("a"), out);
out.clear();
header.RemoveAllOfHeader("a");
header.GetAllOfHeader("a", &out);
ASSERT_EQ(0u, out.size());
EXPECT_EQ(header.GetAllOfHeader("a"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderWithRemoveNonExistentHeaders) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("Accept-Encoding", "deflate,compress");
EXPECT_EQ(0u, headers.RemoveValue("Accept-Encoding", "gzip(gfe)"));
std::string accept_encoding_vals =
headers.GetAllOfHeaderAsString("Accept-Encoding");
EXPECT_EQ("deflate,compress", accept_encoding_vals);
}
TEST(BalsaHeaders, GetAllOfHeaderWithEraseHeaders) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
header.AppendHeader("a", "va");
std::vector<absl::string_view> out;
header.erase(header.GetHeaderPosition("key"));
header.GetAllOfHeader("key", &out);
ASSERT_EQ(1u, out.size());
EXPECT_EQ("value_2", out[0]);
EXPECT_EQ(header.GetAllOfHeader("key"), out);
out.clear();
header.erase(header.GetHeaderPosition("key"));
header.GetAllOfHeader("key", &out);
ASSERT_EQ(0u, out.size());
EXPECT_EQ(header.GetAllOfHeader("key"), out);
out.clear();
header.GetAllOfHeader("a", &out);
ASSERT_EQ(1u, out.size());
EXPECT_EQ(header.GetAllOfHeader("a"), out);
out.clear();
header.erase(header.GetHeaderPosition("a"));
header.GetAllOfHeader("a", &out);
ASSERT_EQ(0u, out.size());
  EXPECT_EQ(header.GetAllOfHeader("a"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderWithNoHeaderLines) {
BalsaHeaders header;
std::vector<absl::string_view> out;
header.GetAllOfHeader("key", &out);
EXPECT_EQ(0u, out.size());
EXPECT_EQ(header.GetAllOfHeader("key"), out);
}
TEST(BalsaHeaders, GetAllOfHeaderDoesWhatItSaysForVariousKeys) {
BalsaHeaders header;
header.AppendHeader("key1", "value_11");
header.AppendHeader("key2", "value_21");
header.AppendHeader("key1", "value_12");
header.AppendHeader("key2", "value_22");
  std::vector<absl::string_view> out;
  header.GetAllOfHeader("key1", &out);
  ASSERT_EQ(2u, out.size());
  EXPECT_EQ("value_11", out[0]);
  EXPECT_EQ("value_12", out[1]);
  EXPECT_EQ(header.GetAllOfHeader("key1"), out);
  header.GetAllOfHeader("key2", &out);
  ASSERT_EQ(4u, out.size());
  EXPECT_EQ("value_21", out[2]);
  EXPECT_EQ("value_22", out[3]);
EXPECT_THAT(header.GetAllOfHeader("key2"),
ElementsAre("value_21", "value_22"));
}
TEST(BalsaHeaders, GetAllOfHeaderWithBalsaFrameProcessInput) {
BalsaHeaders header = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"key1: value_1\r\n"
"key1: value_foo\r\n"
"key2: value_2\r\n"
"a: value_a\r\n"
"key2: \r\n"
"b: value_b\r\n"
"\r\n");
std::vector<absl::string_view> out;
int index = 0;
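  // |index| accumulates across calls because GetAllOfHeader() appends to
  // |out| without clearing it.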
header.GetAllOfHeader("key1", &out);
EXPECT_EQ("value_1", out[index++]);
EXPECT_EQ("value_foo", out[index++]);
EXPECT_EQ(header.GetAllOfHeader("key1"), out);
header.GetAllOfHeader("key2", &out);
EXPECT_EQ("value_2", out[index++]);
EXPECT_EQ("", out[index++]);
EXPECT_THAT(header.GetAllOfHeader("key2"), ElementsAre("value_2", ""));
header.GetAllOfHeader("a", &out);
EXPECT_EQ("value_a", out[index++]);
EXPECT_THAT(header.GetAllOfHeader("a"), ElementsAre("value_a"));
header.GetAllOfHeader("b", &out);
EXPECT_EQ("value_b", out[index++]);
EXPECT_THAT(header.GetAllOfHeader("b"), ElementsAre("value_b"));
}
TEST(BalsaHeaders, GetAllOfHeaderIncludeRemovedDoesWhatItSays) {
BalsaHeaders header;
header.AppendHeader("key1", "value_11");
header.AppendHeader("key2", "value_21");
header.AppendHeader("key1", "value_12");
header.AppendHeader("key2", "value_22");
header.AppendHeader("key1", "");
std::vector<absl::string_view> out;
header.GetAllOfHeaderIncludeRemoved("key1", &out);
ASSERT_EQ(3u, out.size());
EXPECT_EQ("value_11", out[0]);
EXPECT_EQ("value_12", out[1]);
EXPECT_EQ("", out[2]);
header.GetAllOfHeaderIncludeRemoved("key2", &out);
ASSERT_EQ(5u, out.size());
EXPECT_EQ("value_21", out[3]);
EXPECT_EQ("value_22", out[4]);
header.erase(header.GetHeaderPosition("key1"));
out.clear();
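  // Erased values are reported after the live ones: "value_12" and "" first,
  // then the erased "value_11".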
header.GetAllOfHeaderIncludeRemoved("key1", &out);
ASSERT_EQ(3u, out.size());
EXPECT_EQ("value_12", out[0]);
EXPECT_EQ("", out[1]);
EXPECT_EQ("value_11", out[2]);
header.GetAllOfHeaderIncludeRemoved("key2", &out);
ASSERT_EQ(5u, out.size());
EXPECT_EQ("value_21", out[3]);
EXPECT_EQ("value_22", out[4]);
header.RemoveAllOfHeader("key1");
out.clear();
header.GetAllOfHeaderIncludeRemoved("key1", &out);
ASSERT_EQ(3u, out.size());
EXPECT_EQ("value_11", out[0]);
EXPECT_EQ("value_12", out[1]);
EXPECT_EQ("", out[2]);
header.GetAllOfHeaderIncludeRemoved("key2", &out);
ASSERT_EQ(5u, out.size());
EXPECT_EQ("value_21", out[3]);
EXPECT_EQ("value_22", out[4]);
header.Clear();
out.clear();
header.GetAllOfHeaderIncludeRemoved("key1", &out);
ASSERT_EQ(0u, out.size());
header.GetAllOfHeaderIncludeRemoved("key2", &out);
ASSERT_EQ(0u, out.size());
}
TEST(BalsaHeaders, GetAllOfHeaderIncludeRemovedWithNonExistentKey) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
std::vector<absl::string_view> out;
header.GetAllOfHeaderIncludeRemoved("key_non_existent", &out);
ASSERT_EQ(0u, out.size());
}
TEST(BalsaHeaders, GetIteratorForKeyDoesWhatItSays) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("Key", "value_2");
header.AppendHeader("key", "");
header.AppendHeader("KEY", "value_1");
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("key");
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key", key_it->first);
EXPECT_EQ("value_1", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("Key", key_it->first);
EXPECT_EQ("value_2", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key", key_it->first);
EXPECT_EQ("", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("KEY", key_it->first);
EXPECT_EQ("value_1", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
}
TEST(BalsaHeaders, GetIteratorForKeyWithNonExistentKey) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("key_non_existent");
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
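  // lines(key) yields the same range as
  // [GetIteratorForKey(key), header_lines_key_end()).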
const auto lines = header.lines("key_non_existent");
EXPECT_EQ(lines.begin(), header.lines().end());
EXPECT_EQ(lines.end(), header.header_lines_key_end());
}
TEST(BalsaHeaders, GetIteratorForKeyWithAppendHeaders) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("key_new");
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
header.AppendHeader("key_new", "value_3");
key_it = header.GetIteratorForKey("key_new");
const auto lines1 = header.lines("key_new");
EXPECT_EQ(lines1.begin(), key_it);
EXPECT_EQ(lines1.end(), header.header_lines_key_end());
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key_new", key_it->first);
EXPECT_EQ("value_3", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
key_it = header.GetIteratorForKey("key");
const auto lines2 = header.lines("key");
EXPECT_EQ(lines2.begin(), key_it);
EXPECT_EQ(lines2.end(), header.header_lines_key_end());
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key", key_it->first);
EXPECT_EQ("value_1", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key", key_it->first);
EXPECT_EQ("value_2", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
}
TEST(BalsaHeaders, GetIteratorForKeyWithRemoveHeaders) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
header.AppendHeader("a", "va");
header.RemoveAllOfHeader("a");
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("key");
EXPECT_NE(header.lines().end(), key_it);
const auto lines1 = header.lines("key");
EXPECT_EQ(lines1.begin(), key_it);
EXPECT_EQ(lines1.end(), header.header_lines_key_end());
EXPECT_EQ("value_1", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key", key_it->first);
EXPECT_EQ("value_2", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
for (BalsaHeaders::const_header_lines_key_iterator it =
header.GetIteratorForKey("key");
it != header.lines().end(); ++it) {
EXPECT_EQ("key", it->first);
}
}
TEST(BalsaHeaders, GetIteratorForKeyWithEraseHeaders) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
header.AppendHeader("a", "va");
header.erase(header.GetHeaderPosition("key"));
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("key");
EXPECT_NE(header.lines().end(), key_it);
const auto lines1 = header.lines("key");
EXPECT_EQ(lines1.begin(), key_it);
EXPECT_EQ(lines1.end(), header.header_lines_key_end());
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key", key_it->first);
EXPECT_EQ("value_2", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
header.erase(header.GetHeaderPosition("key"));
key_it = header.GetIteratorForKey("key");
const auto lines2 = header.lines("key");
EXPECT_EQ(lines2.begin(), key_it);
EXPECT_EQ(lines2.end(), header.header_lines_key_end());
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
key_it = header.GetIteratorForKey("a");
const auto lines3 = header.lines("a");
EXPECT_EQ(lines3.begin(), key_it);
EXPECT_EQ(lines3.end(), header.header_lines_key_end());
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("a", key_it->first);
EXPECT_EQ("va", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
header.erase(header.GetHeaderPosition("a"));
key_it = header.GetIteratorForKey("a");
const auto lines4 = header.lines("a");
EXPECT_EQ(lines4.begin(), key_it);
EXPECT_EQ(lines4.end(), header.header_lines_key_end());
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
}
TEST(BalsaHeaders, GetIteratorForKeyWithNoHeaderLines) {
BalsaHeaders header;
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("key");
const auto lines = header.lines("key");
EXPECT_EQ(lines.begin(), key_it);
EXPECT_EQ(lines.end(), header.header_lines_key_end());
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
}
TEST(BalsaHeaders, GetIteratorForKeyWithBalsaFrameProcessInput) {
BalsaHeaders header = CreateHTTPHeaders(true,
"GET / HTTP/1.0\r\n"
"key1: value_1\r\n"
"Key1: value_foo\r\n"
"key2: value_2\r\n"
"a: value_a\r\n"
"key2: \r\n"
"b: value_b\r\n"
"\r\n");
BalsaHeaders::const_header_lines_key_iterator key_it =
header.GetIteratorForKey("Key1");
const auto lines1 = header.lines("Key1");
EXPECT_EQ(lines1.begin(), key_it);
EXPECT_EQ(lines1.end(), header.header_lines_key_end());
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key1", key_it->first);
EXPECT_EQ("value_1", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("Key1", key_it->first);
EXPECT_EQ("value_foo", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
key_it = header.GetIteratorForKey("key2");
EXPECT_NE(header.lines().end(), key_it);
const auto lines2 = header.lines("key2");
EXPECT_EQ(lines2.begin(), key_it);
EXPECT_EQ(lines2.end(), header.header_lines_key_end());
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key2", key_it->first);
EXPECT_EQ("value_2", key_it->second);
++key_it;
EXPECT_NE(header.lines().end(), key_it);
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("key2", key_it->first);
EXPECT_EQ("", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
key_it = header.GetIteratorForKey("a");
EXPECT_NE(header.lines().end(), key_it);
const auto lines3 = header.lines("a");
EXPECT_EQ(lines3.begin(), key_it);
EXPECT_EQ(lines3.end(), header.header_lines_key_end());
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("a", key_it->first);
EXPECT_EQ("value_a", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
key_it = header.GetIteratorForKey("b");
EXPECT_NE(header.lines().end(), key_it);
const auto lines4 = header.lines("b");
EXPECT_EQ(lines4.begin(), key_it);
EXPECT_EQ(lines4.end(), header.header_lines_key_end());
EXPECT_NE(header.header_lines_key_end(), key_it);
EXPECT_EQ("b", key_it->first);
EXPECT_EQ("value_b", key_it->second);
++key_it;
EXPECT_EQ(header.lines().end(), key_it);
EXPECT_EQ(header.header_lines_key_end(), key_it);
}
TEST(BalsaHeaders, GetAllOfHeaderAsStringDoesWhatItSays) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("Key", "value_2");
header.AppendHeader("key", "");
header.AppendHeader("KEY", "value_1");
std::string result = header.GetAllOfHeaderAsString("key");
EXPECT_EQ("value_1,value_2,,value_1", result);
}
TEST(BalsaHeaders, RemoveAllOfHeaderDoesWhatItSays) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
ASSERT_NE(header.lines().begin(), header.lines().end());
header.RemoveAllOfHeader("key");
ASSERT_EQ(header.lines().begin(), header.lines().end());
}
TEST(BalsaHeaders,
RemoveAllOfHeaderDoesWhatItSaysEvenWhenThingsHaveBeenErased) {
BalsaHeaders header;
header.AppendHeader("key1", "value_1");
header.AppendHeader("key1", "value_2");
header.AppendHeader("key2", "value_3");
header.AppendHeader("key1", "value_4");
header.AppendHeader("key2", "value_5");
header.AppendHeader("key1", "value_6");
ASSERT_NE(header.lines().begin(), header.lines().end());
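  // Pre-erase two of the "key1" lines ("value_4", then "value_2") before
  // calling RemoveAllOfHeader("key1").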
BalsaHeaders::const_header_lines_iterator chli = header.lines().begin();
++chli;
++chli;
++chli;
header.erase(chli);
chli = header.lines().begin();
++chli;
header.erase(chli);
header.RemoveAllOfHeader("key1");
for (const auto& line : header.lines()) {
EXPECT_NE(std::string("key1"), line.first);
}
}
TEST(BalsaHeaders, RemoveAllOfHeaderDoesNothingWhenNoKeyOfThatNameExists) {
BalsaHeaders header;
header.AppendHeader("key", "value_1");
header.AppendHeader("key", "value_2");
ASSERT_NE(header.lines().begin(), header.lines().end());
header.RemoveAllOfHeader("foo");
int num_found = 0;
for (const auto& line : header.lines()) {
++num_found;
EXPECT_EQ(absl::string_view("key"), line.first);
}
EXPECT_EQ(2, num_found);
EXPECT_NE(header.lines().begin(), header.lines().end());
}
TEST(BalsaHeaders, WriteHeaderEndingToBuffer) {
BalsaHeaders header;
SimpleBuffer simple_buffer;
header.WriteHeaderEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq("\r\n"));
}
TEST(BalsaHeaders, WriteToBufferDoesntCrashWithUninitializedHeader) {
BalsaHeaders header;
SimpleBuffer simple_buffer;
header.WriteHeaderAndEndingToBuffer(&simple_buffer);
}
TEST(BalsaHeaders, WriteToBufferWorksWithBalsaHeadersParsedByFramer) {
std::string input =
"GET / HTTP/1.0\r\n"
"key_with_value: value\r\n"
"key_with_continuation_value: \r\n"
" with continuation\r\n"
"key_with_two_continuation_value: \r\n"
" continuation 1\r\n"
" continuation 2\r\n"
"a: foo \r\n"
"b-s:\n"
" bar\t\n"
"foo: \r\n"
"bazzzzzzzleriffic!: snaps\n"
"\n";
std::string expected =
"GET / HTTP/1.0\r\n"
"key_with_value: value\r\n"
"key_with_continuation_value: with continuation\r\n"
"key_with_two_continuation_value: continuation 1\r\n"
" continuation 2\r\n"
"a: foo\r\n"
"b-s: bar\r\n"
"foo: \r\n"
"bazzzzzzzleriffic!: snaps\r\n"
"\r\n";
BalsaHeaders headers = CreateHTTPHeaders(true, input);
SimpleBuffer simple_buffer;
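  // GetSizeForWriteBuffer() should predict exactly how many bytes are
  // written below.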
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
EXPECT_EQ(expected_write_buffer_size,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders,
WriteToBufferWorksWithBalsaHeadersParsedByFramerTabContinuations) {
std::string input =
"GET / HTTP/1.0\r\n"
"key_with_value: value\r\n"
"key_with_continuation_value: \r\n"
"\twith continuation\r\n"
"key_with_two_continuation_value: \r\n"
"\tcontinuation 1\r\n"
"\tcontinuation 2\r\n"
"a: foo \r\n"
"b-s:\n"
"\tbar\t\n"
"foo: \r\n"
"bazzzzzzzleriffic!: snaps\n"
"\n";
std::string expected =
"GET / HTTP/1.0\r\n"
"key_with_value: value\r\n"
"key_with_continuation_value: with continuation\r\n"
"key_with_two_continuation_value: continuation 1\r\n"
"\tcontinuation 2\r\n"
"a: foo\r\n"
"b-s: bar\r\n"
"foo: \r\n"
"bazzzzzzzleriffic!: snaps\r\n"
"\r\n";
BalsaHeaders headers = CreateHTTPHeaders(true, input);
SimpleBuffer simple_buffer;
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
EXPECT_EQ(expected_write_buffer_size,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders, WriteToBufferWorksWhenFirstlineSetThroughHeaders) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
std::string expected =
"GET / HTTP/1.0\r\n"
"\r\n";
SimpleBuffer simple_buffer;
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
EXPECT_EQ(expected_write_buffer_size,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders, WriteToBufferWorksWhenSetThroughHeaders) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key 2", "value\n 2");
headers.AppendHeader("key\n 3", "value3");
std::string expected =
"GET / HTTP/1.0\r\n"
"key1: value1\r\n"
"key 2: value\n"
" 2\r\n"
"key\n"
" 3: value3\r\n"
"\r\n";
SimpleBuffer simple_buffer;
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
EXPECT_EQ(expected_write_buffer_size,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders, WriteToBufferWorksWhenOnlyLinesSetThroughHeaders) {
BalsaHeaders headers;
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key 2", "value\n 2");
headers.AppendHeader("key\n 3", "value3");
std::string expected =
"\r\n"
"key1: value1\r\n"
"key 2: value\n"
" 2\r\n"
"key\n"
" 3: value3\r\n"
"\r\n";
SimpleBuffer simple_buffer;
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
EXPECT_EQ(expected_write_buffer_size,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders, WriteToBufferWorksWhenSetThroughHeadersWithElementsErased) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key 2", "value\n 2");
headers.AppendHeader("key\n 3", "value3");
headers.RemoveAllOfHeader("key1");
headers.RemoveAllOfHeader("key\n 3");
std::string expected =
"GET / HTTP/1.0\r\n"
"key 2: value\n"
" 2\r\n"
"\r\n";
SimpleBuffer simple_buffer;
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
EXPECT_EQ(expected_write_buffer_size,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders, WriteToBufferWithManuallyAppendedHeaderLine) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key 2", "value\n 2");
std::string expected =
"GET / HTTP/1.0\r\n"
"key1: value1\r\n"
"key 2: value\n"
" 2\r\n"
"key 3: value 3\r\n"
"\r\n";
SimpleBuffer simple_buffer;
size_t expected_write_buffer_size = headers.GetSizeForWriteBuffer();
headers.WriteToBuffer(&simple_buffer);
headers.WriteHeaderLineToBuffer(&simple_buffer, "key 3", "value 3",
BalsaHeaders::CaseOption::kNoModification);
headers.WriteHeaderEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected));
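  // The manually appended "key 3: value 3\r\n" line accounts for the extra
  // 16 bytes beyond GetSizeForWriteBuffer().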
EXPECT_EQ(expected_write_buffer_size + 16,
static_cast<size_t>(simple_buffer.ReadableBytes()));
}
TEST(BalsaHeaders, DumpToStringEmptyHeaders) {
BalsaHeaders headers;
std::string headers_str;
headers.DumpToString(&headers_str);
EXPECT_EQ("\n <empty header>\n", headers_str);
}
TEST(BalsaHeaders, DumpToStringParsedHeaders) {
std::string input =
"GET / HTTP/1.0\r\n"
"Header1: value\r\n"
"Header2: value\r\n"
"\r\n";
std::string output =
"\n"
" GET / HTTP/1.0\n"
" Header1: value\n"
" Header2: value\n";
BalsaHeaders headers = CreateHTTPHeaders(true, input);
std::string headers_str;
headers.DumpToString(&headers_str);
EXPECT_EQ(output, headers_str);
EXPECT_TRUE(headers.FramerIsDoneWriting());
}
TEST(BalsaHeaders, DumpToStringPartialHeaders) {
BalsaHeaders headers;
BalsaFrame balsa_frame;
balsa_frame.set_is_request(true);
balsa_frame.set_balsa_headers(&headers);
std::string input =
"GET / HTTP/1.0\r\n"
"Header1: value\r\n"
"Header2: value\r\n";
std::string output = absl::StrFormat("\n <incomplete header len: %d>\n ",
static_cast<int>(input.size()));
output += input;
output += '\n';
ASSERT_EQ(input.size(), balsa_frame.ProcessInput(input.data(), input.size()));
ASSERT_FALSE(balsa_frame.MessageFullyRead());
std::string headers_str;
headers.DumpToString(&headers_str);
EXPECT_EQ(output, headers_str);
EXPECT_FALSE(headers.FramerIsDoneWriting());
}
TEST(BalsaHeaders, DumpToStringParsingNonHeadersData) {
BalsaHeaders headers;
BalsaFrame balsa_frame;
balsa_frame.set_is_request(true);
balsa_frame.set_balsa_headers(&headers);
std::string input =
"This is not a header. "
"Just some random data to simulate mismatch.";
std::string output = absl::StrFormat("\n <incomplete header len: %d>\n ",
static_cast<int>(input.size()));
output += input;
output += '\n';
ASSERT_EQ(input.size(), balsa_frame.ProcessInput(input.data(), input.size()));
ASSERT_FALSE(balsa_frame.MessageFullyRead());
std::string headers_str;
headers.DumpToString(&headers_str);
EXPECT_EQ(output, headers_str);
}
TEST(BalsaHeaders, Clear) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key 2", "value\n 2");
headers.AppendHeader("key\n 3", "value3");
headers.RemoveAllOfHeader("key1");
headers.RemoveAllOfHeader("key\n 3");
headers.Clear();
EXPECT_TRUE(headers.first_line().empty());
EXPECT_EQ(headers.lines().begin(), headers.lines().end());
EXPECT_TRUE(headers.IsEmpty());
}
TEST(BalsaHeaders,
TestSetFromStringPiecesWithInitialFirstlineInHeaderStreamAndNewToo) {
BalsaHeaders headers = CreateHTTPHeaders(false,
"HTTP/1.1 200 reason phrase\r\n"
"content-length: 0\r\n"
"\r\n");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
headers.SetResponseFirstline("HTTP/1.0", 404, "a reason");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.0"));
EXPECT_THAT(headers.response_code(), StrEq("404"));
EXPECT_THAT(headers.parsed_response_code(), Eq(404));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("a reason"));
EXPECT_THAT(headers.first_line(), StrEq("HTTP/1.0 404 a reason"));
}
TEST(BalsaHeaders,
TestSetFromStringPiecesWithInitialFirstlineInHeaderStreamButNotNew) {
BalsaHeaders headers = CreateHTTPHeaders(false,
"HTTP/1.1 200 reason phrase\r\n"
"content-length: 0\r\n"
"\r\n");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
headers.SetResponseFirstline("HTTP/1.000", 404000,
"supercalifragilisticexpealidocious");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.000"));
EXPECT_THAT(headers.response_code(), StrEq("404000"));
EXPECT_THAT(headers.parsed_response_code(), Eq(404000));
EXPECT_THAT(headers.response_reason_phrase(),
StrEq("supercalifragilisticexpealidocious"));
EXPECT_THAT(headers.first_line(),
StrEq("HTTP/1.000 404000 supercalifragilisticexpealidocious"));
}
TEST(BalsaHeaders,
TestSetFromStringPiecesWithFirstFirstlineInHeaderStreamButNotNew2) {
SCOPED_TRACE(
"This test tests the codepath where the new firstline is"
" too large to fit within the space used by the original"
" firstline, but large enuogh to space in the free space"
" available in both firstline plus the space made available"
" with deleted header lines (specifically, the first one");
BalsaHeaders headers = CreateHTTPHeaders(
false,
"HTTP/1.1 200 reason phrase\r\n"
"a: 0987123409871234078130948710938471093827401983740198327401982374\r\n"
"content-length: 0\r\n"
"\r\n");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
headers.erase(headers.lines().begin());
headers.SetResponseFirstline("HTTP/1.000", 404000,
"supercalifragilisticexpealidocious");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.000"));
EXPECT_THAT(headers.response_code(), StrEq("404000"));
EXPECT_THAT(headers.parsed_response_code(), Eq(404000));
EXPECT_THAT(headers.response_reason_phrase(),
StrEq("supercalifragilisticexpealidocious"));
EXPECT_THAT(headers.first_line(),
StrEq("HTTP/1.000 404000 supercalifragilisticexpealidocious"));
}
TEST(BalsaHeaders, TestSetFirstlineFromStringPiecesWithNoInitialFirstline) {
BalsaHeaders headers;
headers.SetResponseFirstline("HTTP/1.1", 200, "don't need a reason");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.parsed_response_code(), Eq(200));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("don't need a reason"));
EXPECT_THAT(headers.first_line(), StrEq("HTTP/1.1 200 don't need a reason"));
}
TEST(BalsaHeaders, TestSettingFirstlineElementsWithOtherElementsMissing) {
{
BalsaHeaders headers;
headers.SetRequestMethod("GET");
headers.SetRequestUri("/");
EXPECT_THAT(headers.first_line(), StrEq("GET / "));
}
{
BalsaHeaders headers;
headers.SetRequestMethod("GET");
headers.SetRequestVersion("HTTP/1.1");
EXPECT_THAT(headers.first_line(), StrEq("GET HTTP/1.1"));
}
{
BalsaHeaders headers;
headers.SetRequestUri("/");
headers.SetRequestVersion("HTTP/1.1");
EXPECT_THAT(headers.first_line(), StrEq(" / HTTP/1.1"));
}
}
TEST(BalsaHeaders, TestSettingMissingFirstlineElementsAfterBalsaHeadersParsed) {
{
BalsaHeaders headers = CreateHTTPHeaders(true, "GET /foo\r\n");
ASSERT_THAT(headers.first_line(), StrEq("GET /foo"));
headers.SetRequestVersion("HTTP/1.1");
EXPECT_THAT(headers.first_line(), StrEq("GET /foo HTTP/1.1"));
}
{
BalsaHeaders headers = CreateHTTPHeaders(true, "GET\r\n");
ASSERT_THAT(headers.first_line(), StrEq("GET"));
headers.SetRequestUri("/foo");
EXPECT_THAT(headers.first_line(), StrEq("GET /foo "));
}
}
TEST(BalsaHeaders,
SetFirstlineFromStringPiecesFirstInAdditionalDataAndNewLarger) {
BalsaHeaders headers;
headers.SetResponseFirstline("HTTP/1.1", 200, "don't need a reason");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.parsed_response_code(), Eq(200));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("don't need a reason"));
EXPECT_THAT(headers.first_line(), StrEq("HTTP/1.1 200 don't need a reason"));
headers.SetResponseFirstline("HTTP/1.10", 2000, "REALLY don't need a reason");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.10"));
EXPECT_THAT(headers.response_code(), StrEq("2000"));
EXPECT_THAT(headers.parsed_response_code(), Eq(2000));
EXPECT_THAT(headers.response_reason_phrase(),
StrEq("REALLY don't need a reason"));
EXPECT_THAT(headers.first_line(),
StrEq("HTTP/1.10 2000 REALLY don't need a reason"));
}
TEST(BalsaHeaders,
TestSetFirstlineFromStringPiecesWithPreviousInAdditionalDataNewSmaller) {
BalsaHeaders headers;
headers.SetResponseFirstline("HTTP/1.10", 2000, "REALLY don't need a reason");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.10"));
EXPECT_THAT(headers.response_code(), StrEq("2000"));
EXPECT_THAT(headers.parsed_response_code(), Eq(2000));
EXPECT_THAT(headers.response_reason_phrase(),
StrEq("REALLY don't need a reason"));
EXPECT_THAT(headers.first_line(),
StrEq("HTTP/1.10 2000 REALLY don't need a reason"));
headers.SetResponseFirstline("HTTP/1.0", 200, "a reason");
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.0"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.parsed_response_code(), Eq(200));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("a reason"));
EXPECT_THAT(headers.first_line(), StrEq("HTTP/1.0 200 a reason"));
}
TEST(BalsaHeaders, CopyFrom) {
BalsaHeaders headers1, headers2;
absl::string_view method("GET");
absl::string_view uri("/foo");
absl::string_view version("HTTP/1.0");
headers1.SetRequestFirstlineFromStringPieces(method, uri, version);
headers1.AppendHeader("key1", "value1");
headers1.AppendHeader("key 2", "value\n 2");
headers1.AppendHeader("key\n 3", "value3");
headers2.CopyFrom(headers1);
EXPECT_THAT(headers1.first_line(), StrEq("GET /foo HTTP/1.0"));
BalsaHeaders::const_header_lines_iterator chli = headers1.lines().begin();
EXPECT_THAT(chli->first, StrEq("key1"));
EXPECT_THAT(chli->second, StrEq("value1"));
++chli;
EXPECT_THAT(chli->first, StrEq("key 2"));
EXPECT_THAT(chli->second, StrEq("value\n 2"));
++chli;
EXPECT_THAT(chli->first, StrEq("key\n 3"));
EXPECT_THAT(chli->second, StrEq("value3"));
++chli;
EXPECT_EQ(headers1.lines().end(), chli);
EXPECT_THAT(headers1.request_method(),
StrEq((std::string(headers2.request_method()))));
EXPECT_THAT(headers1.request_uri(),
StrEq((std::string(headers2.request_uri()))));
EXPECT_THAT(headers1.request_version(),
StrEq((std::string(headers2.request_version()))));
EXPECT_THAT(headers2.first_line(), StrEq("GET /foo HTTP/1.0"));
chli = headers2.lines().begin();
EXPECT_THAT(chli->first, StrEq("key1"));
EXPECT_THAT(chli->second, StrEq("value1"));
++chli;
EXPECT_THAT(chli->first, StrEq("key 2"));
EXPECT_THAT(chli->second, StrEq("value\n 2"));
++chli;
EXPECT_THAT(chli->first, StrEq("key\n 3"));
EXPECT_THAT(chli->second, StrEq("value3"));
++chli;
EXPECT_EQ(headers2.lines().end(), chli);
version = absl::string_view("HTTP/1.1");
int code = 200;
absl::string_view reason_phrase("reason phrase asdf");
headers1.RemoveAllOfHeader("key1");
headers1.AppendHeader("key4", "value4");
headers1.SetResponseFirstline(version, code, reason_phrase);
headers2.CopyFrom(headers1);
EXPECT_THAT(headers1.request_method(),
StrEq((std::string(headers2.request_method()))));
EXPECT_THAT(headers1.request_uri(),
StrEq((std::string(headers2.request_uri()))));
EXPECT_THAT(headers1.request_version(),
StrEq((std::string(headers2.request_version()))));
EXPECT_THAT(headers2.first_line(), StrEq("HTTP/1.1 200 reason phrase asdf"));
chli = headers2.lines().begin();
EXPECT_THAT(chli->first, StrEq("key 2"));
EXPECT_THAT(chli->second, StrEq("value\n 2"));
++chli;
EXPECT_THAT(chli->first, StrEq("key\n 3"));
EXPECT_THAT(chli->second, StrEq("value3"));
++chli;
EXPECT_THAT(chli->first, StrEq("key4"));
EXPECT_THAT(chli->second, StrEq("value4"));
++chli;
EXPECT_EQ(headers2.lines().end(), chli);
}
TEST(BalsaHeaders, Move) {
BalsaHeaders headers1, headers3;
absl::string_view method("GET");
absl::string_view uri("/foo");
absl::string_view version("HTTP/1.0");
headers1.SetRequestFirstlineFromStringPieces(method, uri, version);
headers1.AppendHeader("key1", "value1");
headers1.AppendHeader("key 2", "value\n 2");
headers1.AppendHeader("key\n 3", "value3");
BalsaHeaders headers2 = std::move(headers1);
EXPECT_EQ("GET /foo HTTP/1.0", headers2.first_line());
BalsaHeaders::const_header_lines_iterator chli = headers2.lines().begin();
EXPECT_EQ("key1", chli->first);
EXPECT_EQ("value1", chli->second);
++chli;
EXPECT_EQ("key 2", chli->first);
EXPECT_EQ("value\n 2", chli->second);
++chli;
EXPECT_EQ("key\n 3", chli->first);
EXPECT_EQ("value3", chli->second);
++chli;
EXPECT_EQ(headers2.lines().end(), chli);
EXPECT_EQ("GET", headers2.request_method());
EXPECT_EQ("/foo", headers2.request_uri());
EXPECT_EQ("HTTP/1.0", headers2.request_version());
headers3 = std::move(headers2);
version = absl::string_view("HTTP/1.1");
int code = 200;
absl::string_view reason_phrase("reason phrase asdf");
headers3.RemoveAllOfHeader("key1");
headers3.AppendHeader("key4", "value4");
headers3.SetResponseFirstline(version, code, reason_phrase);
BalsaHeaders headers4 = std::move(headers3);
EXPECT_EQ("200", headers4.response_code());
EXPECT_EQ("reason phrase asdf", headers4.response_reason_phrase());
EXPECT_EQ("HTTP/1.1", headers4.response_version());
EXPECT_EQ("HTTP/1.1 200 reason phrase asdf", headers4.first_line());
chli = headers4.lines().begin();
EXPECT_EQ("key 2", chli->first);
EXPECT_EQ("value\n 2", chli->second);
++chli;
EXPECT_EQ("key\n 3", chli->first);
EXPECT_EQ("value3", chli->second);
++chli;
EXPECT_EQ("key4", chli->first);
EXPECT_EQ("value4", chli->second);
++chli;
EXPECT_EQ(headers4.lines().end(), chli);
}
TEST(BalsaHeaders, IteratorWorksWithOStreamAsExpected) {
{
std::stringstream actual;
BalsaHeaders::const_header_lines_iterator chli;
actual << chli;
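    // How a null pointer is printed varies by platform and standard
    // library, hence the AnyOf below.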
EXPECT_THAT(actual.str(), AnyOf(StrEq("[0, 0]"),
StrEq("[(nil), 0]"),
StrEq("[0x0, 0]")));
}
{
BalsaHeaders headers;
std::stringstream actual;
BalsaHeaders::const_header_lines_iterator chli = headers.lines().begin();
actual << chli;
std::stringstream expected;
expected << "[" << &headers << ", 0]";
EXPECT_THAT(expected.str(), StrEq(actual.str()));
}
}
TEST(BalsaHeaders, TestSetResponseReasonPhraseWithNoInitialFirstline) {
BalsaHeaders balsa_headers;
balsa_headers.SetResponseReasonPhrase("don't need a reason");
EXPECT_THAT(balsa_headers.first_line(), StrEq(" don't need a reason"));
EXPECT_TRUE(balsa_headers.response_version().empty());
EXPECT_TRUE(balsa_headers.response_code().empty());
EXPECT_THAT(balsa_headers.response_reason_phrase(),
StrEq("don't need a reason"));
}
TEST(BalsaHeaders, TestSetResponseReasonPhrase) {
const char* response_reason_phrases[] = {
"qwerty asdfgh",
"qwerty",
"qwerty asdfghjkl",
};
size_t arraysize_squared = (ABSL_ARRAYSIZE(response_reason_phrases) *
ABSL_ARRAYSIZE(response_reason_phrases));
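  // Iterate over every ordered pair of phrases to exercise same-size,
  // shrinking, and growing firstline rewrites.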
for (size_t iteration = 0; iteration < arraysize_squared; ++iteration) {
SCOPED_TRACE("Original firstline: \"HTTP/1.0 200 reason phrase\"");
BalsaHeaders headers = CreateHTTPHeaders(true,
"HTTP/1.0 200 reason phrase\r\n"
"content-length: 0\r\n"
"\r\n");
ASSERT_THAT(headers.first_line(), StrEq("HTTP/1.0 200 reason phrase"));
{
int first = iteration / ABSL_ARRAYSIZE(response_reason_phrases);
const char* response_reason_phrase_first = response_reason_phrases[first];
std::string expected_new_firstline =
absl::StrFormat("HTTP/1.0 200 %s", response_reason_phrase_first);
SCOPED_TRACE(absl::StrFormat("Then set response_reason_phrase(\"%s\")",
response_reason_phrase_first));
headers.SetResponseReasonPhrase(response_reason_phrase_first);
      EXPECT_THAT(headers.first_line(), StrEq(expected_new_firstline));
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.0"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(),
StrEq(response_reason_phrase_first));
}
{
int second = iteration % ABSL_ARRAYSIZE(response_reason_phrases);
const char* response_reason_phrase_second =
response_reason_phrases[second];
std::string expected_new_firstline =
absl::StrFormat("HTTP/1.0 200 %s", response_reason_phrase_second);
SCOPED_TRACE(absl::StrFormat("Then set response_reason_phrase(\"%s\")",
response_reason_phrase_second));
headers.SetResponseReasonPhrase(response_reason_phrase_second);
      EXPECT_THAT(headers.first_line(), StrEq(expected_new_firstline));
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.0"));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(),
StrEq(response_reason_phrase_second));
}
}
}
TEST(BalsaHeaders, TestSetResponseVersionWithNoInitialFirstline) {
BalsaHeaders balsa_headers;
balsa_headers.SetResponseVersion("HTTP/1.1");
EXPECT_THAT(balsa_headers.first_line(), StrEq("HTTP/1.1 "));
EXPECT_THAT(balsa_headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_TRUE(balsa_headers.response_code().empty());
EXPECT_TRUE(balsa_headers.response_reason_phrase().empty());
}
TEST(BalsaHeaders, TestSetResponseVersion) {
const char* response_versions[] = {
"ABCD/123",
"ABCD",
"ABCD/123456",
};
size_t arraysize_squared =
(ABSL_ARRAYSIZE(response_versions) * ABSL_ARRAYSIZE(response_versions));
for (size_t iteration = 0; iteration < arraysize_squared; ++iteration) {
SCOPED_TRACE("Original firstline: \"HTTP/1.0 200 reason phrase\"");
BalsaHeaders headers = CreateHTTPHeaders(false,
"HTTP/1.0 200 reason phrase\r\n"
"content-length: 0\r\n"
"\r\n");
ASSERT_THAT(headers.first_line(), StrEq("HTTP/1.0 200 reason phrase"));
{
int first = iteration / ABSL_ARRAYSIZE(response_versions);
const char* response_version_first = response_versions[first];
std::string expected_new_firstline =
absl::StrFormat("%s 200 reason phrase", response_version_first);
SCOPED_TRACE(absl::StrFormat("Then set response_version(\"%s\")",
response_version_first));
headers.SetResponseVersion(response_version_first);
EXPECT_THAT(headers.first_line(), StrEq(expected_new_firstline));
EXPECT_THAT(headers.response_version(), StrEq(response_version_first));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
}
{
int second = iteration % ABSL_ARRAYSIZE(response_versions);
const char* response_version_second = response_versions[second];
std::string expected_new_firstline =
absl::StrFormat("%s 200 reason phrase", response_version_second);
SCOPED_TRACE(absl::StrFormat("Then set response_version(\"%s\")",
response_version_second));
headers.SetResponseVersion(response_version_second);
EXPECT_THAT(headers.first_line(), StrEq(expected_new_firstline));
EXPECT_THAT(headers.response_version(), StrEq(response_version_second));
EXPECT_THAT(headers.response_code(), StrEq("200"));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
}
}
}
TEST(BalsaHeaders, TestSetResponseReasonAndVersionWithNoInitialFirstline) {
BalsaHeaders headers;
headers.SetResponseVersion("HTTP/1.1");
headers.SetResponseReasonPhrase("don't need a reason");
EXPECT_THAT(headers.first_line(), StrEq("HTTP/1.1 don't need a reason"));
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.1"));
EXPECT_TRUE(headers.response_code().empty());
EXPECT_THAT(headers.response_reason_phrase(), StrEq("don't need a reason"));
}
TEST(BalsaHeaders, TestSetResponseCodeWithNoInitialFirstline) {
BalsaHeaders balsa_headers;
balsa_headers.SetParsedResponseCodeAndUpdateFirstline(2002);
EXPECT_THAT(balsa_headers.first_line(), StrEq(" 2002 "));
EXPECT_TRUE(balsa_headers.response_version().empty());
EXPECT_THAT(balsa_headers.response_code(), StrEq("2002"));
EXPECT_TRUE(balsa_headers.response_reason_phrase().empty());
EXPECT_THAT(balsa_headers.parsed_response_code(), Eq(2002));
}
TEST(BalsaHeaders, TestSetParsedResponseCode) {
BalsaHeaders balsa_headers;
balsa_headers.set_parsed_response_code(std::numeric_limits<int>::max());
EXPECT_THAT(balsa_headers.parsed_response_code(),
Eq(std::numeric_limits<int>::max()));
}
TEST(BalsaHeaders, TestSetResponseCode) {
  const char* response_codes[] = {
      "200",
      "23",
      "200200",
  };
size_t arraysize_squared =
(ABSL_ARRAYSIZE(response_codes) * ABSL_ARRAYSIZE(response_codes));
for (size_t iteration = 0; iteration < arraysize_squared; ++iteration) {
SCOPED_TRACE("Original firstline: \"HTTP/1.0 200 reason phrase\"");
BalsaHeaders headers = CreateHTTPHeaders(false,
"HTTP/1.0 200 reason phrase\r\n"
"content-length: 0\r\n"
"\r\n");
ASSERT_THAT(headers.first_line(), StrEq("HTTP/1.0 200 reason phrase"));
{
int first = iteration / ABSL_ARRAYSIZE(response_codes);
const char* response_code_first = response_codes[first];
std::string expected_new_firstline =
absl::StrFormat("HTTP/1.0 %s reason phrase", response_code_first);
SCOPED_TRACE(absl::StrFormat("Then set response_code(\"%s\")",
response_code_first));
headers.SetResponseCode(response_code_first);
EXPECT_THAT(headers.first_line(), StrEq(expected_new_firstline));
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.0"));
EXPECT_THAT(headers.response_code(), StrEq(response_code_first));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
}
{
int second = iteration % ABSL_ARRAYSIZE(response_codes);
const char* response_code_second = response_codes[second];
std::string expected_new_secondline =
absl::StrFormat("HTTP/1.0 %s reason phrase", response_code_second);
SCOPED_TRACE(absl::StrFormat("Then set response_code(\"%s\")",
response_code_second));
headers.SetResponseCode(response_code_second);
EXPECT_THAT(headers.first_line(), StrEq(expected_new_secondline));
EXPECT_THAT(headers.response_version(), StrEq("HTTP/1.0"));
EXPECT_THAT(headers.response_code(), StrEq(response_code_second));
EXPECT_THAT(headers.response_reason_phrase(), StrEq("reason phrase"));
}
}
}
TEST(BalsaHeaders, TestAppendToHeader) {
BalsaHeaders headers;
headers.AppendHeader("foo", "foo_value");
headers.AppendHeader("bar", "bar_value");
headers.AppendToHeader("foo", "foo_value2");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value,foo_value2"));
EXPECT_THAT(headers.GetHeader("bar"), StrEq("bar_value"));
}
TEST(BalsaHeaders, TestInitialAppend) {
BalsaHeaders headers;
headers.AppendToHeader("foo", "foo_value");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value"));
headers.AppendToHeader("foo", "foo_value2");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value,foo_value2"));
}
TEST(BalsaHeaders, TestAppendAndRemove) {
BalsaHeaders headers;
headers.AppendToHeader("foo", "foo_value");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value"));
headers.AppendToHeader("foo", "foo_value2");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value,foo_value2"));
headers.RemoveAllOfHeader("foo");
headers.AppendToHeader("foo", "foo_value3");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value3"));
headers.AppendToHeader("foo", "foo_value4");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value3,foo_value4"));
}
TEST(BalsaHeaders, TestAppendToHeaderWithCommaAndSpace) {
BalsaHeaders headers;
headers.AppendHeader("foo", "foo_value");
headers.AppendHeader("bar", "bar_value");
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value2");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value, foo_value2"));
EXPECT_THAT(headers.GetHeader("bar"), StrEq("bar_value"));
}
TEST(BalsaHeaders, TestInitialAppendWithCommaAndSpace) {
BalsaHeaders headers;
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value"));
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value2");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value, foo_value2"));
}
TEST(BalsaHeaders, TestAppendWithCommaAndSpaceAndRemove) {
BalsaHeaders headers;
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value"));
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value2");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value, foo_value2"));
headers.RemoveAllOfHeader("foo");
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value3");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value3"));
headers.AppendToHeaderWithCommaAndSpace("foo", "foo_value4");
EXPECT_THAT(headers.GetHeader("foo"), StrEq("foo_value3, foo_value4"));
}
TEST(BalsaHeaders, SetContentLength) {
BalsaHeaders headers;
headers.SetContentLength(10);
EXPECT_THAT(headers.GetHeader("Content-length"), StrEq("10"));
EXPECT_EQ(BalsaHeadersEnums::VALID_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_TRUE(headers.content_length_valid());
headers.SetContentLength(0);
EXPECT_THAT(headers.GetHeader("Content-length"), StrEq("0"));
EXPECT_EQ(BalsaHeadersEnums::VALID_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_TRUE(headers.content_length_valid());
BalsaHeaders::const_header_lines_iterator iter =
headers.GetHeaderPosition("Content-length");
EXPECT_EQ(headers.lines().begin(), iter);
EXPECT_EQ(headers.lines().end(), ++iter);
headers.SetContentLength(0);
EXPECT_THAT(headers.GetHeader("Content-length"), StrEq("0"));
EXPECT_EQ(BalsaHeadersEnums::VALID_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_TRUE(headers.content_length_valid());
iter = headers.GetHeaderPosition("Content-length");
EXPECT_EQ(headers.lines().begin(), iter);
EXPECT_EQ(headers.lines().end(), ++iter);
}
TEST(BalsaHeaders, ToggleChunkedEncoding) {
BalsaHeaders headers;
headers.SetTransferEncodingToChunkedAndClearContentLength();
EXPECT_EQ("chunked", headers.GetAllOfHeaderAsString("Transfer-Encoding"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("Transfer-Encoding"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("transfer-encoding"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("transfer"));
EXPECT_TRUE(headers.transfer_encoding_is_chunked());
headers.SetTransferEncodingToChunkedAndClearContentLength();
EXPECT_EQ("chunked", headers.GetAllOfHeaderAsString("Transfer-Encoding"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("Transfer-Encoding"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("transfer-encoding"));
EXPECT_TRUE(headers.HasHeadersWithPrefix("transfer"));
EXPECT_TRUE(headers.transfer_encoding_is_chunked());
BalsaHeaders::const_header_lines_iterator iter =
headers.GetHeaderPosition("Transfer-Encoding");
EXPECT_EQ(headers.lines().begin(), iter);
EXPECT_EQ(headers.lines().end(), ++iter);
headers.SetNoTransferEncoding();
EXPECT_FALSE(headers.HasHeader("Transfer-Encoding"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("Transfer-Encoding"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("transfer-encoding"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("transfer"));
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
EXPECT_EQ(headers.lines().end(), headers.lines().begin());
headers.SetNoTransferEncoding();
EXPECT_FALSE(headers.HasHeader("Transfer-Encoding"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("Transfer-Encoding"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("transfer-encoding"));
EXPECT_FALSE(headers.HasHeadersWithPrefix("transfer"));
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
EXPECT_EQ(headers.lines().end(), headers.lines().begin());
}
TEST(BalsaHeaders, SetNoTransferEncodingByRemoveHeader) {
BalsaHeaders headers;
headers.SetTransferEncodingToChunkedAndClearContentLength();
headers.RemoveAllOfHeader("Transfer-Encoding");
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
headers.SetTransferEncodingToChunkedAndClearContentLength();
std::vector<absl::string_view> headers_to_remove;
headers_to_remove.emplace_back("Transfer-Encoding");
headers.RemoveAllOfHeaderInList(headers_to_remove);
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
headers.SetTransferEncodingToChunkedAndClearContentLength();
headers.RemoveAllHeadersWithPrefix("Transfer");
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
}
TEST(BalsaHeaders, ClearContentLength) {
BalsaHeaders headers;
headers.SetContentLength(10);
headers.ClearContentLength();
EXPECT_FALSE(headers.HasHeader("Content-length"));
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_FALSE(headers.content_length_valid());
headers.ClearContentLength();
EXPECT_FALSE(headers.HasHeader("Content-length"));
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_FALSE(headers.content_length_valid());
headers.SetTransferEncodingToChunkedAndClearContentLength();
headers.ClearContentLength();
EXPECT_EQ("chunked", headers.GetAllOfHeaderAsString("Transfer-Encoding"));
EXPECT_TRUE(headers.transfer_encoding_is_chunked());
BalsaHeaders::const_header_lines_iterator iter =
headers.GetHeaderPosition("Transfer-Encoding");
EXPECT_EQ(headers.lines().begin(), iter);
EXPECT_EQ(headers.lines().end(), ++iter);
headers.SetNoTransferEncoding();
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_FALSE(headers.content_length_valid());
}
TEST(BalsaHeaders, ClearContentLengthByRemoveHeader) {
BalsaHeaders headers;
headers.SetContentLength(10);
headers.RemoveAllOfHeader("Content-Length");
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_EQ(0u, headers.content_length());
EXPECT_FALSE(headers.content_length_valid());
headers.SetContentLength(11);
std::vector<absl::string_view> headers_to_remove;
headers_to_remove.emplace_back("Content-Length");
headers.RemoveAllOfHeaderInList(headers_to_remove);
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_EQ(0u, headers.content_length());
EXPECT_FALSE(headers.content_length_valid());
headers.SetContentLength(12);
headers.RemoveAllHeadersWithPrefix("Content");
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_EQ(0u, headers.content_length());
EXPECT_FALSE(headers.content_length_valid());
}
TEST(BalsaHeaders, IdentityCodingToChunked) {
std::string message =
"HTTP/1.1 200 OK\r\n"
"Transfer-Encoding: identity\r\n\r\n";
BalsaHeaders headers;
BalsaFrame balsa_frame;
balsa_frame.set_is_request(false);
balsa_frame.set_balsa_headers(&headers);
EXPECT_EQ(message.size(),
balsa_frame.ProcessInput(message.data(), message.size()));
EXPECT_TRUE(headers.is_framed_by_connection_close());
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
EXPECT_THAT(headers.GetAllOfHeader("Transfer-Encoding"),
ElementsAre("identity"));
headers.SetTransferEncodingToChunkedAndClearContentLength();
EXPECT_FALSE(headers.is_framed_by_connection_close());
EXPECT_TRUE(headers.transfer_encoding_is_chunked());
EXPECT_THAT(headers.GetAllOfHeader("Transfer-Encoding"),
ElementsAre("chunked"));
}
TEST(BalsaHeaders, SwitchContentLengthToChunk) {
BalsaHeaders headers;
headers.SetContentLength(10);
EXPECT_THAT(headers.GetHeader("Content-length"), StrEq("10"));
EXPECT_EQ(BalsaHeadersEnums::VALID_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_TRUE(headers.content_length_valid());
headers.SetTransferEncodingToChunkedAndClearContentLength();
EXPECT_EQ("chunked", headers.GetAllOfHeaderAsString("Transfer-Encoding"));
EXPECT_TRUE(headers.transfer_encoding_is_chunked());
EXPECT_FALSE(headers.HasHeader("Content-length"));
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_FALSE(headers.content_length_valid());
}
TEST(BalsaHeaders, SwitchChunkedToContentLength) {
BalsaHeaders headers;
headers.SetTransferEncodingToChunkedAndClearContentLength();
EXPECT_EQ("chunked", headers.GetAllOfHeaderAsString("Transfer-Encoding"));
EXPECT_TRUE(headers.transfer_encoding_is_chunked());
EXPECT_FALSE(headers.HasHeader("Content-length"));
EXPECT_EQ(BalsaHeadersEnums::NO_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_FALSE(headers.content_length_valid());
headers.SetContentLength(10);
EXPECT_THAT(headers.GetHeader("Content-length"), StrEq("10"));
EXPECT_EQ(BalsaHeadersEnums::VALID_CONTENT_LENGTH,
headers.content_length_status());
EXPECT_TRUE(headers.content_length_valid());
EXPECT_FALSE(headers.HasHeader("Transfer-Encoding"));
EXPECT_FALSE(headers.transfer_encoding_is_chunked());
}
TEST(BalsaHeaders, OneHundredResponseMessagesNoFramedByClose) {
BalsaHeaders headers;
headers.SetResponseFirstline("HTTP/1.1", 100, "Continue");
EXPECT_FALSE(headers.is_framed_by_connection_close());
}
TEST(BalsaHeaders, TwoOhFourResponseMessagesNoFramedByClose) {
BalsaHeaders headers;
headers.SetResponseFirstline("HTTP/1.1", 204, "Continue");
EXPECT_FALSE(headers.is_framed_by_connection_close());
}
TEST(BalsaHeaders, ThreeOhFourResponseMessagesNoFramedByClose) {
BalsaHeaders headers;
headers.SetResponseFirstline("HTTP/1.1", 304, "Continue");
EXPECT_FALSE(headers.is_framed_by_connection_close());
}
TEST(BalsaHeaders, InvalidCharInHeaderValue) {
std::string message =
"GET http:
"Host: \x01\x01www.265.com\r\n"
"\r\n";
BalsaHeaders headers = CreateHTTPHeaders(true, message);
EXPECT_EQ("www.265.com", headers.GetHeader("Host"));
SimpleBuffer buffer;
headers.WriteHeaderAndEndingToBuffer(&buffer);
message.replace(message.find_first_of(0x1), 2, "");
EXPECT_EQ(message, buffer.GetReadableRegion());
}
TEST(BalsaHeaders, CarriageReturnAtStartOfLine) {
std::string message =
"GET /foo HTTP/1.1\r\n"
"Host: www.265.com\r\n"
"Foo: bar\r\n"
"\rX-User-Ip: 1.2.3.4\r\n"
"\r\n";
BalsaHeaders headers;
BalsaFrame balsa_frame;
balsa_frame.set_is_request(true);
balsa_frame.set_balsa_headers(&headers);
EXPECT_EQ(message.size(),
balsa_frame.ProcessInput(message.data(), message.size()));
EXPECT_EQ(BalsaFrameEnums::INVALID_HEADER_FORMAT, balsa_frame.ErrorCode());
EXPECT_TRUE(balsa_frame.Error());
}
TEST(BalsaHeaders, CheckEmpty) {
BalsaHeaders headers;
EXPECT_TRUE(headers.IsEmpty());
}
TEST(BalsaHeaders, CheckNonEmpty) {
BalsaHeaders headers;
BalsaHeadersTestPeer::WriteFromFramer(&headers, "a b c", 5);
EXPECT_FALSE(headers.IsEmpty());
}
TEST(BalsaHeaders, ForEachHeader) {
BalsaHeaders headers;
headers.AppendHeader(":host", "SomeHost");
headers.AppendHeader("key", "val1,val2val2,val2,val3");
headers.AppendHeader("key", "val4val5val6");
headers.AppendHeader("key", "val11 val12");
headers.AppendHeader("key", "v val13");
headers.AppendHeader("key", "val7");
headers.AppendHeader("key", "");
headers.AppendHeader("key", "val8 , val9 ,, val10");
headers.AppendHeader("key", " val14 ");
headers.AppendHeader("key2", "val15");
headers.AppendHeader("key", "Val16");
headers.AppendHeader("key", "foo, Val17, bar");
headers.AppendHeader("date", "2 Jan 1970");
headers.AppendHeader("AcceptEncoding", "MyFavoriteEncoding");
{
std::string result;
EXPECT_TRUE(headers.ForEachHeader(
[&result](const absl::string_view key, absl::string_view value) {
result.append("<")
.append(key.data(), key.size())
.append("> = <")
.append(value.data(), value.size())
.append(">\n");
return true;
}));
EXPECT_EQ(result,
"<:host> = <SomeHost>\n"
"<key> = <val1,val2val2,val2,val3>\n"
"<key> = <val4val5val6>\n"
"<key> = <val11 val12>\n"
"<key> = <v val13>\n"
"<key> = <val7>\n"
"<key> = <>\n"
"<key> = <val8 , val9 ,, val10>\n"
"<key> = < val14 >\n"
"<key2> = <val15>\n"
"<key> = <Val16>\n"
"<key> = <foo, Val17, bar>\n"
"<date> = <2 Jan 1970>\n"
"<AcceptEncoding> = <MyFavoriteEncoding>\n");
}
{
std::string result;
EXPECT_FALSE(headers.ForEachHeader(
[&result](const absl::string_view key, absl::string_view value) {
result.append("<")
.append(key.data(), key.size())
.append("> = <")
.append(value.data(), value.size())
.append(">\n");
return !value.empty();
}));
EXPECT_EQ(result,
"<:host> = <SomeHost>\n"
"<key> = <val1,val2val2,val2,val3>\n"
"<key> = <val4val5val6>\n"
"<key> = <val11 val12>\n"
"<key> = <v val13>\n"
"<key> = <val7>\n"
"<key> = <>\n");
}
}
TEST(BalsaHeaders, WriteToBufferWithLowerCasedHeaderKey) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("Key1", "value1");
headers.AppendHeader("Key2", "value2");
std::string expected_lower_case =
"GET / HTTP/1.0\r\n"
"key1: value1\r\n"
"key2: value2\r\n";
std::string expected_lower_case_with_end =
"GET / HTTP/1.0\r\n"
"key1: value1\r\n"
"key2: value2\r\n\r\n";
std::string expected_upper_case =
"GET / HTTP/1.0\r\n"
"Key1: value1\r\n"
"Key2: value2\r\n";
std::string expected_upper_case_with_end =
"GET / HTTP/1.0\r\n"
"Key1: value1\r\n"
"Key2: value2\r\n\r\n";
SimpleBuffer simple_buffer;
headers.WriteToBuffer(&simple_buffer, BalsaHeaders::CaseOption::kLowercase,
BalsaHeaders::CoalesceOption::kNoCoalesce);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected_lower_case));
simple_buffer.Clear();
headers.WriteToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(), StrEq(expected_upper_case));
simple_buffer.Clear();
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_THAT(simple_buffer.GetReadableRegion(),
StrEq(expected_upper_case_with_end));
simple_buffer.Clear();
headers.WriteHeaderAndEndingToBuffer(
&simple_buffer, BalsaHeaders::CaseOption::kLowercase,
BalsaHeaders::CoalesceOption::kNoCoalesce);
EXPECT_THAT(simple_buffer.GetReadableRegion(),
StrEq(expected_lower_case_with_end));
}
TEST(BalsaHeaders, WriteToBufferWithProperCasedHeaderKey) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("Te", "value1");
headers.AppendHeader("my-Test-header", "value2");
std::string expected_proper_case =
"GET / HTTP/1.0\r\n"
"TE: value1\r\n"
"My-Test-Header: value2\r\n";
std::string expected_proper_case_with_end =
"GET / HTTP/1.0\r\n"
"TE: value1\r\n"
"My-Test-Header: value2\r\n\r\n";
std::string expected_unmodified =
"GET / HTTP/1.0\r\n"
"Te: value1\r\n"
"my-Test-header: value2\r\n";
std::string expected_unmodified_with_end =
"GET / HTTP/1.0\r\n"
"Te: value1\r\n"
"my-Test-header: value2\r\n\r\n";
SimpleBuffer simple_buffer;
headers.WriteToBuffer(&simple_buffer, BalsaHeaders::CaseOption::kPropercase,
BalsaHeaders::CoalesceOption::kNoCoalesce);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_proper_case);
simple_buffer.Clear();
headers.WriteToBuffer(&simple_buffer,
BalsaHeaders::CaseOption::kNoModification,
BalsaHeaders::CoalesceOption::kNoCoalesce);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_unmodified);
simple_buffer.Clear();
headers.WriteHeaderAndEndingToBuffer(
&simple_buffer, BalsaHeaders::CaseOption::kNoModification,
BalsaHeaders::CoalesceOption::kNoCoalesce);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_unmodified_with_end);
simple_buffer.Clear();
headers.WriteHeaderAndEndingToBuffer(
&simple_buffer, BalsaHeaders::CaseOption::kPropercase,
BalsaHeaders::CoalesceOption::kNoCoalesce);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_proper_case_with_end);
}
TEST(BalsaHeadersTest, ToPropercaseTest) {
EXPECT_EQ(BalsaHeaders::ToPropercase(""), "");
EXPECT_EQ(BalsaHeaders::ToPropercase("Foo"), "Foo");
EXPECT_EQ(BalsaHeaders::ToPropercase("foO"), "Foo");
EXPECT_EQ(BalsaHeaders::ToPropercase("my-test-header"), "My-Test-Header");
EXPECT_EQ(BalsaHeaders::ToPropercase("my--test-header"), "My--Test-Header");
}
TEST(BalsaHeaders, WriteToBufferCoalescingMultivaluedHeaders) {
BalsaHeaders::MultivaluedHeadersSet multivalued_headers;
multivalued_headers.insert("KeY1");
multivalued_headers.insert("another_KEY");
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("Key1", "value1");
headers.AppendHeader("Key2", "value2");
headers.AppendHeader("Key1", "value11");
headers.AppendHeader("Key2", "value21");
headers.AppendHeader("Key1", "multiples, values, already");
std::string expected_non_coalesced =
"GET / HTTP/1.0\r\n"
"Key1: value1\r\n"
"Key2: value2\r\n"
"Key1: value11\r\n"
"Key2: value21\r\n"
"Key1: multiples, values, already\r\n";
std::string expected_coalesced =
"Key1: value1,value11,multiples, values, already\r\n"
"Key2: value2\r\n"
"Key2: value21\r\n";
SimpleBuffer simple_buffer;
headers.WriteToBuffer(&simple_buffer);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_non_coalesced);
simple_buffer.Clear();
headers.WriteToBufferCoalescingMultivaluedHeaders(
&simple_buffer, multivalued_headers,
BalsaHeaders::CaseOption::kNoModification);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_coalesced);
}
TEST(BalsaHeaders, WriteToBufferCoalescingMultivaluedHeadersMultiLine) {
BalsaHeaders::MultivaluedHeadersSet multivalued_headers;
multivalued_headers.insert("Key 2");
multivalued_headers.insert("key\n 3");
BalsaHeaders headers;
headers.AppendHeader("key1", "value1");
headers.AppendHeader("key 2", "value\n 2");
headers.AppendHeader("key\n 3", "value3");
headers.AppendHeader("key 2", "value 21");
headers.AppendHeader("key 3", "value 33");
std::string expected_non_coalesced =
"\r\n"
"key1: value1\r\n"
"key 2: value\n"
" 2\r\n"
"key\n"
" 3: value3\r\n"
"key 2: value 21\r\n"
"key 3: value 33\r\n";
SimpleBuffer simple_buffer;
headers.WriteToBuffer(&simple_buffer);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_non_coalesced);
std::string expected_coalesced =
"key1: value1\r\n"
"key 2: value\n"
" 2,value 21\r\n"
"key\n"
" 3: value3\r\n"
"key 3: value 33\r\n";
simple_buffer.Clear();
headers.WriteToBufferCoalescingMultivaluedHeaders(
&simple_buffer, multivalued_headers,
BalsaHeaders::CaseOption::kNoModification);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_coalesced);
}
TEST(BalsaHeaders, WriteToBufferCoalescingEnvoyHeaders) {
BalsaHeaders headers;
headers.SetRequestFirstlineFromStringPieces("GET", "/", "HTTP/1.0");
headers.AppendHeader("User-Agent", "UserAgent1");
headers.AppendHeader("Key2", "value2");
headers.AppendHeader("USER-AGENT", "UA2");
headers.AppendHeader("Set-Cookie", "Cookie1=aaa");
headers.AppendHeader("user-agent", "agent3");
headers.AppendHeader("Set-Cookie", "Cookie2=bbb");
std::string expected_non_coalesced =
"GET / HTTP/1.0\r\n"
"User-Agent: UserAgent1\r\n"
"Key2: value2\r\n"
"USER-AGENT: UA2\r\n"
"Set-Cookie: Cookie1=aaa\r\n"
"user-agent: agent3\r\n"
"Set-Cookie: Cookie2=bbb\r\n"
"\r\n";
std::string expected_coalesced =
"GET / HTTP/1.0\r\n"
"User-Agent: UserAgent1,UA2,agent3\r\n"
"Key2: value2\r\n"
"Set-Cookie: Cookie1=aaa\r\n"
"Set-Cookie: Cookie2=bbb\r\n"
"\r\n";
SimpleBuffer simple_buffer;
headers.WriteHeaderAndEndingToBuffer(&simple_buffer);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_non_coalesced);
simple_buffer.Clear();
headers.WriteHeaderAndEndingToBuffer(
&simple_buffer, BalsaHeaders::CaseOption::kNoModification,
BalsaHeaders::CoalesceOption::kCoalesce);
EXPECT_EQ(simple_buffer.GetReadableRegion(), expected_coalesced);
}
TEST(BalsaHeadersTest, RemoveLastTokenFromOneLineHeader) {
BalsaHeaders headers =
CreateHTTPHeaders(true,
"GET /foo HTTP/1.1\r\n"
"Content-Length: 0\r\n"
"Content-Encoding: gzip, 3des, tar, prc\r\n\r\n");
BalsaHeaders::const_header_lines_key_iterator it =
headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip, 3des, tar, prc", it->second);
EXPECT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
it = headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip, 3des, tar", it->second);
EXPECT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
it = headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip, 3des", it->second);
EXPECT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
it = headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip", it->second);
EXPECT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
EXPECT_FALSE(headers.HasHeader("Content-Encoding"));
}
TEST(BalsaHeadersTest, RemoveLastTokenFromMultiLineHeader) {
BalsaHeaders headers =
CreateHTTPHeaders(true,
"GET /foo HTTP/1.1\r\n"
"Content-Length: 0\r\n"
"Content-Encoding: gzip, 3des\r\n"
"Content-Encoding: tar, prc\r\n\r\n");
BalsaHeaders::const_header_lines_key_iterator it =
headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip, 3des", it->second);
ASSERT_EQ("tar, prc", (++it)->second);
ASSERT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
it = headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip, 3des", it->second);
ASSERT_EQ("tar", (++it)->second);
ASSERT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
it = headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip, 3des", it->second);
ASSERT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
it = headers.GetIteratorForKey("Content-Encoding");
ASSERT_EQ("gzip", it->second);
ASSERT_EQ(headers.header_lines_key_end(), ++it);
headers.RemoveLastTokenFromHeaderValue("Content-Encoding");
EXPECT_FALSE(headers.HasHeader("Content-Encoding"));
}
TEST(BalsaHeadersTest, ResponseCanHaveBody) {
EXPECT_FALSE(BalsaHeaders::ResponseCanHaveBody(100));
EXPECT_FALSE(BalsaHeaders::ResponseCanHaveBody(101));
EXPECT_FALSE(BalsaHeaders::ResponseCanHaveBody(102));
EXPECT_FALSE(BalsaHeaders::ResponseCanHaveBody(204));
EXPECT_FALSE(BalsaHeaders::ResponseCanHaveBody(304));
EXPECT_TRUE(BalsaHeaders::ResponseCanHaveBody(200));
EXPECT_TRUE(BalsaHeaders::ResponseCanHaveBody(302));
EXPECT_TRUE(BalsaHeaders::ResponseCanHaveBody(404));
EXPECT_TRUE(BalsaHeaders::ResponseCanHaveBody(502));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/balsa/balsa_headers.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/balsa/balsa_headers_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
9662c336-34ba-4275-bdd6-126ea9e0e928 | cpp | tensorflow/tensorflow | depthwise_conv_hybrid | tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid.h | tensorflow/lite/kernels/depthwise_conv_hybrid_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
#include <algorithm>
#include <memory>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
namespace depthwise_conv {
inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
int32* acc_buffer) {
memset(acc_buffer, 0,
sizeof(acc_buffer[0]) * output_depth * num_output_pixels);
}
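// Hybrid depthwise convolution: int8 input and filter values are accumulated
// into 32-bit integers, and each accumulator is then dequantized to float
// using the per-batch input scale and the per-channel filter scale before the
// float bias and the activation clamp are applied.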
static void DoDepthwiseConvHybridGeneral(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim, int32* acc_buffer,
int32 acc_buffer_size) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const float output_activation_min = params.float_activation_min;
const float output_activation_max = params.float_activation_max;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int input_depth = input_shape.Dims(3);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
const int output_rows = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
TFLITE_DCHECK_GE(acc_buffer_size, output_depth);
const int kOutputPixelsInAccBuffer = acc_buffer_size / output_depth;
const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
TFLITE_DCHECK_LE(kOutputPixelsInAccBuffer * output_depth,
kAccBufferActualSize);
TFLITE_DCHECK_LE(kAccBufferActualSize, acc_buffer_size);
TFLITE_DCHECK_GE(kOutputPixelsInAccBuffer, 1);
TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
using row_accum_func_t = decltype(&QuantizedDepthwiseConvAccumRowGeneric);
row_accum_func_t row_accum_func = nullptr;
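  // Try to select a row-accumulation kernel specialized for this
  // (input depth, depth multiplier) combination; if none of the NEON
  // specializations below matches, fall back to the generic implementation.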
#define TFMINI_USE_DEPTHWISECONV_KERNEL(ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
FIXED_DEPTH_MULTIPLIER) \
if (!row_accum_func && (stride_width == 1 || ALLOW_STRIDED) && \
(input_depth == FIXED_INPUT_DEPTH || FIXED_INPUT_DEPTH == 0) && \
depth_multiplier == FIXED_DEPTH_MULTIPLIER) { \
row_accum_func = \
QuantizedDepthwiseConvAccumRow<ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
FIXED_DEPTH_MULTIPLIER>; \
}
#ifdef USE_NEON
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 4)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 4)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 8, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 8)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 12, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 16, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 16)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 20)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 32)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 8)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 2, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 4, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 3)
#endif
if (!row_accum_func) {
row_accum_func = QuantizedDepthwiseConvAccumRowGeneric;
}
#undef TFMINI_USE_DEPTHWISECONV_KERNEL
const int input_height_stride = input_shape.Dims(3) * input_shape.Dims(2);
const int input_batch_stride = input_height_stride * input_shape.Dims(1);
const int filter_height_stride = filter_shape.Dims(3) * filter_shape.Dims(2);
int batch_start = 0;
int batch_end = batches;
int row_start = 0;
int row_end = output_rows;
int output_ptr_offset = 0;
switch (thread_dim) {
case 0:
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, batches);
batch_start = thread_start;
batch_end = thread_end;
output_ptr_offset = batch_start * FlatSizeSkipDim(output_shape, 0);
break;
case 1:
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, output_rows);
row_start = thread_start;
row_end = thread_end;
output_ptr_offset = row_start * output_width * output_depth;
break;
}
float* output_ptr = output_data + output_ptr_offset;
int batch_step =
(output_rows + row_start - row_end) * output_width * output_depth;
for (int b = batch_start; b < batch_end; ++b) {
float input_scale = input_scales[b];
int32_t input_offset = input_offsets[b];
for (int out_y = row_start; out_y < row_end; ++out_y) {
const int in_y_origin = (out_y * stride_height) - pad_height;
const int filter_y_start =
std::max(0, (-in_y_origin + dilation_height_factor - 1) /
dilation_height_factor);
const int filter_y_end =
std::min(filter_height,
(input_height - in_y_origin + dilation_height_factor - 1) /
dilation_height_factor);
for (int out_x_buffer_start = 0; out_x_buffer_start < output_width;
out_x_buffer_start += kOutputPixelsInAccBuffer) {
const int out_x_buffer_end = std::min(
output_width, out_x_buffer_start + kOutputPixelsInAccBuffer);
const int num_output_pixels = out_x_buffer_end - out_x_buffer_start;
DepthwiseConvInitAccBuffer(num_output_pixels, output_depth, acc_buffer);
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
row_accum_func(
stride_width, dilation_width_factor, input_depth, input_width,
input_data + in_y * input_height_stride + b * input_batch_stride,
-input_offset, pad_width, depth_multiplier, filter_width,
filter_data + filter_y * filter_height_stride, out_x_buffer_start,
out_x_buffer_end, output_depth, acc_buffer);
}
        ruy::profiler::ScopeLabel label("store");
const int num_output_values = output_depth * num_output_pixels;
int c = 0;
while (c < output_depth) {
int target_output_depth = output_depth;
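          // Dequantize the 32-bit accumulators to float:
          //   float_acc = acc * per_channel_scale * input_scale + bias,
          // clamped to the activation range. The NEON path below handles four
          // output channels per step; the scalar loop afterwards covers the
          // remaining channels.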
#ifdef USE_NEON
const float32x4_t output_activation_min_vec =
vdupq_n_f32(output_activation_min);
const float32x4_t output_activation_max_vec =
vdupq_n_f32(output_activation_max);
const float32x4_t input_scale_32x4 = vdupq_n_f32(input_scale);
for (; c <= output_depth - 4; c += 4) {
if ((c + 4) > output_depth) {
break;
}
const float32x4_t channel_scale_32x4 =
vld1q_f32(per_channel_scales + c);
const float32x4_t bias_32x4 = vld1q_f32(bias_data + c);
for (int n = 0; n < num_output_pixels; ++n) {
int loc = n * output_depth + c;
int32x4_t acc = vld1q_s32(acc_buffer + loc);
float32x4_t float_acc = vcvtq_f32_s32(acc);
float_acc = vmulq_f32(float_acc, channel_scale_32x4);
float_acc = vmulq_f32(float_acc, input_scale_32x4);
float_acc = vaddq_f32(float_acc, bias_32x4);
float_acc = vmaxq_f32(float_acc, output_activation_min_vec);
float_acc = vminq_f32(float_acc, output_activation_max_vec);
vst1q_f32(output_ptr + loc, float_acc);
}
}
#endif
for (; c < target_output_depth; c++) {
for (int n = 0; n < num_output_pixels; ++n) {
int loc = n * output_depth + c;
int32 acc = acc_buffer[loc];
float float_acc = acc * input_scale * per_channel_scales[c];
float_acc += bias_data[c];
float_acc = std::max(float_acc, output_activation_min);
float_acc = std::min(float_acc, output_activation_max);
output_ptr[loc] = float_acc;
}
}
}
output_ptr += num_output_values;
}
}
output_ptr += batch_step;
}
}
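// The two wrappers below choose where the accumulation buffer lives: a
// fixed-size stack array when the output depth fits within
// kStaticAccBufferMaxSize, or a heap allocation sized to the output depth
// otherwise (unless TF_LITE_STATIC_MEMORY is defined).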
static void DoDepthwiseConvHybridGeneralStatic(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
static const int kStaticAccBufferMaxSize = 2048;
int32 stack_acc_buffer[kStaticAccBufferMaxSize];
DoDepthwiseConvHybridGeneral(
params, input_scales, input_shape, input_data, filter_shape, filter_data,
bias_shape, bias_data, output_shape, output_data, per_channel_scales,
input_offsets, thread_start, thread_end, thread_dim, stack_acc_buffer,
kStaticAccBufferMaxSize);
}
inline void DepthwiseConvHybridGeneral(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
#ifndef TF_LITE_STATIC_MEMORY
static const int kStaticAccBufferMaxSize = 2048;
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
if (kStaticAccBufferMaxSize < output_depth) {
std::unique_ptr<int32[]> heap_acc_buffer(new int32[output_depth]);
DoDepthwiseConvHybridGeneral(
params, input_scales, input_shape, input_data, filter_shape,
filter_data, bias_shape, bias_data, output_shape, output_data,
per_channel_scales, input_offsets, thread_start, thread_end, thread_dim,
heap_acc_buffer.get(), output_depth);
return;
}
#endif
DoDepthwiseConvHybridGeneralStatic(
params, input_scales, input_shape, input_data, filter_shape, filter_data,
bias_shape, bias_data, output_shape, output_data, per_channel_scales,
input_offsets, thread_start, thread_end, thread_dim);
}
}
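// Top-level dispatch: on AArch64 builds, the specialized 3x3-filter kernel is
// used when the filter shape, strides, dilations, and padding qualify;
// otherwise the general kernel above handles the convolution.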
template <DepthwiseConvOutputRounding kOutputRounding>
inline void DepthwiseConvHybridWithRounding(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
  ruy::profiler::ScopeLabel label("DepthwiseConvHybridInt8/8bit");
const int depth_multiplier = params.depth_multiplier;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
TFLITE_DCHECK_GE(dilation_width_factor, 1);
TFLITE_DCHECK_GE(dilation_height_factor, 1);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
const int input_depth = input_shape.Dims(3);
TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
if (optimized_ops::depthwise_conv::Fast3x3FilterKernelSupported<
optimized_ops::depthwise_conv::QuantizationType::kNonPerChannelUint8>(
input_shape, filter_shape, stride_width, stride_height,
dilation_width_factor, dilation_height_factor, pad_width, pad_height,
depth_multiplier, output_shape, 0, nullptr)) {
    ruy::profiler::ScopeLabel specialized_label(
"DepthwiseConvHybridInt8/8bit/3x3");
optimized_ops::depthwise_conv::DepthwiseConvHybrid3x3FilterPerChannel<
DepthwiseConvOutputRounding::kUpward>(
params, input_scales, input_shape, input_data,
filter_shape, filter_data, bias_shape, bias_data, output_shape,
output_data, per_channel_scales, input_offsets,
thread_start, thread_end, thread_dim);
return;
}
#endif
  ruy::profiler::ScopeLabel specialized_label(
"DepthwiseConvHybridInt8/8bit/General");
depthwise_conv::DepthwiseConvHybridGeneral(
params, input_scales, input_shape, input_data,
filter_shape, filter_data, bias_shape, bias_data, output_shape,
output_data, per_channel_scales, input_offsets,
thread_start, thread_end, thread_dim);
}
inline void DepthwiseConvHybridImpl(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
return DepthwiseConvHybridWithRounding<
DepthwiseConvOutputRounding::kAwayFromZero>(
params, input_scales, input_shape, input_data,
filter_shape, filter_data, bias_shape, bias_data, output_shape,
output_data, per_channel_scales, input_offsets,
thread_start, thread_end, thread_dim);
}
template <typename T, typename TS>
struct DepthwiseConvHybridWorkerTask : cpu_backend_threadpool::Task {
DepthwiseConvHybridWorkerTask(const DepthwiseParams& params,
const float* input_scales,
const RuntimeShape& input_shape,
const T* input_data,
const RuntimeShape& filter_shape,
const T* filter_data,
const RuntimeShape& bias_shape,
const TS* bias_data,
const RuntimeShape& output_shape,
float* output_data,
const float* per_channel_scales,
const int32_t* input_offsets,
int thread_start, int thread_end,
int thread_dim)
: params(params),
input_scales(input_scales),
input_shape(input_shape),
input_data(input_data),
filter_shape(filter_shape),
filter_data(filter_data),
bias_shape(bias_shape),
bias_data(bias_data),
output_shape(output_shape),
output_data(output_data),
per_channel_scales(per_channel_scales),
input_offsets(input_offsets),
thread_start(thread_start),
thread_end(thread_end),
thread_dim(thread_dim) {}
void Run() override {
DepthwiseConvHybridImpl(params, input_scales, input_shape,
input_data, filter_shape, filter_data,
bias_shape, bias_data, output_shape,
output_data, per_channel_scales, input_offsets,
thread_start, thread_end, thread_dim);
}
private:
const DepthwiseParams& params;
const float* input_scales;
const RuntimeShape& input_shape;
const T* input_data;
const RuntimeShape& filter_shape;
const T* filter_data;
const RuntimeShape& bias_shape;
const TS* bias_data;
const RuntimeShape& output_shape;
float* output_data;
const float* per_channel_scales;
const int32_t* input_offsets;
int thread_start;
int thread_end;
int thread_dim;
};
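// Public entry point: the work is partitioned across threads along either the
// batch dimension or the output-row dimension, whichever yields more
// parallelism, capped by the backend context's maximum thread count.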
inline void DepthwiseConvHybridPerChannel(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, int32_t* input_offsets,
CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("DepthwiseConvHybridInt8");
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int output_batches = output_shape.Dims(0);
const int output_rows = output_shape.Dims(1);
int thread_count_batch = HowManyConvThreads(output_shape, filter_shape, 0);
int thread_count_row = HowManyConvThreads(output_shape, filter_shape, 1);
int thread_dim, thread_count, thread_dim_size;
if (thread_count_batch > thread_count_row) {
thread_dim = 0;
thread_dim_size = output_batches;
thread_count = thread_count_batch;
} else {
thread_dim = 1;
thread_dim_size = output_rows;
thread_count = thread_count_row;
}
const int max_threads = cpu_backend_context->max_num_threads();
thread_count = std::max(1, std::min(thread_count, max_threads));
if (thread_count == 1) {
DepthwiseConvHybridImpl(params, input_scales, input_shape,
input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data,
per_channel_scales, input_offsets,
0, output_rows,
1);
} else {
std::vector<DepthwiseConvHybridWorkerTask<int8, float>> tasks;
tasks.reserve(thread_count);
int thread_start = 0;
for (int i = 0; i < thread_count; ++i) {
int thread_end =
thread_start + (thread_dim_size - thread_start) / (thread_count - i);
tasks.emplace_back(params, input_scales, input_shape,
input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data,
per_channel_scales, input_offsets, thread_start,
thread_end, thread_dim);
thread_start = thread_end;
}
cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
cpu_backend_context);
}
}
}
}
#endif | #include <stddef.h>
#include <cstdint>
#include <initializer_list>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEPTHWISE_CONVOLUTION_REF();
TfLiteRegistration* Register_DEPTHWISE_CONVOLUTION_GENERIC_OPT();
TfLiteRegistration* Register_DEPTHWISE_CONVOLUTION_NEON_OPT();
}
}
namespace {
using ::testing::ElementsAreArray;
class BaseDepthwiseConvolutionOpModel : public SingleOpModel {
public:
BaseDepthwiseConvolutionOpModel(
TfLiteRegistration* registration, const TensorData& input,
const TensorData& filter, const TensorData& output, Padding padding_type,
int dilation_factor = 1, int stride_width = 1, int stride_height = 1,
ActivationFunctionType fused_activation_function =
ActivationFunctionType_NONE) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[3];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
} else {
if (filter.per_channel_quantization) {
std::vector<float> bias_scale(
filter.per_channel_quantization_scales.size());
std::vector<int64_t> bias_zero_points(
filter.per_channel_quantization_scales.size());
for (size_t i = 0; i < filter.per_channel_quantization_scales.size();
++i) {
bias_scale[i] =
input.scale * filter.per_channel_quantization_scales[i];
bias_zero_points[i] = 0;
}
TensorData bias{TensorType_INT32,
{bias_size},
0,
0,
0,
0,
true,
bias_scale,
bias_zero_points,
0};
bias_ = AddInput(bias);
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
}
output_ = AddOutput(output);
int input_depth = GetShape(input_)[3];
int output_depth = GetShape(filter_)[3];
int depth_mul = output_depth / input_depth;
SetBuiltinOp(
BuiltinOperator_DEPTHWISE_CONV_2D,
BuiltinOptions_DepthwiseConv2DOptions,
CreateDepthwiseConv2DOptions(
builder_, padding_type, stride_width, stride_height, depth_mul,
fused_activation_function, dilation_factor, dilation_factor)
.Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEPTHWISE_CONV_2D, registration);
BuildInterpreter({GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
protected:
int input_;
int filter_;
int bias_;
int output_;
};
class PerChannelHybridDepthwiseConvolutionOpModel
: public BaseDepthwiseConvolutionOpModel {
public:
using BaseDepthwiseConvolutionOpModel::BaseDepthwiseConvolutionOpModel;
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetFilter(std::initializer_list<float> data) {
PerChannelSymmetricQuantizeAndPopulate(filter_, data);
}
void SetBias(std::initializer_list<float> data) {
PopulateTensor(bias_, data);
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
void SetFilter(const std::vector<float>& data) {
PerChannelSymmetricQuantizeAndPopulate(filter_, data);
}
void SetBias(const std::vector<float>& data) { PopulateTensor(bias_, data); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
const auto kKernelMap = new std::map<string, TfLiteRegistration*>({
{"Reference", ops::builtin::Register_DEPTHWISE_CONVOLUTION_REF()},
{"GenericOptimized",
ops::builtin::Register_DEPTHWISE_CONVOLUTION_GENERIC_OPT()},
{"NeonOptimized", ops::builtin::Register_DEPTHWISE_CONVOLUTION_NEON_OPT()},
});
class PerChannelHybridDepthwiseConvolutionOptimizedOpTest
: public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
class PerChannelHybridDepthwiseConvolutionOpTest : public SingleOpTest {
protected:
const std::map<string, TfLiteRegistration*>& GetKernelMap() override {
return *kKernelMap;
}
};
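// Runs the reference kernel and the NEON-optimized kernel on the same random
// inputs and checks that their outputs agree within the default
// ArrayFloatNear tolerance.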
void RandomTest(int b, int h, int w, int c, int fs, bool padding, int sw) {
const float element_max = 1.0;
const int input_size = b * h * w * c;
const int filter_size = 1 * fs * fs * c;
const int bias_size = c;
std::vector<float> input_data(input_size);
std::vector<float> filter_data(filter_size);
std::vector<float> bias_data(bias_size);
for (int i = 0; i < input_size; ++i) {
input_data[i] = UniformRandomFloat(-element_max, element_max);
}
for (int i = 0; i < filter_size; ++i) {
filter_data[i] = UniformRandomFloat(-element_max, element_max);
}
for (int i = 0; i < bias_size; ++i) {
bias_data[i] = UniformRandomFloat(-element_max, element_max);
}
const TensorData input({TensorType_FLOAT32, {b, h, w, c}});
const TensorData output({TensorType_FLOAT32, {}});
std::vector<float> scales;
std::vector<int64_t> offsets;
for (int i = 0; i < c; i++) {
scales.push_back(1.0 / 127.0);
    offsets.push_back(0);
}
const TensorData filter({TensorType_INT8,
{1, fs, fs, c},
0,
0,
0,
0,
true,
scales,
offsets,
3});
PerChannelHybridDepthwiseConvolutionOpModel hybrid_generic(
ops::builtin::Register_DEPTHWISE_CONVOLUTION_REF(), input, filter, output,
padding ? Padding_SAME : Padding_VALID,
1,
sw,
sw);
hybrid_generic.SetInput(input_data);
hybrid_generic.SetFilter(filter_data);
hybrid_generic.SetBias(bias_data);
ASSERT_EQ(hybrid_generic.Invoke(), kTfLiteOk);
std::vector<float> hybrid_generic_output = hybrid_generic.GetOutput();
PerChannelHybridDepthwiseConvolutionOpModel hybrid_optimized(
ops::builtin::Register_DEPTHWISE_CONVOLUTION_NEON_OPT(), input, filter,
output, padding ? Padding_SAME : Padding_VALID,
1,
sw,
sw);
hybrid_optimized.SetInput(input_data);
hybrid_optimized.SetFilter(filter_data);
hybrid_optimized.SetBias(bias_data);
ASSERT_EQ(hybrid_optimized.Invoke(), kTfLiteOk);
std::vector<float> hybrid_optimized_output = hybrid_optimized.GetOutput();
EXPECT_THAT(hybrid_generic_output,
ElementsAreArray(ArrayFloatNear(hybrid_optimized_output)));
}
void RandomTest(int b, int h, int w, int c, int fs) {
  RandomTest(b, h, w, c, fs, false, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest32) {
RandomTest(1, 10, 10, 8, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest64) {
RandomTest(1, 112, 112, 64, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest128) {
RandomTest(1, 56, 56, 128, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest256) {
RandomTest(1, 28, 28, 256, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest512) {
RandomTest(1, 14, 14, 512, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest, AccuracyTest1024) {
RandomTest(1, 3, 3, 1024, 3);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
AccuracyPaddingTest32) {
RandomTest(1, 112, 112, 32, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
AccuracyPaddingTest64) {
RandomTest(1, 112, 112, 64, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
AccuracyPaddingTest128) {
RandomTest(1, 56, 56, 128, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
AccuracyPaddingTest256) {
RandomTest(1, 28, 28, 256, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
AccuracyPaddingTest512) {
RandomTest(1, 14, 14, 512, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
AccuracyPaddingTest1024) {
RandomTest(1, 3, 3, 1024, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
       AccuracyPaddingTest4096) {
RandomTest(1, 3, 3, 4096, 3, true, 1);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
Accuracy2x2StrideTest32) {
RandomTest(1, 112, 112, 32, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
Accuracy2x2StrideTest64) {
RandomTest(1, 112, 112, 64, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
Accuracy2x2StrideTest128) {
RandomTest(1, 56, 56, 128, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
Accuracy2x2StrideTest256) {
RandomTest(1, 28, 28, 256, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
Accuracy2x2StrideTest512) {
RandomTest(1, 14, 14, 512, 3, false, 2);
}
TEST_F(PerChannelHybridDepthwiseConvolutionOptimizedOpTest,
Accuracy2x2StrideTest1024) {
RandomTest(1, 3, 3, 1024, 3, false, 1);
}
TEST_P(PerChannelHybridDepthwiseConvolutionOpTest, SimpleTest) {
PerChannelHybridDepthwiseConvolutionOpModel m(
GetRegistration(), {TensorType_FLOAT32, {1, 2, 3, 2}},
{TensorType_INT8,
{1, 2, 2, 4},
0,
0,
0,
0,
true,
{1, 2, 3, 4},
{0, 0, 0, 0},
3},
{TensorType_FLOAT32, {}}, Padding_VALID);
m.SetInput({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
m.SetFilter(
{
1, 2, 3, 4,
3, 4, 5, 6,
7, 8, 5, 6,
3, 4, 1, 2,
});
m.SetBias({3, -2, 4, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{42.9373, 47.9451, 22.0706, 22.0627, 3, -4.00784, -29.1294, -54.1098},
0.16)));
}
TEST_P(PerChannelHybridDepthwiseConvolutionOpTest, Simple3x3FilterTest) {
PerChannelHybridDepthwiseConvolutionOpModel m(
GetRegistration(), {TensorType_FLOAT32, {1, 3, 3, 8}},
{TensorType_INT8,
{1, 3, 3, 8},
0,
0,
0,
0,
true,
{1, 2, 3, 4, 4, 3, 2, 1},
{0, 0, 0, 0, 0, 0, 0, 0},
3},
{TensorType_FLOAT32, {}}, Padding_VALID);
m.SetInput({
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
m.SetFilter(
{
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
m.SetBias({0, 0, 0, 0, 0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{9, 18, 0, 0, 36, 54, 0, 0}, 0.16)));
}
TEST_P(PerChannelHybridDepthwiseConvolutionOpTest,
Simple3x3FilterPaddingSameTest) {
PerChannelHybridDepthwiseConvolutionOpModel m(
GetRegistration(), {TensorType_FLOAT32, {1, 3, 3, 8}},
{TensorType_INT8,
{1, 3, 3, 8},
0,
0,
0,
0,
true,
{1, 2, 3, 4, 4, 3, 2, 1},
{0, 0, 0, 0, 0, 0, 0, 0},
3},
{TensorType_FLOAT32, {}}, Padding_SAME);
m.SetInput({
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0,
1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1,
0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0});
m.SetFilter(
{
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8});
m.SetBias({0, 0, 0, 0, 0, 0, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{
4, 8, 0, 0, 16, 24, 0, 0, 6, 12, 0, 0, 24, 36, 0,
0, 4, 8, 0, 0, 16, 24, 0, 0, 6, 12, 0, 0, 24, 36,
0, 0, 9, 18, 0, 0, 36, 54, 0, 0, 6, 12, 0, 0, 24,
36, 0, 0, 4, 8, 0, 0, 16, 24, 0, 0, 6, 12, 0, 0,
24, 36, 0, 0, 4, 8, 0, 0, 16, 24, 0, 0,
},
0.16)));
}
INSTANTIATE_TEST_SUITE_P(
PerChannelHybridDepthwiseConvolutionOpTest,
PerChannelHybridDepthwiseConvolutionOpTest,
::testing::ValuesIn(SingleOpTest::GetKernelTags(*kKernelMap)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/depthwise_conv_hybrid_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6884867c-9dbe-412d-b6e1-d8bbf28fb1e0 | cpp | google/quiche | test_ip_packets | quiche/quic/test_tools/test_ip_packets.cc | quiche/quic/test_tools/test_ip_packets_test.cc | #include "quiche/quic/test_tools/test_ip_packets.h"
#include <cstdint>
#include <limits>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/internet_checksum.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_data_writer.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/quiche_ip_address.h"
#include "quiche/common/quiche_ip_address_family.h"
#if defined(__linux__)
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#endif
namespace quic::test {
namespace {
constexpr uint16_t kIpv4HeaderSize = 20;
constexpr uint16_t kIpv6HeaderSize = 40;
constexpr uint16_t kUdpHeaderSize = 8;
constexpr uint8_t kUdpProtocol = 0x11;
#if defined(__linux__)
static_assert(kIpv4HeaderSize == sizeof(iphdr));
static_assert(kIpv6HeaderSize == sizeof(ip6_hdr));
static_assert(kUdpHeaderSize == sizeof(udphdr));
static_assert(kUdpProtocol == IPPROTO_UDP);
#endif
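// Builds a minimal 20-byte IPv4 header: version/IHL 0x45, zero DSCP/ECN and
// identification/fragment fields, TTL 64, and the given protocol. The header
// checksum field is left zero rather than computed.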
std::string CreateIpv4Header(int payload_length,
quiche::QuicheIpAddress source_address,
quiche::QuicheIpAddress destination_address,
uint8_t protocol) {
QUICHE_CHECK_GT(payload_length, 0);
QUICHE_CHECK_LE(payload_length,
std::numeric_limits<uint16_t>::max() - kIpv4HeaderSize);
QUICHE_CHECK(source_address.address_family() ==
quiche::IpAddressFamily::IP_V4);
QUICHE_CHECK(destination_address.address_family() ==
quiche::IpAddressFamily::IP_V4);
std::string header(kIpv4HeaderSize, '\0');
quiche::QuicheDataWriter header_writer(header.size(), header.data());
header_writer.WriteUInt8(0x45);
header_writer.WriteUInt8(0x00);
header_writer.WriteUInt16(kIpv4HeaderSize + payload_length);
header_writer.WriteUInt16(0x0000);
header_writer.WriteUInt16(0x0000);
header_writer.WriteUInt8(64);
header_writer.WriteUInt8(protocol);
header_writer.WriteUInt16(0x0000);
header_writer.WriteStringPiece(source_address.ToPackedString());
header_writer.WriteStringPiece(destination_address.ToPackedString());
QUICHE_CHECK_EQ(header_writer.remaining(), 0u);
return header;
}
std::string CreateIpv6Header(int payload_length,
quiche::QuicheIpAddress source_address,
quiche::QuicheIpAddress destination_address,
uint8_t next_header) {
QUICHE_CHECK_GT(payload_length, 0);
QUICHE_CHECK_LE(payload_length, std::numeric_limits<uint16_t>::max());
QUICHE_CHECK(source_address.address_family() ==
quiche::IpAddressFamily::IP_V6);
QUICHE_CHECK(destination_address.address_family() ==
quiche::IpAddressFamily::IP_V6);
std::string header(kIpv6HeaderSize, '\0');
quiche::QuicheDataWriter header_writer(header.size(), header.data());
  header_writer.WriteUInt32(0x60000000);      // Version (6), traffic class, flow label.
  header_writer.WriteUInt16(payload_length);  // Payload length (excludes header).
  header_writer.WriteUInt8(next_header);      // Next header (e.g. 0x11 for UDP).
  header_writer.WriteUInt8(64);               // Hop limit.
header_writer.WriteStringPiece(source_address.ToPackedString());
header_writer.WriteStringPiece(destination_address.ToPackedString());
QUICHE_CHECK_EQ(header_writer.remaining(), 0u);
return header;
}
}
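// Prepends an IPv4 or IPv6 header (chosen by the address family) to `payload`.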
std::string CreateIpPacket(const quiche::QuicheIpAddress& source_address,
const quiche::QuicheIpAddress& destination_address,
absl::string_view payload,
IpPacketPayloadType payload_type) {
QUICHE_CHECK(source_address.address_family() ==
destination_address.address_family());
uint8_t payload_protocol;
switch (payload_type) {
case IpPacketPayloadType::kUdp:
payload_protocol = kUdpProtocol;
break;
default:
QUICHE_NOTREACHED();
return "";
}
std::string header;
switch (source_address.address_family()) {
case quiche::IpAddressFamily::IP_V4:
header = CreateIpv4Header(payload.size(), source_address,
destination_address, payload_protocol);
break;
case quiche::IpAddressFamily::IP_V6:
header = CreateIpv6Header(payload.size(), source_address,
destination_address, payload_protocol);
break;
default:
QUICHE_NOTREACHED();
return "";
}
return absl::StrCat(header, payload);
}
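// Builds an 8-byte UDP header followed by `payload`, including a checksum
// computed over the IPv4/IPv6 pseudo-header.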
std::string CreateUdpPacket(const QuicSocketAddress& source_address,
const QuicSocketAddress& destination_address,
absl::string_view payload) {
QUICHE_CHECK(source_address.host().address_family() ==
destination_address.host().address_family());
QUICHE_CHECK(!payload.empty());
QUICHE_CHECK_LE(payload.size(),
static_cast<uint16_t>(std::numeric_limits<uint16_t>::max() -
kUdpHeaderSize));
std::string header(kUdpHeaderSize, '\0');
quiche::QuicheDataWriter header_writer(header.size(), header.data());
header_writer.WriteUInt16(source_address.port());
header_writer.WriteUInt16(destination_address.port());
header_writer.WriteUInt16(kUdpHeaderSize + payload.size());
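  // The UDP checksum covers a pseudo-header (source/destination addresses,
  // protocol, and UDP length) in addition to the UDP header and payload.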
InternetChecksum checksum;
switch (source_address.host().address_family()) {
case quiche::IpAddressFamily::IP_V4: {
checksum.Update(source_address.host().ToPackedString());
checksum.Update(destination_address.host().ToPackedString());
uint8_t protocol[] = {0x00, kUdpProtocol};
checksum.Update(protocol, sizeof(protocol));
uint16_t udp_length =
quiche::QuicheEndian::HostToNet16(kUdpHeaderSize + payload.size());
checksum.Update(reinterpret_cast<uint8_t*>(&udp_length),
sizeof(udp_length));
break;
}
case quiche::IpAddressFamily::IP_V6: {
checksum.Update(source_address.host().ToPackedString());
checksum.Update(destination_address.host().ToPackedString());
uint32_t udp_length =
quiche::QuicheEndian::HostToNet32(kUdpHeaderSize + payload.size());
checksum.Update(reinterpret_cast<uint8_t*>(&udp_length),
sizeof(udp_length));
uint8_t protocol[] = {0x00, 0x00, 0x00, kUdpProtocol};
checksum.Update(protocol, sizeof(protocol));
break;
}
default:
QUICHE_NOTREACHED();
return "";
}
checksum.Update(header.data(), header.size());
checksum.Update(payload.data(), payload.size());
uint16_t checksum_val = checksum.Value();
header_writer.WriteBytes(&checksum_val, sizeof(checksum_val));
QUICHE_CHECK_EQ(header_writer.remaining(), 0u);
return absl::StrCat(header, payload);
}
} | #include "quiche/quic/test_tools/test_ip_packets.h"
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_ip_address.h"
namespace quic::test {
namespace {
TEST(TestIpPacketsTest, CreateIpv4Packet) {
quiche::QuicheIpAddress source_ip;
ASSERT_TRUE(source_ip.FromString("192.0.2.45"));
ASSERT_TRUE(source_ip.IsIPv4());
QuicSocketAddress source_address{source_ip, 54131};
quiche::QuicheIpAddress destination_ip;
ASSERT_TRUE(destination_ip.FromString("192.0.2.67"));
ASSERT_TRUE(destination_ip.IsIPv4());
QuicSocketAddress destination_address(destination_ip, 57542);
std::string packet =
CreateIpPacket(source_ip, destination_ip,
CreateUdpPacket(source_address, destination_address,
"foo"),
IpPacketPayloadType::kUdp);
  constexpr static char kExpected[] =
      "\x45"              // Version (4) and header length (5 words = 20B).
      "\x00"              // DSCP and ECN.
      "\x00\x1F"          // Total length (31 = 20 IP + 8 UDP + 3 payload).
      "\x00\x00"          // Identification.
      "\x00\x00"          // Flags and fragment offset.
      "\x40"              // TTL (64).
      "\x11"              // Protocol (17, UDP).
      "\x00\x00"          // IP checksum (left zero by CreateIpv4Header).
      "\xC0\x00\x02\x2D"  // Source address (192.0.2.45).
      "\xC0\x00\x02\x43"  // Destination address (192.0.2.67).
      "\xD3\x73"          // Source port (54131).
      "\xE0\xC6"          // Destination port (57542).
      "\x00\x0B"          // UDP length (11 = 8 header + 3 payload).
      "\xF1\xBC"          // UDP checksum.
      "foo";              // Payload.
EXPECT_EQ(absl::string_view(packet),
absl::string_view(kExpected, sizeof(kExpected) - 1));
}
TEST(TestIpPacketsTest, CreateIpv6Packet) {
quiche::QuicheIpAddress source_ip;
ASSERT_TRUE(source_ip.FromString("2001:db8::45"));
ASSERT_TRUE(source_ip.IsIPv6());
QuicSocketAddress source_address{source_ip, 51941};
quiche::QuicheIpAddress destination_ip;
ASSERT_TRUE(destination_ip.FromString("2001:db8::67"));
ASSERT_TRUE(destination_ip.IsIPv6());
QuicSocketAddress destination_address(destination_ip, 55341);
std::string packet =
CreateIpPacket(source_ip, destination_ip,
CreateUdpPacket(source_address, destination_address,
"foo"),
IpPacketPayloadType::kUdp);
  constexpr static char kExpected[] =
      "\x60\x00\x00\x00"  // Version (6), traffic class, flow label.
      "\x00\x0b"          // Payload length (11 = 8 UDP header + 3 payload).
      "\x11"              // Next header (17, UDP).
      "\x40"              // Hop limit (64).
      "\x20\x01\x0D\xB8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x45"  // Source (2001:db8::45).
      "\x20\x01\x0D\xB8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x67"  // Destination (2001:db8::67).
      "\xCA\xE5"          // Source port (51941).
      "\xD8\x2D"          // Destination port (55341).
      "\x00\x0B"          // UDP length.
      "\x2B\x37"          // UDP checksum.
      "foo";              // Payload.
EXPECT_EQ(absl::string_view(packet),
absl::string_view(kExpected, sizeof(kExpected) - 1));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/test_ip_packets.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/test_ip_packets_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b9511aae-495e-4df9-8c02-e84d9e35edec | cpp | google/cel-cpp | sets_functions | extensions/sets_functions.cc | extensions/sets_functions_test.cc | #include "extensions/sets_functions.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel::extensions {
namespace {
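// sets.contains(list, sublist): true iff every element of `sublist` is
// present in `list`. Iteration short-circuits on the first missing element.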
absl::StatusOr<Value> SetsContains(ValueManager& value_factory,
const ListValue& list,
const ListValue& sublist) {
bool any_missing = false;
CEL_RETURN_IF_ERROR(sublist.ForEach(
value_factory,
[&list, &value_factory,
&any_missing](const Value& sublist_element) -> absl::StatusOr<bool> {
CEL_ASSIGN_OR_RETURN(auto contains,
list.Contains(value_factory, sublist_element));
any_missing =
!contains->Is<BoolValue>() || !contains.GetBool().NativeValue();
return !any_missing;
}));
return value_factory.CreateBoolValue(!any_missing);
}
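// sets.intersects(list, sublist): true iff the two lists share at least one
// element. Iteration short-circuits once a common element is found.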
absl::StatusOr<Value> SetsIntersects(ValueManager& value_factory,
const ListValue& list,
const ListValue& sublist) {
bool exists = false;
CEL_RETURN_IF_ERROR(list.ForEach(
value_factory,
[&value_factory, &sublist,
&exists](const Value& list_element) -> absl::StatusOr<bool> {
CEL_ASSIGN_OR_RETURN(auto contains,
sublist.Contains(value_factory, list_element));
exists = contains->Is<BoolValue>() && contains.GetBool().NativeValue();
return !exists;
}));
return value_factory.CreateBoolValue(exists);
}
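// sets.equivalent(list, sublist): true iff each list contains every element
// of the other, i.e. mutual sets.contains.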
absl::StatusOr<Value> SetsEquivalent(ValueManager& value_factory,
const ListValue& list,
const ListValue& sublist) {
CEL_ASSIGN_OR_RETURN(auto contains_sublist,
SetsContains(value_factory, list, sublist));
if (contains_sublist.Is<BoolValue>() &&
!contains_sublist.GetBool().NativeValue()) {
return contains_sublist;
}
return SetsContains(value_factory, sublist, list);
}
absl::Status RegisterSetsContainsFunction(FunctionRegistry& registry) {
return registry.Register(
BinaryFunctionAdapter<
absl::StatusOr<Value>, const ListValue&,
const ListValue&>::CreateDescriptor("sets.contains",
                                            /*receiver_style=*/false),
BinaryFunctionAdapter<absl::StatusOr<Value>, const ListValue&,
const ListValue&>::WrapFunction(SetsContains));
}
absl::Status RegisterSetsIntersectsFunction(FunctionRegistry& registry) {
return registry.Register(
BinaryFunctionAdapter<
absl::StatusOr<Value>, const ListValue&,
const ListValue&>::CreateDescriptor("sets.intersects",
                                            /*receiver_style=*/false),
BinaryFunctionAdapter<absl::StatusOr<Value>, const ListValue&,
const ListValue&>::WrapFunction(SetsIntersects));
}
absl::Status RegisterSetsEquivalentFunction(FunctionRegistry& registry) {
return registry.Register(
BinaryFunctionAdapter<
absl::StatusOr<Value>, const ListValue&,
const ListValue&>::CreateDescriptor("sets.equivalent",
                                            /*receiver_style=*/false),
BinaryFunctionAdapter<absl::StatusOr<Value>, const ListValue&,
const ListValue&>::WrapFunction(SetsEquivalent));
}
}
absl::Status RegisterSetsFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(RegisterSetsContainsFunction(registry));
CEL_RETURN_IF_ERROR(RegisterSetsIntersectsFunction(registry));
CEL_RETURN_IF_ERROR(RegisterSetsEquivalentFunction(registry));
return absl::OkStatus();
}
} | #include "extensions/sets_functions.h"
#include <memory>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function_adapter.h"
#include "eval/public/cel_options.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::parser::ParseWithMacros;
using ::google::api::expr::runtime::Activation;
using ::google::api::expr::runtime::CelExpressionBuilder;
using ::google::api::expr::runtime::CelValue;
using ::google::api::expr::runtime::CreateCelExpressionBuilder;
using ::google::api::expr::runtime::FunctionAdapter;
using ::google::api::expr::runtime::InterpreterOptions;
using ::absl_testing::IsOk;
using ::google::protobuf::Arena;
struct TestInfo {
std::string expr;
};
class CelSetsFunctionsTest : public testing::TestWithParam<TestInfo> {};
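// Parses, plans, and evaluates each expression; every test case is expected
// to evaluate to true.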
TEST_P(CelSetsFunctionsTest, EndToEnd) {
const TestInfo& test_info = GetParam();
std::vector<Macro> all_macros = Macro::AllMacros();
auto result = ParseWithMacros(test_info.expr, all_macros, "<input>");
EXPECT_THAT(result, IsOk());
ParsedExpr parsed_expr = *result;
Expr expr = parsed_expr.expr();
SourceInfo source_info = parsed_expr.source_info();
InterpreterOptions options;
options.enable_heterogeneous_equality = true;
options.enable_empty_wrapper_null_unboxing = true;
options.enable_qualified_identifier_rewrites = true;
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterSetsFunctions(builder->GetRegistry()->InternalGetRegistry(),
cel::RuntimeOptions{}));
ASSERT_OK(google::api::expr::runtime::RegisterBuiltinFunctions(
builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&expr, &source_info));
Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue out, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(out.IsBool()) << test_info.expr << " -> " << out.DebugString();
EXPECT_TRUE(out.BoolOrDie()) << test_info.expr << " -> " << out.DebugString();
}
INSTANTIATE_TEST_SUITE_P(
CelSetsFunctionsTest, CelSetsFunctionsTest,
testing::ValuesIn<TestInfo>({
{"sets.contains([], [])"},
{"sets.contains([1], [])"},
{"sets.contains([1], [1])"},
{"sets.contains([1], [1, 1])"},
{"sets.contains([1, 1], [1])"},
{"sets.contains([2, 1], [1])"},
{"sets.contains([1], [1.0, 1u])"},
{"sets.contains([1, 2], [2u, 2.0])"},
{"sets.contains([1, 2u], [2, 2.0])"},
{"!sets.contains([1], [2])"},
{"!sets.contains([1], [1, 2])"},
{"!sets.contains([1], [\"1\", 1])"},
{"!sets.contains([1], [1.1, 2])"},
{"sets.intersects([1], [1])"},
{"sets.intersects([1], [1, 1])"},
{"sets.intersects([1, 1], [1])"},
{"sets.intersects([2, 1], [1])"},
{"sets.intersects([1], [1, 2])"},
{"sets.intersects([1], [1.0, 2])"},
{"sets.intersects([1, 2], [2u, 2, 2.0])"},
{"sets.intersects([1, 2], [1u, 2, 2.3])"},
{"!sets.intersects([], [])"},
{"!sets.intersects([1], [])"},
{"!sets.intersects([1], [2])"},
{"!sets.intersects([1], [\"1\", 2])"},
{"!sets.intersects([1], [1.1, 2u])"},
{"sets.equivalent([], [])"},
{"sets.equivalent([1], [1])"},
{"sets.equivalent([1], [1, 1])"},
{"sets.equivalent([1, 1, 2], [2, 2, 1])"},
{"sets.equivalent([1, 1], [1])"},
{"sets.equivalent([1], [1u, 1.0])"},
{"sets.equivalent([1], [1u, 1.0])"},
{"sets.equivalent([1, 2, 3], [3u, 2.0, 1])"},
{"!sets.equivalent([2, 1], [1])"},
{"!sets.equivalent([1], [1, 2])"},
{"!sets.equivalent([1, 2], [2u, 2, 2.0])"},
{"!sets.equivalent([1, 2], [1u, 2, 2.3])"},
{"sets.equivalent([false, true], [true, false])"},
{"!sets.equivalent([true], [false])"},
{"sets.equivalent(['foo', 'bar'], ['bar', 'foo'])"},
{"!sets.equivalent(['foo'], ['bar'])"},
{"sets.equivalent([b'foo', b'bar'], [b'bar', b'foo'])"},
{"!sets.equivalent([b'foo'], [b'bar'])"},
{"sets.equivalent([null], [null])"},
{"!sets.equivalent([null], [])"},
{"sets.equivalent([type(1), type(1u)], [type(1u), type(1)])"},
{"!sets.equivalent([type(1)], [type(1u)])"},
{"sets.equivalent([duration('0s'), duration('1s')], [duration('1s'), "
"duration('0s')])"},
{"!sets.equivalent([duration('0s')], [duration('1s')])"},
{"sets.equivalent([timestamp('1970-01-01T00:00:00Z'), "
"timestamp('1970-01-01T00:00:01Z')], "
"[timestamp('1970-01-01T00:00:01Z'), "
"timestamp('1970-01-01T00:00:00Z')])"},
{"!sets.equivalent([timestamp('1970-01-01T00:00:00Z')], "
"[timestamp('1970-01-01T00:00:01Z')])"},
{"sets.equivalent([[false, true]], [[false, true]])"},
{"!sets.equivalent([[false, true]], [[true, false]])"},
{"sets.equivalent([{'foo': true, 'bar': false}], [{'bar': false, "
"'foo': true}])"},
}));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/sets_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/sets_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
38258bcd-dd0a-45b3-b9e0-17ede1681162 | cpp | google/tensorstore | registry | tensorstore/internal/metrics/registry.cc | tensorstore/internal/metrics/registry_test.cc | #include "tensorstore/internal/metrics/registry.h"
#include <cassert>
#include <memory>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/strings/match.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/metrics/collect.h"
namespace tensorstore {
namespace internal_metrics {
void MetricRegistry::AddInternal(std::string_view metric_name,
MetricRegistry::Metric m,
std::shared_ptr<void> hook) {
ABSL_CHECK(m) << metric_name;
absl::MutexLock l(&mu_);
ABSL_CHECK(
entries_.try_emplace(metric_name, Entry{std::move(m), std::move(hook)})
.second)
<< metric_name;
}
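// Collects all registered metrics whose names start with `prefix` (an empty
// prefix collects everything), then runs any registered collect hooks.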
std::vector<CollectedMetric> MetricRegistry::CollectWithPrefix(
std::string_view prefix) {
  std::vector<CollectedMetric> all;
  absl::MutexLock l(&mu_);
  // Reserve while holding the lock; entries_ is guarded by mu_.
  all.reserve(entries_.size());
for (auto& kv : entries_) {
if (prefix.empty() || absl::StartsWith(kv.first, prefix)) {
auto opt_metric = kv.second.poly(CollectMetricTag{});
if (opt_metric.has_value()) {
all.emplace_back(*std::move(opt_metric));
assert(all.back().metric_name == kv.first);
}
}
}
for (auto& hook : collect_hooks_) {
hook(prefix, all);
}
return all;
}
std::optional<CollectedMetric> MetricRegistry::Collect(std::string_view name) {
absl::MutexLock l(&mu_);
auto it = entries_.find(name);
if (it == entries_.end()) return std::nullopt;
auto opt_metric = it->second.poly(CollectMetricTag{});
assert(!opt_metric.has_value() || opt_metric->metric_name == it->first);
return opt_metric;
}
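// Returns the process-wide metric registry singleton.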
MetricRegistry& GetMetricRegistry() {
static absl::NoDestructor<MetricRegistry> registry;
return *registry;
}
void MetricRegistry::Reset() {
absl::MutexLock l(&mu_);
for (auto& [k, v] : entries_) {
v.poly(ResetMetricTag{});
}
}
}
} | #include "tensorstore/internal/metrics/registry.h"
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/internal/metrics/collect.h"
namespace {
using ::tensorstore::internal_metrics::CollectedMetric;
using ::tensorstore::internal_metrics::MetricRegistry;
TEST(RegistryTest, Arbitrary) {
MetricRegistry registry;
registry.AddGeneric("/my/metric", [] {
CollectedMetric metric;
metric.metric_name = "/my/metric";
return metric;
});
registry.AddGeneric("/my/metric2", [] {
CollectedMetric metric;
metric.metric_name = "/my/metric2";
return metric;
});
EXPECT_FALSE(registry.Collect("/my/foo").has_value());
auto collected = registry.Collect("/my/metric");
ASSERT_TRUE(collected.has_value());
EXPECT_EQ("/my/metric", collected->metric_name);
auto all = registry.CollectWithPrefix("/my");
EXPECT_EQ(2, all.size());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/registry.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/metrics/registry_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9a766995-4e5a-4ca3-8824-00f4bbbcc0c9 | cpp | tensorflow/tensorflow | gather | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gather.cc | tensorflow/lite/delegates/gpu/cl/kernels/gather_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gather.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
std::optional<bool> IsGatherLegal(mhlo::GatherOp op) { return std::nullopt; }
class LegalizeGatherToSlice : public OpConversionPattern<mhlo::GatherOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::GatherOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
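// Rewrites an mhlo.gather over a rank-2 operand that extracts one slice per
// batch element into clamped tfl.slice ops, concatenated along the batch
// dimension (a single tfl.slice when the batch size is 1).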
LogicalResult LegalizeGatherToSlice::matchAndRewrite(
mhlo::GatherOp gather_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
Value operand = gather_op.getOperand();
Value start_indices = gather_op.getStartIndices();
static const int rank_two = 2;
static const int max_batch_size = 50;
ShapedType operand_type = mlir::cast<ShapedType>(operand.getType());
ShapedType start_indices_type =
mlir::cast<ShapedType>(start_indices.getType());
ShapedType result_type =
mlir::cast<ShapedType>(gather_op.getResult().getType());
if (!operand_type.hasStaticShape() || !start_indices_type.hasStaticShape() ||
!result_type.hasStaticShape()) {
return rewriter.notifyMatchFailure(
gather_op,
"Dynamic shaped inputs are not supported when legalizing mhlo.gather "
"op to tf.slice.");
}
auto start_index_map = gather_op.getDimensionNumbers().getStartIndexMap();
auto collapsed_slice_dims =
gather_op.getDimensionNumbers().getCollapsedSliceDims();
auto offset_dims = gather_op.getDimensionNumbers().getOffsetDims();
auto slice_sizes = gather_op.getSliceSizes();
llvm::SmallVector<int64_t, 2> slice_sizes_vector;
slice_sizes_vector.reserve(slice_sizes.size());
for (int64_t s : slice_sizes.getValues<int64_t>()) {
slice_sizes_vector.push_back(s);
}
llvm::SmallVector<int64_t, 1> batch_dims;
int offset_index = 0;
for (int64_t i = 0; i < result_type.getRank(); ++i) {
if (offset_index >= offset_dims.size() || offset_dims[offset_index] != i) {
batch_dims.push_back(i);
} else {
++offset_index;
}
}
if (batch_dims.size() != 1 || batch_dims[0] != 0) {
return failure();
}
int64_t batch_dim = batch_dims[0];
if (operand_type.getDimSize(batch_dim) > max_batch_size ||
operand_type.getRank() != rank_two ||
start_indices_type.getRank() != rank_two ||
operand_type.getDimSize(batch_dim) !=
start_indices_type.getDimSize(batch_dim) ||
slice_sizes_vector[batch_dim] != 1) {
return failure();
}
for (int64_t i = 0; i < start_index_map.size(); i++) {
if (start_index_map[i] != i) {
return failure();
}
}
if (collapsed_slice_dims.size() != start_index_map.size() - 1 ||
collapsed_slice_dims.size() != 1 || collapsed_slice_dims[0] != 0) {
return failure();
}
int64_t index_vector_dim =
gather_op.getDimensionNumbers().getIndexVectorDim();
if (failed(NormalizeIndexVector(gather_op, start_indices, start_indices_type,
index_vector_dim, rewriter))) {
return failure();
}
ImplicitLocOpBuilder builder(gather_op.getLoc(), rewriter);
auto max_start_indices = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter,
llvm::SmallVector<int64_t>(
{operand_type.getDimSize(0) - slice_sizes_vector[0],
operand_type.getDimSize(1) - slice_sizes_vector[1]}),
start_indices_type.getElementType());
auto min_start_indices = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, llvm::SmallVector<int64_t>({0, 0}),
start_indices_type.getElementType());
auto start_indices_max_op = rewriter.create<TFL::MaximumOp>(
gather_op.getLoc(), start_indices, min_start_indices);
auto clamped_start_indices_op = rewriter.create<TFL::MinimumOp>(
gather_op.getLoc(), start_indices_max_op, max_start_indices);
int64_t batch_size = start_indices_type.getDimSize(batch_dim);
auto slice_size = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, slice_sizes_vector, rewriter.getI32Type());
if (batch_size == 1) {
auto squeeze_op = rewriter.create<TFL::SqueezeOp>(
gather_op.getLoc(),
RankedTensorType::get({rank_two}, start_indices_type.getElementType()),
clamped_start_indices_op,
rewriter.getI64ArrayAttr(llvm::ArrayRef<int64_t>({batch_dim})));
auto slice_op =
rewriter.create<TFL::SliceOp>(gather_op.getLoc(), gather_op.getType(),
operand, squeeze_op, slice_size);
rewriter.replaceOp(gather_op, slice_op);
return mlir::success();
}
llvm::SmallVector<Value, 1> slices;
slices.reserve(batch_size);
for (int64_t i = 0; i < batch_size; ++i) {
auto zero = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, llvm::SmallVector<int64_t>({i, 0}),
rewriter.getI32Type());
auto two = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter, llvm::SmallVector<int64_t>({1, 2}),
rewriter.getI32Type());
auto begin = rewriter.create<TFL::SliceOp>(
gather_op.getLoc(),
RankedTensorType::get({1, 2}, start_indices_type.getElementType()),
clamped_start_indices_op, zero, two);
auto squeeze_op = rewriter.create<TFL::SqueezeOp>(
gather_op.getLoc(),
RankedTensorType::get({rank_two}, start_indices_type.getElementType()),
begin, rewriter.getI64ArrayAttr(llvm::ArrayRef<int64_t>({batch_dim})));
auto slice_op = rewriter.create<TFL::SliceOp>(
gather_op.getLoc(),
RankedTensorType::get({1, slice_sizes_vector[1]},
operand_type.getElementType()),
operand, squeeze_op, slice_size);
slices.push_back(slice_op);
}
auto concat_op = rewriter.create<TFL::ConcatenationOp>(
gather_op.getLoc(), result_type, slices, 0,
rewriter.getStringAttr("NONE"));
rewriter.replaceOp(gather_op, concat_op);
return mlir::success();
}
struct TransposeParams {
std::vector<int64_t> permutation;
std::vector<int64_t> canonicalized_output_shape;
std::vector<int64_t> canonicalized_offset_dims;
};
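// Computes a transpose that moves all offset dimensions to the back of the
// output, plus the permutation needed to restore the original output order.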
TransposeParams CanonicalizeOffset(ShapedType result_type,
ArrayRef<int64_t> original_offset_dims) {
TransposeParams transpose_params;
int output_rank = result_type.getRank();
for (int start = output_rank - original_offset_dims.size();
start < output_rank; ++start) {
transpose_params.canonicalized_offset_dims.push_back(start);
}
std::vector<int64_t> batch_dims;
int offset_index = 0;
for (int64_t i = 0; i < output_rank; ++i) {
if (offset_index >= original_offset_dims.size() ||
original_offset_dims[offset_index] != i) {
batch_dims.push_back(i);
} else {
++offset_index;
}
}
int batch_idx = 0;
int offset_idx = 0;
int batch_dim_size = batch_dims.size();
for (int i = 0; i < output_rank; ++i) {
if (batch_idx >= batch_dims.size()) {
transpose_params.permutation.push_back(batch_dim_size + offset_idx);
++offset_idx;
} else if (offset_idx < original_offset_dims.size() &&
original_offset_dims[offset_idx] < batch_dims[batch_idx]) {
transpose_params.permutation.push_back(batch_dim_size + offset_idx);
++offset_idx;
} else {
transpose_params.permutation.push_back(batch_idx++);
}
}
for (auto dim : batch_dims) {
transpose_params.canonicalized_output_shape.push_back(
result_type.getDimSize(dim));
}
for (auto dim : original_offset_dims) {
transpose_params.canonicalized_output_shape.push_back(
result_type.getDimSize(dim));
}
return transpose_params;
}
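// Lowers a general mhlo.gather into tfl.gather_nd, transposing the operand so
// the gathered dimensions come first and transposing the result back if
// needed.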
class LegalizeGatherToGatherND : public OpConversionPattern<mhlo::GatherOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::GatherOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeGatherToGatherND::matchAndRewrite(
mhlo::GatherOp gather_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
Value operand = gather_op.getOperand();
Value start_indices = gather_op.getStartIndices();
ShapedType operand_type = mlir::cast<ShapedType>(operand.getType());
ShapedType start_indices_type =
mlir::cast<ShapedType>(start_indices.getType());
ShapedType result_type =
mlir::cast<ShapedType>(gather_op.getResult().getType());
if (!operand_type.hasStaticShape()) {
gather_op.emitOpError() << "Dynamic shaped operand is not supported.";
return failure();
}
int64_t index_vector_dim =
gather_op.getDimensionNumbers().getIndexVectorDim();
if (failed(NormalizeIndexVector(gather_op, start_indices, start_indices_type,
index_vector_dim, rewriter))) {
return failure();
}
auto start_index_map = gather_op.getDimensionNumbers().getStartIndexMap();
auto collapsed_slice_dims =
gather_op.getDimensionNumbers().getCollapsedSliceDims();
if (start_index_map.size() != collapsed_slice_dims.size()) {
return rewriter.notifyMatchFailure(
gather_op,
"different size for start index map and collapsed slice dims");
}
for (auto c : collapsed_slice_dims) {
if (llvm::count(start_index_map, c) == 0) {
return rewriter.notifyMatchFailure(
gather_op, "collapsed slice dim isn't present in start index map");
}
}
auto slice_sizes = gather_op.getSliceSizes();
int64_t index = 0;
for (int64_t s : slice_sizes.getValues<int64_t>()) {
if (llvm::count(start_index_map, index)) {
if (s != 1) {
return rewriter.notifyMatchFailure(gather_op,
"unsupported slice sizes");
}
} else {
if (s != operand_type.getShape()[index]) {
return rewriter.notifyMatchFailure(gather_op,
"unsupported slice sizes");
}
}
++index;
}
auto offset_dims = gather_op.getDimensionNumbers().getOffsetDims();
SmallVector<int64_t, 4> offset_dims_vector(offset_dims.begin(),
offset_dims.end());
const TransposeParams& transpose_params =
CanonicalizeOffset(result_type,
offset_dims_vector);
int64_t offset = start_indices_type.getRank() - 1;
for (int64_t o : transpose_params.canonicalized_offset_dims) {
if (o != offset) {
return rewriter.notifyMatchFailure(gather_op, "unsupported offset dims");
}
++offset;
}
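  // Transpose the operand so that the dimensions referenced by
  // start_index_map come first.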
llvm::SmallVector<int64_t, 4> transpose_dimensions;
llvm::SmallVector<int64_t, 4> transpose_shape;
for (auto s : start_index_map) {
transpose_dimensions.push_back(s);
transpose_shape.push_back(operand_type.getShape()[s]);
}
for (int64_t i = 0, e = operand_type.getRank(); i < e; ++i) {
if (llvm::count(start_index_map, i) == 0) {
transpose_dimensions.push_back(i);
transpose_shape.push_back(operand_type.getShape()[i]);
}
}
operand_type =
RankedTensorType::get(transpose_shape, operand_type.getElementType());
operand = rewriter.create<mhlo::TransposeOp>(
gather_op.getLoc(), operand_type, operand,
rewriter.getI64TensorAttr(transpose_dimensions));
bool need_transpose_after = false;
for (int i = 0; i < transpose_params.permutation.size(); ++i) {
if (i != transpose_params.permutation[i]) {
need_transpose_after = true;
break;
}
}
auto tf_gather_nd_result_type =
RankedTensorType::get(transpose_params.canonicalized_output_shape,
result_type.getElementType());
if (start_indices_type.getElementType().isUnsignedInteger(32)) {
start_indices = rewriter.create<TFL::CastOp>(
gather_op->getLoc(),
RankedTensorType::get(start_indices_type.getShape(),
rewriter.getI64Type()),
start_indices);
}
auto tf_gather_nd_op = rewriter.create<TFL::GatherNdOp>(
gather_op->getLoc(), tf_gather_nd_result_type, operand, start_indices);
if (!need_transpose_after) {
rewriter.replaceOp(gather_op, tf_gather_nd_op->getOpResults());
return success();
}
rewriter.replaceOpWithNewOp<mhlo::TransposeOp>(
gather_op, result_type, tf_gather_nd_op,
rewriter.getI64TensorAttr(transpose_params.permutation));
return success();
}
void PopulateGatherPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeGatherToSlice, LegalizeGatherToGatherND>(ctx);
target.addDynamicallyLegalOp<mhlo::GatherOp>(IsGatherLegal);
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/gather_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, GatherBatch) {
auto status = GatherBatchTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherBatchConst) {
auto status = GatherBatchTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherHeight) {
auto status = GatherHeightTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherHeightConst) {
auto status = GatherHeightTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherWidth) {
auto status = GatherWidthTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherWidthConst) {
auto status = GatherWidthTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherChannels) {
auto status = GatherChannelsTest(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, GatherChannelsConst) {
auto status = GatherChannelsTest(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gather.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/gather_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd75d663-745a-42b4-abd8-5e8e7c6c4e3f | cpp | tensorflow/tensorflow | summary_tensor_op | tensorflow/core/kernels/summary_tensor_op.cc | tensorflow/core/kernels/summary_tensor_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
template <typename T>
class SummaryTensorOpV2 : public OpKernel {
public:
explicit SummaryTensorOpV2(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* c) override {
const Tensor& tag = c->input(0);
OP_REQUIRES(c, TensorShapeUtils::IsScalar(tag.shape()),
errors::InvalidArgument("tag must be scalar"));
const Tensor& tensor = c->input(1);
const Tensor& serialized_summary_metadata_tensor = c->input(2);
OP_REQUIRES(
c,
TensorShapeUtils::IsScalar(serialized_summary_metadata_tensor.shape()),
errors::InvalidArgument("serialized_summary_metadata must be scalar"));
Summary s;
Summary::Value* v = s.add_value();
v->set_tag(string(tag.scalar<tstring>()()));
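    // Strings are encoded field-by-field; other dtypes use the compact
    // tensor_content encoding.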
if (tensor.dtype() == DT_STRING) {
tensor.AsProtoField(v->mutable_tensor());
} else {
tensor.AsProtoTensorContent(v->mutable_tensor());
}
ParseFromTString(serialized_summary_metadata_tensor.scalar<tstring>()(),
v->mutable_metadata());
Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
};
#define REGISTER(T) \
REGISTER_KERNEL_BUILDER( \
Name("TensorSummaryV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
SummaryTensorOpV2<T>);
TF_CALL_ALL_TYPES(REGISTER)
#undef REGISTER
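// Legacy (V1) tensor summary op: no tag or metadata inputs; the kernel's node
// name identifies the value.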
template <typename T>
class SummaryTensorOp : public OpKernel {
public:
explicit SummaryTensorOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* c) override {
const Tensor& tensor = c->input(0);
Summary s;
Summary::Value* v = s.add_value();
v->set_node_name(c->op_kernel().name());
if (tensor.dtype() == DT_STRING) {
tensor.AsProtoField(v->mutable_tensor());
} else {
tensor.AsProtoTensorContent(v->mutable_tensor());
}
Tensor* summary_tensor = nullptr;
OP_REQUIRES_OK(c, c->allocate_output(0, TensorShape({}), &summary_tensor));
CHECK(SerializeToTString(s, &summary_tensor->scalar<tstring>()()));
}
};
#define REGISTER(T) \
REGISTER_KERNEL_BUILDER( \
Name("TensorSummary").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
SummaryTensorOp<T>);
TF_CALL_ALL_TYPES(REGISTER)
#undef REGISTER
} | #include <functional>
#include <memory>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/histogram/histogram.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static void EXPECT_SummaryMatches(const Summary& actual,
const string& expected_str) {
Summary expected;
CHECK(protobuf::TextFormat::ParseFromString(expected_str, &expected));
EXPECT_EQ(expected.DebugString(), actual.DebugString());
}
class SummaryTensorOpV2Test : public OpsTestBase {
protected:
void MakeOp() {
TF_ASSERT_OK(NodeDefBuilder("myop", "TensorSummaryV2")
.Input(FakeInput(DT_STRING))
.Input(FakeInput(DT_STRING))
.Input(FakeInput(DT_STRING))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(SummaryTensorOpV2Test, BasicPluginData) {
MakeOp();
AddInputFromArray<tstring>(TensorShape({}), {"tag_foo"});
AddInputFromArray<tstring>(TensorShape({}), {"some string tensor content"});
SummaryMetadata summary_metadata;
SummaryMetadata::PluginData* plugin_data =
summary_metadata.mutable_plugin_data();
plugin_data->set_plugin_name("foo");
plugin_data->set_content("content_for_plugin_foo");
AddInputFromArray<tstring>(TensorShape({}),
{summary_metadata.SerializeAsString()});
TF_ASSERT_OK(RunOpKernel());
Tensor* out_tensor = GetOutput(0);
ASSERT_EQ(0, out_tensor->dims());
Summary summary;
ParseProtoUnlimited(&summary, out_tensor->scalar<tstring>()());
ASSERT_EQ(1, summary.value_size());
Tensor string_content_tensor;
CHECK(string_content_tensor.FromProto(summary.value(0).tensor()));
ASSERT_EQ("some string tensor content",
string_content_tensor.scalar<tstring>()());
ASSERT_EQ("tag_foo", summary.value(0).tag());
ASSERT_EQ("foo", summary.value(0).metadata().plugin_data().plugin_name());
ASSERT_EQ("content_for_plugin_foo",
summary.value(0).metadata().plugin_data().content());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_tensor_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/summary_tensor_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f2aec7d9-511e-42b4-a9ad-4688d7f8d448 | cpp | tensorflow/tensorflow | while_loop_analysis | third_party/xla/xla/service/while_loop_analysis.cc | third_party/xla/xla/service/while_loop_analysis_test.cc | #include "xla/service/while_loop_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "absl/base/casts.h"
#include "absl/container/flat_hash_map.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
using std::nullopt;
using std::optional;
namespace m = match;
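// Returns the unique non-constant operand of `instr`; CHECK-fails if there is
// more than one distinct non-constant operand, or none at all.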
static const HloInstruction* NonConstantOperand(const HloInstruction* instr) {
const HloInstruction* result = nullptr;
for (const HloInstruction* operand : instr->operands()) {
if (!operand->IsConstant()) {
if (result != nullptr) {
CHECK_EQ(result, operand);
}
result = operand;
}
}
CHECK_NE(result, nullptr);
return result;
}
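// If every non-constant operand of `instr` is a get-tuple-element (possibly
// through a copy or a custom-call) of `gte_operand`, all with the same tuple
// index, returns that index; otherwise returns nullopt.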
static optional<int64_t> GetGTEOperandIndex(const HloInstruction* instr,
const HloInstruction* gte_operand) {
VLOG(2) << "GetGTEOperandIndex(" << instr->ToString()
<< ", GTE Operand: " << gte_operand->ToString() << ")";
optional<int64_t> tuple_idx;
for (const HloInstruction* operand : instr->operands()) {
if (Match(operand, m::Constant())) {
continue;
}
auto possibly_gte_operand = operand;
if (operand->opcode() == HloOpcode::kCopy) {
possibly_gte_operand = operand->operand(0);
}
if (possibly_gte_operand->opcode() != HloOpcode::kGetTupleElement) {
return nullopt;
}
if (!Match(possibly_gte_operand,
m::GetTupleElement(m::Op().Is(gte_operand))) &&
!Match(possibly_gte_operand,
m::GetTupleElement(m::CustomCall(m::Op().Is(gte_operand))))) {
return nullopt;
}
int64_t operand_tuple_idx = possibly_gte_operand->tuple_index();
if (!tuple_idx.has_value()) {
tuple_idx = operand_tuple_idx;
} else {
if (operand_tuple_idx != tuple_idx) {
return nullopt;
}
}
}
return tuple_idx;
}
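// Finds get-tuple-element instructions in the while body that behave like
// auxiliary induction variables: extracted from and written back to the same
// tuple index, with the written value reachable from the extracted one
// through a chain of simple arithmetic ops.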
std::vector<const HloInstruction*> GetAuxiliaryLoopInductionVars(
const HloInstruction* while_op) {
std::vector<const HloInstruction*> aux_ind_gte;
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
auto* while_body = while_op->while_body();
auto* while_body_param = while_body->parameter_instruction(0);
VLOG(2) << "Aux Induction Variables for loop:" << while_op->ToShortString();
VLOG(2) << "the parameter instr:" << while_body_param->ToShortString();
VLOG(2) << "the parameter user count:" << while_body_param->users().size();
if (while_body_param == nullptr) return aux_ind_gte;
std::map<int64_t, const HloInstruction*> extractions;
for (const HloInstruction* indx_instr : while_body_param->users()) {
if (indx_instr->opcode() != HloOpcode::kGetTupleElement) {
continue;
}
auto it = extractions.find(indx_instr->tuple_index());
if (it != extractions.end()) {
it->second = nullptr;
VLOG(2) << "two extractions at same index:" << indx_instr->ToString();
} else {
extractions.insert(std::make_pair(indx_instr->tuple_index(), indx_instr));
VLOG(2) << "inserting extraction :" << indx_instr->ToString();
}
}
VLOG(2) << "total extractions size:" << extractions.size() << std::endl;
if (extractions.empty()) {
return aux_ind_gte;
}
auto* while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While body root is not a tuple:" << while_body_root->ToString();
return aux_ind_gte;
}
int64_t index = -1;
std::map<int64_t, const HloInstruction*> insertions;
for (const HloInstruction* operand : while_body_root->operands()) {
index++;
if (!operand->IsConstant()) {
auto it = insertions.find(index);
if (it != insertions.end()) {
it->second = nullptr;
VLOG(2) << "two insertions at same index:" << operand->ToString();
} else {
insertions.insert(std::make_pair(index, operand));
VLOG(2) << "inserting insertions:" << operand->ToString();
}
}
}
if (insertions.empty()) {
return aux_ind_gte;
}
std::map<int64_t, std::pair<const HloInstruction*, const HloInstruction*>>
candidate_pairs;
for (; index >= 0; --index) {
const HloInstruction *ext, *inst;
ext = (extractions.find(index) != extractions.end())
? extractions.find(index)->second
: nullptr;
inst = (insertions.find(index) != insertions.end())
? insertions.find(index)->second
: nullptr;
if (ext != nullptr && inst != nullptr) {
if (ext != inst) {
candidate_pairs.insert(
std::make_pair(index, std::make_pair(ext, inst)));
}
}
}
VLOG(2) << "total candidate pairs:" << candidate_pairs.size() << std::endl;
const auto add_dependencies = [](const HloInstruction* hlo,
std::vector<HloInstruction*>* inputs) {
HloInstruction* non_const_operand = nullptr;
int num_non_constants = 0;
for (HloInstruction* operand : hlo->operands()) {
if (!operand->IsConstant()) {
num_non_constants++;
non_const_operand = operand;
}
}
if (num_non_constants == 1 &&
(hlo->opcode() == HloOpcode::kGetTupleElement ||
hlo->opcode() == HloOpcode::kAdd ||
hlo->opcode() == HloOpcode::kMultiply ||
hlo->opcode() == HloOpcode::kDivide ||
hlo->opcode() == HloOpcode::kSubtract)) {
inputs->push_back(non_const_operand);
}
};
std::unique_ptr<HloReachabilityMap> hrm =
HloReachabilityMap::BuildWithRestrictions(
while_body,
absl::FunctionRef<void(const HloInstruction* hlo,
std::vector<HloInstruction*>* inputs)>(
add_dependencies));
for (auto candidates : candidate_pairs) {
VLOG(2) << "are reachable?:" << (candidates.second.first)->ToString()
<< "*************" << (candidates.second.second)->ToString()
<< std::endl;
if (hrm->IsReachable(candidates.second.first, candidates.second.second)) {
aux_ind_gte.push_back(candidates.second.first);
VLOG(2) << "YES";
} else {
VLOG(2) << "NO";
}
}
VLOG(2) << "num auxiliary candidates :" << aux_ind_gte.size();
return aux_ind_gte;
}
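// Identifies the induction variable's tuple index by requiring that the same
// index feed both the loop condition's root and its own update in the body.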
optional<int64_t> GetLoopInductionVarTupleIdx(const HloInstruction* while_op) {
CHECK_EQ(while_op->opcode(), HloOpcode::kWhile);
VLOG(2) << "Finding induction variable for loop "
<< while_op->ToShortString();
auto* while_cond = while_op->while_condition();
auto* while_cond_root = while_cond->root_instruction();
auto* while_cond_param = while_cond->parameter_instruction(0);
optional<int64_t> indvar_tuple_idx =
GetGTEOperandIndex(while_cond_root, while_cond_param);
if (!indvar_tuple_idx) {
VLOG(2) << "Induction variable not found in loop condition: "
<< while_cond->root_instruction()->ToString();
return nullopt;
}
auto* while_body = while_op->while_body();
auto* while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple &&
while_body_root->opcode() != HloOpcode::kCustomCall) {
VLOG(2) << "While body's root is not a tuple or custom-call instruction: "
<< while_body_root->ToString();
return nullopt;
}
const HloInstruction* while_body_inc;
if (while_body_root->opcode() == HloOpcode::kTuple) {
while_body_inc = while_body_root->operand(*indvar_tuple_idx);
} else {
if (while_body_root->operand_count() == 1 &&
while_body_root->operand(0)->opcode() == HloOpcode::kTuple) {
auto* while_body_root_input_tuple = while_body_root->operand(0);
if (*indvar_tuple_idx >= while_body_root_input_tuple->operand_count()) {
VLOG(2) << "Cannot find the induction variable in the output root "
"custom-call "
<< while_body_root->ToString();
return std::nullopt;
}
while_body_inc = while_body_root_input_tuple->operand(*indvar_tuple_idx);
} else {
if (*indvar_tuple_idx >= while_body_root->operand_count()) {
VLOG(2) << "Cannot find the induction variable in the output root "
"custom-call "
<< while_body_root->ToString();
return std::nullopt;
}
while_body_inc = while_body_root->operand(*indvar_tuple_idx);
}
}
auto* while_body_param = while_body->parameter_instruction(0);
optional<int64_t> while_body_indvar_tuple_idx =
GetGTEOperandIndex(while_body_inc, while_body_param);
if (!while_body_indvar_tuple_idx) {
VLOG(2)
<< "Induction variable not found in while body increment instruction: "
<< while_body_inc->ToString();
return nullopt;
}
if (while_body_indvar_tuple_idx != indvar_tuple_idx) {
VLOG(2) << "Tuple index of induction variable does not match between loop "
"condition ("
<< *indvar_tuple_idx << ") and while body ("
<< *while_body_indvar_tuple_idx << ")";
return nullopt;
}
auto* while_init = while_op->operand(0);
if (while_init->opcode() != HloOpcode::kTuple) {
VLOG(2) << "While init expected to be a tuple: " << while_init->ToString();
return nullopt;
}
VLOG(2) << "Induction variable's tuple index: " << *indvar_tuple_idx;
return indvar_tuple_idx;
}
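// Overflow-checked 64-bit addition: performs the add with unsigned
// wraparound, then returns nullopt if the signed result overflowed.
// CheckedSubtract below is the analogous subtraction.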
optional<int64_t> CheckedAdd(int64_t a, int64_t b) {
uint64_t aa = absl::bit_cast<uint64_t>(a);
uint64_t bb = absl::bit_cast<uint64_t>(b);
int64_t result = absl::bit_cast<int64_t>(aa + bb);
if (a >= 0 == b >= 0 && result >= 0 != a >= 0) {
return nullopt;
}
return result;
}
optional<int64_t> CheckedSubtract(int64_t a, int64_t b) {
uint64_t aa = absl::bit_cast<uint64_t>(a);
uint64_t bb = absl::bit_cast<uint64_t>(b);
int64_t result = absl::bit_cast<int64_t>(aa - bb);
if (a >= 0 != b >= 0 && result >= 0 == b >= 0) {
return nullopt;
}
return result;
}
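// Pattern-matches loops of the form `for (i = init; i < N; i += step)` (or
// `<=`) with a constant bound N and a positive constant step, and returns the
// exact trip count.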
optional<int64_t> MatchTrivialLoopTripCount(const HloInstruction* while_op,
int64_t indvar_tuple_idx,
const Literal& indvar_init) {
optional<int64_t> indvar_init_val =
LiteralUtil::LiteralAsScalarInt64(indvar_init);
if (!indvar_init_val) {
VLOG(2) << "Pattern-match failed: induction variable init is not a "
"constant scalar representable as an int64_t: "
<< indvar_init.ToString();
return nullopt;
}
auto* while_body = while_op->while_body();
auto* while_body_root = while_body->root_instruction();
HloInstruction* while_body_indvar_update;
if (while_body_root->opcode() == HloOpcode::kCustomCall) {
if (while_body_root->operand_count() == 1 &&
while_body_root->operand(0)->opcode() == HloOpcode::kTuple) {
auto* while_body_root_input_tuple = while_body_root->mutable_operand(0);
while_body_indvar_update =
while_body_root_input_tuple->mutable_operand(indvar_tuple_idx);
} else {
while_body_indvar_update =
while_body_root->mutable_operand(indvar_tuple_idx);
}
} else {
while_body_indvar_update =
while_body_root->mutable_operand(indvar_tuple_idx);
}
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
HloInstruction* trip_count_increase_step_instr = nullptr;
int64_t trip_count_step = 0;
if (!Match(while_body_indvar_update,
m::AddAnyOrder(m::Op().Is(while_body_indvar),
m::Op(&trip_count_increase_step_instr)))) {
if (trip_count_increase_step_instr == nullptr) {
VLOG(2) << "Pattern-match failed: induction variable is not getting "
"updated by an add operation: "
<< while_body_indvar_update->ToString();
return nullopt;
}
if (!trip_count_increase_step_instr->IsConstant() ||
!ShapeUtil::IsEffectiveScalar(
trip_count_increase_step_instr->shape())) {
VLOG(2) << "Pattern-match failed: induction variable is not getting "
"incremented by constant: "
<< while_body_indvar_update->ToString();
return nullopt;
}
if (!LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal())
.has_value()) {
VLOG(2)
<< "Pattern-match failed: trip count step is not an integral type: "
<< trip_count_increase_step_instr->shape().ToString();
return nullopt;
}
VLOG(2) << "Pattern-match for trip count step failed: "
<< trip_count_increase_step_instr->ToString();
}
trip_count_step = LiteralUtil::LiteralAsScalarInt64(
trip_count_increase_step_instr->literal())
.value();
if (trip_count_step <= 0) {
VLOG(2) << "Pattern-match failed: trip count step is not a natural number: "
<< trip_count_step;
return nullopt;
}
auto* while_cond = while_op->while_condition();
auto* while_cond_root = while_cond->root_instruction();
auto* while_cond_indvar = NonConstantOperand(while_cond_root);
HloInstruction* while_cond_bound = nullptr;
if (!Match(while_cond_root,
m::Op().WithBinaryOperandsAnyOrder(
m::Op().Is(while_cond_indvar),
m::ConstantEffectiveScalar(&while_cond_bound)))) {
VLOG(2) << "Pattern-match failed: while condition is not of the form "
"op(i, N) or op(N, i).";
return nullopt;
}
optional<int64_t> while_cond_bound_val =
LiteralUtil::LiteralAsScalarInt64(while_cond_bound->literal());
if (!while_cond_bound_val) {
VLOG(2) << "Pattern-match failed: while condition induction variable is "
"not a constant scalar representable as an int64_t.";
return nullopt;
}
if (Match(while_cond_root,
m::Op()
.WithComparisonDirection(ComparisonDirection::kLt)
.WithOperand(0, m::Op().Is(while_cond_indvar)))) {
VLOG(2) << "Pattern-match succeeded: loop condition is i < N: "
<< while_cond_root->ToString();
optional<int64_t> trips =
CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
if (trips) {
const int64_t remainder = std::remainder(*trips, trip_count_step);
const int64_t div = std::floor(*trips / trip_count_step);
if (remainder == 0) {
return std::max(int64_t{0}, div);
}
trips = CheckedAdd(div, 1);
if (!trips) {
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
return nullopt;
}
if (*trips < *while_cond_bound_val) {
return std::max(int64_t{0}, *trips);
}
return std::max(int64_t{0}, div);
}
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX.";
return nullopt;
}
if (Match(while_cond_root,
m::Op()
.WithComparisonDirection(ComparisonDirection::kLe)
.WithOperand(0, m::Op().Is(while_cond_indvar)))) {
VLOG(2) << "Pattern-match succeeded: loop condition is i <= N: "
<< while_cond_root->ToString();
optional<int64_t> trips =
CheckedSubtract(*while_cond_bound_val, *indvar_init_val);
if (!trips) {
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
return nullopt;
}
trips = CheckedAdd(std::floor(*trips / trip_count_step), 1);
if (!trips) {
VLOG(2) << "Pattern-match failed: Trip count exceeds INT64_MAX";
return nullopt;
}
return std::max<int64_t>(0, *trips);
}
VLOG(2) << "Pattern-match failed: while condition follows unknown pattern: "
<< while_cond_root->ToString();
return nullopt;
}
optional<int64_t> ComputeWhileLoopTripCount(const HloInstruction* while_op,
int64_t max_brute_force_iters) {
VLOG(2) << "Getting trip count for loop " << while_op->ToString();
optional<int64_t> indvar_tuple_idx = GetLoopInductionVarTupleIdx(while_op);
if (!indvar_tuple_idx) {
return nullopt;
}
HloEvaluator evaluator(0);
auto* while_init = while_op->operand(0);
auto* indvar_init = while_init->operand(*indvar_tuple_idx);
absl::StatusOr<Literal> indvar_init_result = evaluator.Evaluate(indvar_init);
if (!indvar_init_result.ok()) {
VLOG(2) << "Couldn't evaluate induction variable init, "
<< indvar_init_result.status() << ", " << indvar_init->ToString();
return nullopt;
}
Literal indvar_iter_val = std::move(indvar_init_result).value();
if (auto trip_count = MatchTrivialLoopTripCount(while_op, *indvar_tuple_idx,
indvar_iter_val)) {
return trip_count;
}
auto* while_body = while_op->while_body();
auto* while_body_indvar_update =
while_body->root_instruction()->operand(*indvar_tuple_idx);
auto* while_body_indvar = NonConstantOperand(while_body_indvar_update);
auto* while_cond = while_op->while_condition();
auto* while_cond_root = while_cond->root_instruction();
auto* while_cond_indvar = NonConstantOperand(while_cond_root);
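  // Fall back to brute force: repeatedly evaluate the loop condition and the
  // induction-variable update with the HLO evaluator, for up to
  // max_brute_force_iters iterations.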
for (int64_t trip_count = 0; trip_count != max_brute_force_iters + 1;
++trip_count) {
absl::StatusOr<Literal> result = evaluator.EvaluateWithSubstitutions(
while_cond_root, {{while_cond_indvar, &indvar_iter_val}});
if (!result.ok()) {
VLOG(2) << "Couldn't evaluate while cond: " << result.status();
return nullopt;
}
if (result.value().data<bool>() == absl::Span<const bool>{false}) {
VLOG(2) << "Loop has static trip count of " << trip_count;
return trip_count;
}
absl::StatusOr<Literal> indvar_next_result =
evaluator.EvaluateWithSubstitutions(
while_body_indvar_update, {{while_body_indvar, &indvar_iter_val}});
if (!indvar_next_result.ok()) {
VLOG(2) << "Couldn't evaluate induction variable update: "
<< indvar_next_result.status();
return nullopt;
}
indvar_iter_val = std::move(indvar_next_result).value();
}
VLOG(2) << "Loop has unknown trip count.";
return nullopt;
}
static HloInstruction* GetOnlyGTE(HloInstruction* inst) {
if (inst->user_count() != 1) {
return nullptr;
}
HloInstruction* user = inst->users().back();
if (user->opcode() != HloOpcode::kGetTupleElement) {
return nullptr;
}
return user;
}
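// Returns an upper bound on the trip count. Beyond the exact-count path, it
// only recognizes loops whose body sets the induction variable to a constant:
// if re-evaluating the condition on that constant yields false, the loop runs
// at most once.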
optional<int64_t> ComputeWhileLoopTripCountUpperBound(
const HloInstruction* while_op) {
auto exact_trip_count = ComputeWhileLoopTripCount(while_op);
if (exact_trip_count) {
VLOG(2) << "Loop has exact trip count.";
return exact_trip_count;
}
auto* while_cond = while_op->while_condition();
auto* while_cond_param = while_cond->parameter_instruction(0);
auto* cond_gte = GetOnlyGTE(while_cond_param);
if (!cond_gte) {
VLOG(2) << "Induction variable not found in loop condition: "
<< while_cond->root_instruction()->ToString();
return nullopt;
}
auto* while_body = while_op->while_body();
auto* while_body_root = while_body->root_instruction();
if (while_body_root->opcode() != HloOpcode::kTuple) {
VLOG(3) << "While body's root is not a tuple instruction: "
<< while_body_root->ToString();
return nullopt;
}
int64_t indvar_index = cond_gte->tuple_index();
auto* while_body_indvar = while_body_root->operand(indvar_index);
if (while_body_indvar->opcode() != HloOpcode::kConstant) {
VLOG(3) << "While body does not set the IV to a constant: "
<< while_body_indvar->ToString();
return nullopt;
}
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
auto new_param = HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape({cond_gte->shape()}), "temp");
replacements[cond_gte] =
HloInstruction::CreateGetTupleElement(new_param.get(), 0);
replacements[while_cond_param] = std::move(new_param);
auto new_module = std::make_unique<HloModule>("temp_mod", HloModuleConfig{});
auto* new_computation = new_module->AddEmbeddedComputation(
while_cond->CloneWithReplacements(&replacements));
HloEvaluator evaluator(0);
Literal fake_input = Literal::CreateFromShape(
new_computation->parameter_instruction(0)->shape());
  TF_CHECK_OK(fake_input.CopyFrom(while_body_indvar->literal(),
                                  /*dest_shape_index=*/{0},
                                  /*src_shape_index=*/{}));
absl::StatusOr<Literal> eval_result =
evaluator.Evaluate(*new_computation, {std::move(fake_input)});
if (!eval_result.ok()) {
VLOG(2) << "Couldn't evaluate while loop condition.";
return nullopt;
}
Literal cond_result_pred = std::move(eval_result.value());
CHECK(Shape::Equal().IgnoreLayout()(cond_result_pred.shape(),
ShapeUtil::MakeShape(PRED, {})));
bool cond_returns_true = cond_result_pred.GetFirstElement<bool>();
if (!cond_returns_true) {
VLOG(2) << "Upper bound on the trip count is 1";
return 1;
}
VLOG(2) << "Loop has no known upper bound on the trip count.";
return nullopt;
}
} | #include "xla/service/while_loop_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class WhileLoopAnalysisTest : public HloTestBase {
protected:
[[nodiscard]] absl::StatusOr<int64_t> MakeWhileLoopAndGetTripCount(
int init, int limit, int step, ComparisonDirection dir);
};
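// Builds a while loop equivalent to `for (i = init; i <dir> limit; i += step)`
// from an HLO template and returns the trip count computed by
// MatchTrivialLoopTripCount.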
absl::StatusOr<int64_t> WhileLoopAnalysisTest::MakeWhileLoopAndGetTripCount(
int init, int limit, int step, ComparisonDirection dir) {
std::string hlo_string_template = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
index = s32[] get-tuple-element(p_body), index=1
one = s32[] constant({{STEP}})
inc = s32[] add(index, one)
ROOT root = (f32[2], s32[]) tuple(val, inc)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant({{LIMIT}})
ROOT result = pred[] compare(gte, const), direction={{COMP_DIR}}
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] constant({{INIT}})
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
}
)";
std::string hlo_string =
absl::StrReplaceAll(hlo_string_template,
{{"{{INIT}}", absl::StrCat(init)},
{"{{LIMIT}}", absl::StrCat(limit)},
{"{{STEP}}", absl::StrCat(step)},
{"{{COMP_DIR}}", ComparisonDirectionToString(dir)}});
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::optional<int64_t> trip_count = MatchTrivialLoopTripCount(
while_op, 1,
Cast<HloConstantInstruction>(
module->GetComputationWithName("entry")->GetInstructionWithName(
"param.1"))
->literal());
CHECK(trip_count.has_value());
return *trip_count;
}
TEST_F(WhileLoopAnalysisTest, SingleIterationUpperBound) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
const = s32[] constant(-1)
ROOT root = (f32[2], s32[]) tuple(val, const)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 1);
}
TEST_F(WhileLoopAnalysisTest, SimpleLoopWithCustomCallNonTuple) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(loop_var.1), custom_call_target="CustomCallStart"
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(idx, output), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.5, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* while_op = m->entry_computation()->root_instruction();
EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 5);
}
TEST_F(WhileLoopAnalysisTest, SimpleLoopWithCustomCall) {
std::string hlo_string = R"(
HloModule SimpleLoop
SimpleLoop.body {
loop_var.1 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
custom-call.1 = (s32[]{:T(128)}, s32[3]{0}) custom-call(loop_var.1), custom_call_target="CustomCallStart"
get-tuple-element.1 = s32[]{:T(128)} get-tuple-element(custom-call.1), index=0
constant.1 = s32[]{:T(128)} constant(1)
idx = s32[]{:T(128)} add(get-tuple-element.1, constant.1)
get-tuple-element.2 = s32[3]{0} get-tuple-element(custom-call.1), index=1
output = s32[3]{0} add(get-tuple-element.2, get-tuple-element.2)
tuple = (s32[]{:T(128)}, s32[3]{0}) tuple(idx, output)
ROOT custom-call.2 = (s32[]{:T(128)}, s32[3]{0}) custom-call(tuple), custom_call_target="CustomCallEnd"
}
SimpleLoop.condition {
loop_var.2 = (s32[]{:T(128)}, s32[3]{0}) parameter(0)
get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[]{:T(128)} constant(5)
ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
}
ENTRY SimpleLoop {
constant.3 = s32[]{:T(128)} constant(0)
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[]{:T(128)}, s32[3]{0}) tuple(constant.3, constant.4)
ROOT while = (s32[]{:T(128)}, s32[3]{0}) while(tuple.1), condition=
SimpleLoop.condition, body=SimpleLoop.body
}
)";
auto m = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* while_op = m->entry_computation()->root_instruction();
EXPECT_EQ(*ComputeWhileLoopTripCountUpperBound(while_op), 5);
}
TEST_F(WhileLoopAnalysisTest, NoUpperBound) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
const = s32[] constant(42)
ROOT root = (f32[2], s32[]) tuple(val, const)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
EXPECT_EQ(ComputeWhileLoopTripCountUpperBound(while_op), std::nullopt);
}
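// Reference implementation that counts the iterations directly, used to
// cross-check the analytically derived trip counts below.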
int CalculateTripCount(int init, int limit, int step, ComparisonDirection dir) {
int trip_count = 0;
if (dir == ComparisonDirection::kLt) {
for (int i = init; i < limit; i += step) {
trip_count++;
}
} else if (dir == ComparisonDirection::kLe) {
for (int i = init; i <= limit; i += step) {
trip_count++;
}
} else {
LOG(FATAL) << "Unknown comparison direction: "
<< ComparisonDirectionToString(dir);
}
return trip_count;
}
TEST_F(WhileLoopAnalysisTest, ExactBoundTrivialTripCount) {
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 42, 1, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 42, 2, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 42, 5, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLt).value(),
CalculateTripCount(0, 40, 5, ComparisonDirection::kLt));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 1, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 42, 1, ComparisonDirection::kLe));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 2, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 42, 2, ComparisonDirection::kLe));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 42, 5, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 42, 5, ComparisonDirection::kLe));
EXPECT_EQ(
MakeWhileLoopAndGetTripCount(0, 40, 5, ComparisonDirection::kLe).value(),
CalculateTripCount(0, 40, 5, ComparisonDirection::kLe));
}
TEST_F(WhileLoopAnalysisTest, NoAIVNoConstChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
val3 = s32[] get-tuple-element(p_body), index=2
add = s32[] add(val2, val3)
sub = s32[] subtract(add, val3)
ROOT root = (f32[2], s32[], s32[]) tuple(val1, add, sub)
}
condition {
p_cond = (f32[2], s32[], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
param.2 = s32[] parameter(2)
while_init = (f32[2], s32[], s32[]) tuple(param.0, param.1, param.2)
ROOT while = (f32[2], s32[], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 0);
}
TEST_F(WhileLoopAnalysisTest, AIVMultiChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const.1 = s32[] constant(42)
const.2 = s32[] constant(42)
const.3 = s32[] constant(42)
add = s32[] add(val2, const.1)
sub = s32[] subtract(add, const.2)
mul = s32[] multiply(sub, const.3)
ROOT root = (f32[2], s32[]) tuple(val1, mul)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 1);
EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
TEST_F(WhileLoopAnalysisTest, NoAIV) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
add = s32[] add(val2, val2)
const.1 = s32[] constant(42)
mul = s32[] multiply(add, const.1)
div = s32[] divide(mul, add)
ROOT root = (f32[2], s32[]) tuple(val1, div)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 0);
}
TEST_F(WhileLoopAnalysisTest, AIVNoChain) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const = s32[] constant(42)
add = s32[] add(val2, const)
ROOT root = (f32[2], s32[]) tuple(val1, add)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
HloInstruction* while_op = module->entry_computation()->root_instruction();
std::vector<const HloInstruction*> aux_indices =
GetAuxiliaryLoopInductionVars(while_op);
EXPECT_EQ(aux_indices.size(), 1);
EXPECT_EQ(aux_indices[0]->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(aux_indices[0]->tuple_index(), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a2ee7d71-f029-422d-9d19-69394fa2a722 | cpp | google/libaddressinput | json | cpp/src/util/json.cc | cpp/test/util/json_test.cc | #include "json.h"
#include <cassert>
#include <cstddef>
#include <memory>
#include <string>
#include <vector>
#include <rapidjson/document.h>
#include <rapidjson/reader.h>
namespace i18n {
namespace addressinput {
using rapidjson::Document;
using rapidjson::kParseValidateEncodingFlag;
using rapidjson::Value;
class Json::JsonImpl {
public:
JsonImpl(const JsonImpl&) = delete;
JsonImpl& operator=(const JsonImpl&) = delete;
explicit JsonImpl(const std::string& json)
: document_(new Document),
value_(document_.get()),
dictionaries_(),
valid_(false) {
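    // kParseValidateEncodingFlag makes RapidJSON reject strings that are not
    // valid UTF-8; additionally, only a top-level JSON object is accepted.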
document_->Parse<kParseValidateEncodingFlag>(json.c_str());
valid_ = !document_->HasParseError() && document_->IsObject();
}
~JsonImpl() {
for (auto ptr : dictionaries_) {
delete ptr;
}
}
bool valid() const { return valid_; }
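  // Lazily wraps every object-valued member in a Json instance on first use.
  // The wrappers are owned by this JsonImpl and released in its destructor.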
const std::vector<const Json*>& GetSubDictionaries() {
if (dictionaries_.empty()) {
for (Value::ConstMemberIterator member = value_->MemberBegin();
member != value_->MemberEnd(); ++member) {
if (member->value.IsObject()) {
dictionaries_.push_back(new Json(new JsonImpl(&member->value)));
}
}
}
return dictionaries_;
}
bool GetStringValueForKey(const std::string& key, std::string* value) const {
assert(value != nullptr);
Value::ConstMemberIterator member = value_->FindMember(key.c_str());
if (member == value_->MemberEnd() || !member->value.IsString()) {
return false;
}
value->assign(member->value.GetString(), member->value.GetStringLength());
return true;
}
private:
explicit JsonImpl(const Value* value)
: document_(),
value_(value),
dictionaries_(),
valid_(true) {
assert(value_ != nullptr);
assert(value_->IsObject());
}
const std::unique_ptr<Document> document_;
const Value* const value_;
std::vector<const Json*> dictionaries_;
bool valid_;
};
Json::Json() : impl_() {}
Json::~Json() = default;
bool Json::ParseObject(const std::string& json) {
assert(impl_ == nullptr);
impl_.reset(new JsonImpl(json));
if (!impl_->valid()) {
impl_.reset();
}
return impl_ != nullptr;
}
const std::vector<const Json*>& Json::GetSubDictionaries() const {
assert(impl_ != nullptr);
return impl_->GetSubDictionaries();
}
bool Json::GetStringValueForKey(const std::string& key,
std::string* value) const {
assert(impl_ != nullptr);
return impl_->GetStringValueForKey(key, value);
}
Json::Json(JsonImpl* impl) : impl_(impl) {}
}
} | #include "util/json.h"
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::Json;
TEST(JsonTest, EmptyStringIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject(std::string()));
}
TEST(JsonTest, EmptyDictionaryContainsNoKeys) {
Json json;
ASSERT_TRUE(json.ParseObject("{}"));
std::string not_checked;
EXPECT_FALSE(json.GetStringValueForKey("key", ¬_checked));
EXPECT_FALSE(json.GetStringValueForKey(std::string(), ¬_checked));
}
TEST(JsonTest, InvalidJsonIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("{"));
}
TEST(JsonTest, OneKeyIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": "value"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key", &value));
EXPECT_EQ("value", value);
}
TEST(JsonTest, EmptyStringKeyIsNotInObject) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": "value"})"));
std::string not_checked;
  EXPECT_FALSE(json.GetStringValueForKey(std::string(), &not_checked));
}
TEST(JsonTest, EmptyKeyIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"": "value"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey(std::string(), &value));
EXPECT_EQ("value", value);
}
TEST(JsonTest, EmptyValueIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": ""})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key", &value));
EXPECT_TRUE(value.empty());
}
TEST(JsonTest, Utf8EncodingIsValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key": "Ü"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key", &value));
EXPECT_EQ("Ü", value);
}
TEST(JsonTest, InvalidUtf8IsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("{\"key\": \"\xC3\x28\"}"));
}
TEST(JsonTest, NullInMiddleIsNotValid) {
Json json;
static const char kJson[] = "{\"key\": \"val\0ue\"}";
EXPECT_FALSE(json.ParseObject(std::string(kJson, sizeof kJson - 1)));
}
TEST(JsonTest, TwoKeysAreValid) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key1": "value1", "key2": "value2"})"));
std::string value;
EXPECT_TRUE(json.GetStringValueForKey("key1", &value));
EXPECT_EQ("value1", value);
EXPECT_TRUE(json.GetStringValueForKey("key2", &value));
EXPECT_EQ("value2", value);
}
TEST(JsonTest, ListIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("[]"));
}
TEST(JsonTest, StringIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject(R"("value")"));
}
TEST(JsonTest, NumberIsNotValid) {
Json json;
EXPECT_FALSE(json.ParseObject("3"));
}
TEST(JsonTest, NoDictionaryFound) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key":"value"})"));
EXPECT_TRUE(json.GetSubDictionaries().empty());
}
TEST(JsonTest, DictionaryFound) {
Json json;
ASSERT_TRUE(json.ParseObject(R"({"key":{"inner_key":"value"}})"));
const auto& sub_dicts = json.GetSubDictionaries();
ASSERT_EQ(1U, sub_dicts.size());
std::string value;
EXPECT_TRUE(sub_dicts.front()->GetStringValueForKey("inner_key", &value));
EXPECT_EQ("value", value);
}
TEST(JsonTest, DictionariesHaveSubDictionaries) {
Json json;
ASSERT_TRUE(json.ParseObject(
R"({"key":{"inner_key":{"inner_inner_key":"value"}}})"));
const auto& sub_dicts = json.GetSubDictionaries();
ASSERT_EQ(1U, sub_dicts.size());
const auto& sub_sub_dicts = sub_dicts.front()->GetSubDictionaries();
ASSERT_EQ(1U, sub_sub_dicts.size());
std::string value;
EXPECT_TRUE(
sub_sub_dicts.front()->GetStringValueForKey("inner_inner_key", &value));
EXPECT_EQ("value", value);
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/util/json.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/util/json_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
8933e6bb-c4c3-4af6-9261-521952ef7498 | cpp | tensorflow/tensorflow | uniform_quantized_add_op | tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op.cc | tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op_test.cc | #include <algorithm>
#include <limits>
#include <type_traits>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
namespace tensorflow {
namespace {
using errors::InvalidArgument;
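// Computes the shape resulting from adding `lhs_shape` and `rhs_shape` under
// numpy-style broadcasting: dimensions are aligned from the trailing end, and
// each aligned pair must either match or contain a 1.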
absl::StatusOr<TensorShape> CalculateOutputShape(const TensorShape& lhs_shape,
const TensorShape& rhs_shape) {
if (lhs_shape.dims() == 0) {
return rhs_shape;
} else if (rhs_shape.dims() == 0) {
return lhs_shape;
}
std::vector<int64_t> reversed_output_shape;
int l_dim = lhs_shape.dims() - 1;
int r_dim = rhs_shape.dims() - 1;
while (l_dim >= 0 || r_dim >= 0) {
const int64_t l_dim_size = l_dim >= 0 ? lhs_shape.dim_size(l_dim) : 1;
const int64_t r_dim_size = r_dim >= 0 ? rhs_shape.dim_size(r_dim) : 1;
if (l_dim_size != 1 && r_dim_size != 1 && l_dim_size != r_dim_size) {
return InvalidArgument("Cannot Add tensors of shapes: ",
lhs_shape.DebugString(), rhs_shape.DebugString());
}
reversed_output_shape.push_back(l_dim_size == 1 ? r_dim_size : l_dim_size);
--l_dim;
--r_dim;
}
absl::c_reverse(reversed_output_shape);
TensorShape output_shape;
TF_RETURN_IF_ERROR(
TensorShape::BuildTensorShape(reversed_output_shape, &output_shape));
return output_shape;
}
template <typename T>
void QuantizedAdd(const Tensor& lhs, const Tensor& rhs,
const Tensor& output_zero_points,
int output_quantization_min_val,
int output_quantization_max_val, int lhs_quantization_axis,
                  int rhs_quantization_axis, int output_quantization_axis,
Tensor& output) {
const T* lhs_data = lhs.flat<T>().data();
const T* rhs_data = rhs.flat<T>().data();
T* output_data = output.flat<T>().data();
const int32* output_zero_points_data =
output_zero_points.flat<int32>().data();
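  // For each flat output index, walk the output dimensions from innermost to
  // outermost and rebuild the corresponding flat indices into lhs and rhs.
  // Size-1 operand dimensions broadcast, so their index contribution is
  // pinned to 0.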
for (int64_t output_idx = 0; output_idx < output.NumElements();
++output_idx) {
int64_t output_idx_remain = output_idx;
int64_t lhs_idx = 0;
int64_t rhs_idx = 0;
int64_t lhs_inner_dim_size = 1;
int64_t rhs_inner_dim_size = 1;
int64_t output_zero_points_idx_of_quantization_axis = 0;
for (int output_dim = output.dims() - 1; output_dim >= 0; --output_dim) {
const int64_t output_idx_of_dim =
output_idx_remain % output.dim_size(output_dim);
output_idx_remain /= output.dim_size(output_dim);
      if (output_quantization_axis == output_dim) {
output_zero_points_idx_of_quantization_axis = output_idx_of_dim;
}
const int lhs_dim = output_dim - (output.dims() - lhs.dims());
if (lhs_dim >= 0) {
const int64_t lhs_idx_of_dim =
lhs.dim_size(lhs_dim) == 1 ? 0 : output_idx_of_dim;
lhs_idx += lhs_idx_of_dim * lhs_inner_dim_size;
lhs_inner_dim_size *= lhs.dim_size(lhs_dim);
}
const int rhs_dim = output_dim - (output.dims() - rhs.dims());
if (rhs_dim >= 0) {
const int64_t rhs_idx_of_dim =
rhs.dim_size(rhs_dim) == 1 ? 0 : output_idx_of_dim;
rhs_idx += rhs_idx_of_dim * rhs_inner_dim_size;
rhs_inner_dim_size *= rhs.dim_size(rhs_dim);
}
}
const int32_t output_zero_point =
output_zero_points_data[output_zero_points_idx_of_quantization_axis];
const int32_t unclamped = static_cast<int32_t>(lhs_data[lhs_idx]) +
static_cast<int32_t>(rhs_data[rhs_idx]) +
output_zero_point;
output_data[output_idx] = static_cast<T>(std::clamp(
unclamped, output_quantization_min_val, output_quantization_max_val));
}
}
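// Adds two quantized tensors by first requantizing both operands to the
// output scales with all-zero intermediate zero points, so the sum can be
// taken directly in the quantized domain and only the output zero point
// remains to be added. For example, with per-tensor lhs (scale 2, zero point
// -20), rhs (scale 1, zero point 0), and output (scale 4, zero point -40),
// lhs = -6 and rhs = -100 requantize to (-6 + 20) * 2 / 4 = 7 and
// -100 * 1 / 4 = -25, giving an output of 7 + (-25) + (-40) = -58, as in the
// PerTensorDifferentScale test below.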
template <typename T>
Status EvalQuantizedAdd(OpKernelContext* context, const Tensor& lhs,
const Tensor& rhs, const Tensor& lhs_scales,
const Tensor& lhs_zero_points, const Tensor& rhs_scales,
const Tensor& rhs_zero_points,
const Tensor& output_scales,
const Tensor& output_zero_points,
int output_quantization_min_val,
int output_quantization_max_val,
int lhs_quantization_axis, int rhs_quantization_axis,
int output_quantization_axis, Tensor& output) {
const DataType dtype = DataTypeToEnum<T>::v();
Tensor zeros_of_output_scales_shape;
TF_RETURN_IF_ERROR(context->allocate_temp(DT_INT32, output_scales.shape(),
&zeros_of_output_scales_shape));
zeros_of_output_scales_shape.flat<int32_t>().setZero();
Tensor lhs_requantized;
TF_RETURN_IF_ERROR(
context->allocate_temp(dtype, lhs.shape(), &lhs_requantized));
const int lhs_requantize_output_quantization_axis =
output_quantization_axis == -1 ? -1 : lhs_quantization_axis;
TF_RETURN_IF_ERROR(EvalRequantize<T, T>(
context, lhs, lhs_scales, lhs_zero_points, output_scales,
zeros_of_output_scales_shape,
lhs_quantization_axis, lhs_requantize_output_quantization_axis,
std::numeric_limits<T>::min(),
std::numeric_limits<T>::max(), lhs_requantized));
Tensor rhs_requantized;
TF_RETURN_IF_ERROR(
context->allocate_temp(dtype, rhs.shape(), &rhs_requantized));
TF_RETURN_IF_ERROR(EvalRequantize<T, T>(
context, rhs, rhs_scales, rhs_zero_points, output_scales,
zeros_of_output_scales_shape,
rhs_quantization_axis, output_quantization_axis,
std::numeric_limits<T>::min(),
std::numeric_limits<T>::max(), rhs_requantized));
QuantizedAdd<T>(lhs_requantized, rhs_requantized, output_zero_points,
output_quantization_min_val, output_quantization_max_val,
lhs_quantization_axis, rhs_quantization_axis,
output_quantization_axis, output);
return absl::OkStatus();
}
}
template <typename T>
class UniformQuantizedAddOp : public OpKernel {
public:
explicit UniformQuantizedAddOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES(context, (std::is_same<T, qint32>()),
InvalidArgument("Unsupported operand type."));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_min_val",
&output_quantization_min_val_));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_max_val",
&output_quantization_max_val_));
OP_REQUIRES_OK(context, context->GetAttr("lhs_quantization_axis",
&lhs_quantization_axis_));
OP_REQUIRES_OK(context, context->GetAttr("rhs_quantization_axis",
&rhs_quantization_axis_));
OP_REQUIRES_OK(context, context->GetAttr("output_quantization_axis",
&output_quantization_axis_));
OP_REQUIRES(
context,
(lhs_quantization_axis_ >= -1 && rhs_quantization_axis_ >= -1 &&
output_quantization_axis_ >= -1),
InvalidArgument("lhs, rhs and output quantization_axis must be -1 or "
"within [0, dims)"));
}
void Compute(OpKernelContext* context) override {
const Tensor& lhs = context->input(0);
const Tensor& rhs = context->input(1);
const Tensor& lhs_scales = context->input(2);
const Tensor& lhs_zero_points = context->input(3);
const Tensor& rhs_scales = context->input(4);
const Tensor& rhs_zero_points = context->input(5);
const Tensor& output_scales = context->input(6);
const Tensor& output_zero_points = context->input(7);
OP_REQUIRES_OK(
context, QuantizationAxisAndShapeValid(lhs.shape(), lhs_scales.shape(),
lhs_zero_points.shape(),
lhs_quantization_axis_));
OP_REQUIRES_OK(
context, QuantizationAxisAndShapeValid(rhs.shape(), rhs_scales.shape(),
rhs_zero_points.shape(),
rhs_quantization_axis_));
auto output_shape_status = CalculateOutputShape(lhs.shape(), rhs.shape());
OP_REQUIRES_OK(context, output_shape_status.status());
const auto& output_shape = output_shape_status.value();
OP_REQUIRES_OK(context,
QuantizationAxisAndShapeValid(
output_shape, output_scales.shape(),
output_zero_points.shape(), output_quantization_axis_));
OP_REQUIRES(
context,
(!(lhs_quantization_axis_ >= 0 && output_quantization_axis_ >= 0) ||
(lhs.dims() - lhs_quantization_axis_ ==
output_shape.dims() - output_quantization_axis_)),
InvalidArgument("If lhs and output is both per-axis quantized, the "
"quantization axis must match."));
OP_REQUIRES(
context,
(!(rhs_quantization_axis_ >= 0 && output_quantization_axis_ >= 0) ||
(rhs.dims() - rhs_quantization_axis_ ==
output_shape.dims() - output_quantization_axis_)),
InvalidArgument("If rhs and output is both per-axis quantized, the "
"quantization axis must match."));
OP_REQUIRES(context,
(AllElementsPositive<float>(lhs_scales) &&
AllElementsPositive<float>(rhs_scales) &&
AllElementsPositive<float>(output_scales)),
InvalidArgument(
"lhs/rhs/output scales elements must be all positive."));
Tensor* output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
OP_REQUIRES_OK(
context, EvalQuantizedAdd<T>(
context, lhs, rhs, lhs_scales, lhs_zero_points, rhs_scales,
rhs_zero_points, output_scales, output_zero_points,
output_quantization_min_val_, output_quantization_max_val_,
lhs_quantization_axis_, rhs_quantization_axis_,
output_quantization_axis_, *output));
}
private:
int lhs_quantization_axis_;
int rhs_quantization_axis_;
int output_quantization_axis_;
int output_quantization_min_val_;
int output_quantization_max_val_;
};
REGISTER_KERNEL_BUILDER(
Name("UniformQuantizedAdd").Device(DEVICE_CPU).TypeConstraint<qint32>("T"),
UniformQuantizedAddOp<qint32>);
} | #include <limits>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
constexpr int32_t kInt32Min = std::numeric_limits<int32_t>::min();
constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
}
class UniformQuantizedAddOpTest : public OpsTestBase {
protected:
};
TEST_F(UniformQuantizedAddOpTest, InvalidShape) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", 1)
.Attr("rhs_quantization_axis", 0)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<qint32>(TensorShape({2}), {-100, 0});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20});
AddInputFromArray<float>(TensorShape({2}), {2, 3});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40});
EXPECT_TRUE(absl::IsInvalidArgument(RunOpKernel()));
}
TEST_F(UniformQuantizedAddOpTest, PerChannelSameScale) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", 1)
.Attr("rhs_quantization_axis", 0)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 4});
AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-126, -4, 118, -120, 2, 124});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleLhsMultiDims) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-126, -24, 78, -120, -18, 84});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleRhsMultiDims) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100});
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-126, -24, 78, -120, -18, 84});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerChannelDifferentScale) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", 1)
.Attr("rhs_quantization_axis", 0)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 1});
AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20});
AddInputFromArray<float>(TensorShape({3}), {1, 3, 2});
AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<float>(TensorShape({3}), {4, 3, 2});
AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-58, -4, 129, -55, 2, 132});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerChannelDifferentScaleBroadcastLhs) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", 1)
.Attr("rhs_quantization_axis", 1)
.Attr("output_quantization_axis", 1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({1, 3}), {-100, 0, 100});
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<float>(TensorShape({3}), {1, 3, 2});
AddInputFromArray<int32>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<float>(TensorShape({3}), {2, 3, 1});
AddInputFromArray<int32>(TensorShape({3}), {-20, 0, 20});
AddInputFromArray<float>(TensorShape({3}), {4, 3, 2});
AddInputFromArray<int32>(TensorShape({3}), {-40, 0, 40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-58, -4, 129, -55, 2, 132});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerTensorDifferentScale) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<qint32>(TensorShape({3}), {-100, 0, 100});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {1});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {4});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-58, -32, -6, -55, -29, -3});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleTensorAddScalar) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<qint32>(TensorShape({}), {-100});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-126, -124, -122, -120, -118, -116});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleScalarAddTensor) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({}), {-100});
AddInputFromArray<qint32>(TensorShape({2, 3}), {-6, -4, -2, 0, 2, 4});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 3}));
test::FillValues<qint32>(&expected, {-126, -124, -122, -120, -118, -116});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, PerTensorSameScaleScalarAddScalar) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({}), {-6});
AddInputFromArray<qint32>(TensorShape({}), {-100});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({}));
test::FillValues<qint32>(&expected, {-126});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, TensorAddEmptyTensor) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({2, 1, 1}), {-6, -12});
AddInputFromArray<qint32>(TensorShape({2, 0, 1}), {});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 0, 1}));
test::FillValues<qint32>(&expected, {});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
TEST_F(UniformQuantizedAddOpTest, ScalarAddEmptyTensor) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformQuantizedAdd")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("T", DT_QINT32)
.Attr("lhs_quantization_axis", -1)
.Attr("rhs_quantization_axis", -1)
.Attr("output_quantization_axis", -1)
.Attr("lhs_quantization_min_val", kInt32Min)
.Attr("lhs_quantization_max_val", kInt32Max)
.Attr("rhs_quantization_min_val", kInt32Min)
.Attr("rhs_quantization_max_val", kInt32Max)
.Attr("output_quantization_min_val", kInt32Min)
.Attr("output_quantization_max_val", kInt32Max)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint32>(TensorShape({}), {-6});
AddInputFromArray<qint32>(TensorShape({2, 0, 1}), {});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-20});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {0});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<int32>(TensorShape({}), {-40});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({2, 0, 1}));
test::FillValues<qint32>(&expected, {});
test::ExpectTensorEqual<qint32>(expected, *GetOutput(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_quantized_add_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02af6d36-38bc-425f-b15d-5894c9ee2f50 | cpp | tensorflow/tensorflow | resource_variable | tensorflow/lite/experimental/resource/resource_variable.cc | tensorflow/lite/experimental/resource/resource_variable_test.cc | #include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
namespace tflite {
namespace resource {
ResourceVariable::ResourceVariable() {
memset(&tensor_, 0, sizeof(TfLiteTensor));
}
ResourceVariable::ResourceVariable(ResourceVariable&& other) {
tensor_ = other.tensor_;
is_initialized_ = other.is_initialized_;
memset(&other.tensor_, 0, sizeof(TfLiteTensor));
other.is_initialized_ = false;
}
ResourceVariable::~ResourceVariable() {
if (is_initialized_) {
free(tensor_.data.raw);
if (tensor_.dims) {
TfLiteIntArrayFree(tensor_.dims);
}
}
}
TfLiteStatus ResourceVariable::AssignFrom(const TfLiteTensor* tensor) {
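  // Stash the current buffer, byte count, and dims so they can be reused when
  // the incoming tensor has the same shape or size, avoiding a reallocation
  // on every assignment.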
char* old_raw = tensor_.data.raw;
size_t old_bytes = tensor_.bytes;
TfLiteIntArray* old_dims = tensor_.dims;
memset(&tensor_, 0, sizeof(tensor_));
tensor_.name = "ResourceVariable";
tensor_.allocation_type = kTfLiteDynamic;
tensor_.type = tensor->type;
tensor_.params = tensor->params;
tensor_.quantization = tensor->quantization;
if (TfLiteIntArrayEqual(old_dims, tensor->dims)) {
tensor_.dims = old_dims;
} else {
TfLiteIntArrayFree(old_dims);
tensor_.dims = TfLiteIntArrayCopy(tensor->dims);
}
tensor_.data.raw = old_raw;
if (old_bytes != tensor->bytes) {
TfLiteTensorRealloc(tensor->bytes, &tensor_);
} else {
tensor_.bytes = old_bytes;
}
memcpy(tensor_.data.raw, tensor->data.raw, tensor_.bytes);
is_initialized_ = true;
return kTfLiteOk;
}
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
int resource_id) {
if (resources->count(resource_id) != 0) {
return;
}
resources->emplace(resource_id, std::make_unique<ResourceVariable>());
}
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id) {
auto it = resources->find(resource_id);
if (it != resources->end()) {
return static_cast<ResourceVariable*>(it->second.get());
}
return nullptr;
}
bool IsBuiltinResource(const TfLiteTensor* tensor) {
return tensor && tensor->type == kTfLiteResource &&
tensor->delegate == nullptr;
}
}
} | #include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace resource {
void InitTensor(const std::vector<int>& shape, TfLiteAllocationType alloc_type,
float default_value, TfLiteTensor* tensor) {
memset(tensor, 0, sizeof(TfLiteTensor));
int num_elements = 1;
for (auto dim : shape) num_elements *= dim;
if (shape.empty()) num_elements = 0;
float* buf = static_cast<float*>(malloc(sizeof(float) * num_elements));
for (int i = 0; i < num_elements; ++i) buf[i] = default_value;
const int bytes = num_elements * sizeof(buf[0]);
auto* dims = ConvertArrayToTfLiteIntArray(shape.size(), shape.data());
TfLiteTensorReset(TfLiteType::kTfLiteFloat32, nullptr, dims, {},
reinterpret_cast<char*>(buf), bytes, alloc_type, nullptr,
false, tensor);
}
TEST(ResourceTest, NonDynamicTensorAssign) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {1};
InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
free(tensor.data.raw);
TfLiteTensorFree(&tensor);
}
TEST(ResourceTest, DynamicTensorAssign) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {1};
InitTensor(shape, kTfLiteDynamic, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
TfLiteTensorFree(&tensor);
}
TEST(ResourceTest, AssignSameSizeTensor) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor_a, tensor_b;
std::vector<int> shape_a = {1};
std::vector<int> shape_b = {1};
InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
EXPECT_TRUE(var.IsInitialized());
value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({1}));
EXPECT_EQ(4.0f, value->data.f[0]);
TfLiteTensorFree(&tensor_a);
TfLiteTensorFree(&tensor_b);
}
TEST(ResourceTest, AssignDifferentSizeTensor) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor_a, tensor_b;
std::vector<int> shape_a = {1};
std::vector<int> shape_b = {2};
InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float), value->bytes);
EXPECT_EQ(1, value->dims->size);
EXPECT_EQ(1, value->dims->data[0]);
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
EXPECT_TRUE(var.IsInitialized());
value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(sizeof(float) * 2, value->bytes);
ASSERT_THAT(value, DimsAre({2}));
EXPECT_EQ(4.0f, value->data.f[0]);
TfLiteTensorFree(&tensor_a);
TfLiteTensorFree(&tensor_b);
}
TEST(IsBuiltinResource, IsBuiltinResourceTest) {
TfLiteTensor tensor;
tensor.type = kTfLiteResource;
tensor.delegate = nullptr;
EXPECT_TRUE(IsBuiltinResource(&tensor));
EXPECT_FALSE(IsBuiltinResource(nullptr));
tensor.type = kTfLiteFloat32;
EXPECT_FALSE(IsBuiltinResource(&tensor));
tensor.type = kTfLiteResource;
TfLiteDelegate delegate;
tensor.delegate = &delegate;
EXPECT_FALSE(IsBuiltinResource(&tensor));
}
TEST(ResourceTest, GetMemoryUsage) {
ResourceVariable var;
EXPECT_FALSE(var.IsInitialized());
TfLiteTensor tensor;
std::vector<int> shape = {100};
InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
EXPECT_TRUE(var.IsInitialized());
auto* value = var.GetTensor();
EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
EXPECT_EQ(kTfLiteFloat32, value->type);
EXPECT_EQ(100 * sizeof(float), value->bytes);
ASSERT_THAT(value, DimsAre({100}));
EXPECT_EQ(1.0f, value->data.f[0]);
EXPECT_EQ(100 * sizeof(float), var.GetMemoryUsage());
free(tensor.data.raw);
TfLiteTensorFree(&tensor);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/resource/resource_variable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/resource/resource_variable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
db9bf754-48fc-47e9-953c-1b2d185fbf79 | cpp | tensorflow/tensorflow | generator | tensorflow/lite/schema/builtin_ops_header/generator.cc | tensorflow/lite/schema/builtin_ops_header/generator_test.cc | #include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <cctype>
#include <iostream>
#include <string>
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace builtin_ops_header {
namespace {
const char* kFileHeader =
R"(
#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_
#define TENSORFLOW_LITE_BUILTIN_OPS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
)";
const char* kFileFooter =
R"(} TfLiteBuiltinOperator;
#ifdef __cplusplus
}
#endif
#endif
)";
}
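// Returns true if `name` consists of non-empty segments of uppercase letters
// and digits separated by single underscores, with no leading or trailing
// underscore (e.g. "CONV_2D").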
bool IsValidInputEnumName(const std::string& name) {
const char* begin = name.c_str();
const char* ch = begin;
while (*ch != '\0') {
if (ch != begin) {
if (*ch != '_') {
return false;
}
++ch;
}
bool empty = true;
while (isupper(*ch) || isdigit(*ch)) {
empty = false;
++ch;
}
if (empty) {
return false;
}
}
return true;
}
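// Converts an UPPER_SNAKE_CASE enum name into the corresponding
// kTfLiteBuiltin-prefixed CamelCase constant name, e.g. "CONV_2D" becomes
// "kTfLiteBuiltinConv2d".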
std::string ConstantizeVariableName(const std::string& name) {
std::string result = "kTfLiteBuiltin";
bool uppercase = true;
for (char input_char : name) {
if (input_char == '_') {
uppercase = true;
} else if (uppercase) {
result += toupper(input_char);
uppercase = false;
} else {
result += tolower(input_char);
}
}
return result;
}
bool GenerateHeader(std::ostream& os) {
auto enum_names = tflite::EnumNamesBuiltinOperator();
for (auto enum_value : EnumValuesBuiltinOperator()) {
auto enum_name = enum_names[enum_value];
if (!IsValidInputEnumName(enum_name)) {
std::cerr << "Invalid input enum name: " << enum_name << std::endl;
return false;
}
}
os << kFileHeader;
for (auto enum_value : EnumValuesBuiltinOperator()) {
auto enum_name = enum_names[enum_value];
os << " ";
os << ConstantizeVariableName(enum_name);
os << " = ";
os << enum_value;
os << ",\n";
}
os << kFileFooter;
return true;
}
}
} | #include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <fstream>
#include <gtest/gtest.h>
namespace {
using tflite::builtin_ops_header::ConstantizeVariableName;
using tflite::builtin_ops_header::IsValidInputEnumName;
TEST(TestIsValidInputEnumName, TestWithValidInputNames) {
EXPECT_TRUE(IsValidInputEnumName("ADD"));
EXPECT_TRUE(IsValidInputEnumName("CONV_2D"));
EXPECT_TRUE(IsValidInputEnumName("L2_POOL_2D"));
}
TEST(TestIsValidInputEnumName, TestWithLeadingUnderscore) {
EXPECT_FALSE(IsValidInputEnumName("_ADD"));
EXPECT_FALSE(IsValidInputEnumName("_CONV_2D"));
}
TEST(TestIsValidInputEnumName, TestWithLowerCase) {
EXPECT_FALSE(IsValidInputEnumName("_AdD"));
EXPECT_FALSE(IsValidInputEnumName("_COnV_2D"));
}
TEST(TestIsValidInputEnumName, TestWithOtherCharacters) {
EXPECT_FALSE(IsValidInputEnumName("_AdD!2D"));
EXPECT_FALSE(IsValidInputEnumName("_COnV?2D"));
}
TEST(TestIsValidInputEnumName, TestWithDoubleUnderscores) {
EXPECT_FALSE(IsValidInputEnumName("ADD__2D"));
EXPECT_FALSE(IsValidInputEnumName("CONV__2D"));
}
TEST(TestConstantizeVariableName, TestWithValidInputNames) {
EXPECT_EQ(ConstantizeVariableName("ADD"), "kTfLiteBuiltinAdd");
EXPECT_EQ(ConstantizeVariableName("CONV_2D"), "kTfLiteBuiltinConv2d");
EXPECT_EQ(ConstantizeVariableName("L2_POOL_2D"), "kTfLiteBuiltinL2Pool2d");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/schema/builtin_ops_header/generator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/schema/builtin_ops_header/generator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f27a8a3-8772-4af9-a79e-dda37cf9c8dc | cpp | tensorflow/tensorflow | graph_info | tensorflow/lite/graph_info.cc | tensorflow/lite/graph_info_test.cc | #include "tensorflow/lite/graph_info.h"
#include <algorithm>
#include <vector>
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
template <class T>
void Uniquefy(std::vector<T>* items) {
std::sort(items->begin(), items->end());
items->erase(std::unique(items->begin(), items->end()), items->end());
}
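// Partitions an execution plan into maximal subsets of nodes that can run
// together, alternating between "TF partition" subsets (nodes listed in
// `nodes_to_partition`, e.g. nodes claimed by a delegate) and
// "TF non-partition" subsets, while respecting tensor data dependencies and
// the supplied control edges.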
class PartitionGraphIntoIndependentNodeSubsetsImpl {
public:
PartitionGraphIntoIndependentNodeSubsetsImpl(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges& control_edges)
: info_(info),
node_subsets_(node_subsets),
node_type_(info_->num_total_nodes(), NodeSubset::kTfNonPartition),
greedily_(greedily),
control_edges_(control_edges),
num_incoming_control_edges_(info_->num_execution_nodes(), 0) {
for (auto node_index : TfLiteIntArrayView(nodes_to_partition)) {
node_type_[node_index] = NodeSubset::kTfPartition;
}
Uniquefy(&control_edges_);
}
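  // Runs the partitioning. Each produced NodeSubset corresponds to one
  // "epoch": a tensor's epoch is the epoch of the node that produces it,
  // tensors that no node produces are marked always-ready, and subsets are
  // built until a sweep adds no further nodes. Graph outputs are then
  // recorded as output tensors of their producing subsets.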
void Partition() {
node_subsets_->clear();
tensor_epochs_.clear();
tensor_epochs_.resize(info_->num_tensors(), kEpochAlwaysReady);
node_epochs_.clear();
node_epochs_.resize(info_->num_execution_nodes(), kEpochNotReady);
num_incoming_control_edges_.clear();
num_incoming_control_edges_.resize(info_->num_execution_nodes(), 0);
for (const auto& edge : control_edges_) {
++num_incoming_control_edges_[edge.second];
}
for (int node_index = 0; node_index < info_->num_execution_nodes();
node_index++) {
const TfLiteNode& node = info_->node(node_index);
for (int output_tensor_index : TfLiteIntArrayView(node.outputs)) {
if (output_tensor_index == kTfLiteOptionalTensor) continue;
tensor_epochs_[output_tensor_index] = kEpochNotReady;
}
}
while (true) {
BuildNodeSubset();
if (node_subsets_->back().nodes.empty()) {
node_subsets_->pop_back();
break;
}
}
for (int output_index : info_->outputs()) {
int output_epoch = tensor_epochs_[output_index];
if (output_epoch == kEpochAlwaysReady) {
continue;
}
NodeSubset& output_subset = (*node_subsets_)[output_epoch];
output_subset.output_tensors.push_back(output_index);
}
for (NodeSubset& node_subset : *node_subsets_) {
Uniquefy(&node_subset.input_tensors);
Uniquefy(&node_subset.output_tensors);
}
}
private:
enum {
kEpochNotReady = -1,
kEpochAlwaysReady = -2
};
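  // Attempts to add `node_index` to the subset currently being built.
  // Returns true iff the node was added: it must not be assigned yet, all of
  // its inputs and incoming control edges must be ready, and its type must
  // match the current subset's type (the first node added fixes the type).
  // On success, the node's outputs are marked ready, cross-epoch inputs are
  // recorded as subset boundaries, and outgoing control edges are released.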
bool UpdateNode(int node_index) {
const TfLiteNode& node = info_->node(node_index);
NodeSubset& current_subset = node_subsets_->back();
int current_epoch = node_subsets_->size() - 1;
if (node_epochs_[node_index] != kEpochNotReady) {
return false;
}
for (int input_tensor_index : TfLiteIntArrayView(node.inputs)) {
if (input_tensor_index != kTfLiteOptionalTensor &&
tensor_epochs_[input_tensor_index] == kEpochNotReady) {
return false;
}
}
if (num_incoming_control_edges_[node_index] != 0) {
return false;
}
int original_node_idx = info_->node_index(node_index);
if (current_subset.type == NodeSubset::kTfUnexplored) {
current_subset.type = node_type_[original_node_idx];
}
if (current_subset.type == node_type_[original_node_idx]) {
node_epochs_[node_index] = current_epoch;
current_subset.nodes.push_back(original_node_idx);
for (int output_tensor_index : TfLiteIntArrayView(node.outputs)) {
if (output_tensor_index == kTfLiteOptionalTensor) continue;
tensor_epochs_[output_tensor_index] = current_epoch;
}
for (int input_tensor_index : TfLiteIntArrayView(node.inputs)) {
if (input_tensor_index == kTfLiteOptionalTensor) {
continue;
}
int input_epoch = tensor_epochs_[input_tensor_index];
int node_epoch = current_epoch;
if (input_epoch != node_epoch) {
current_subset.input_tensors.push_back(input_tensor_index);
if (input_epoch >= 0) {
NodeSubset& input_subset = (*node_subsets_)[input_epoch];
input_subset.output_tensors.push_back(input_tensor_index);
}
}
}
for (auto edge_iter =
std::lower_bound(control_edges_.begin(), control_edges_.end(),
ControlEdge(node_index, 0));
edge_iter != control_edges_.end() && edge_iter->first == node_index;
++edge_iter) {
--num_incoming_control_edges_[edge_iter->second];
}
return true;
} else {
return false;
}
}
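  // Builds one subset by repeatedly sweeping over all nodes and adding every
  // node that has become ready. In non-greedy mode, a sweep stops at the
  // first non-addable node encountered after progress has been made, keeping
  // each subset contiguous in the original execution order.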
void BuildNodeSubset() {
node_subsets_->emplace_back(NodeSubset());
while (true) {
bool did_something = false;
for (int node_index = 0; node_index < info_->num_execution_nodes();
node_index++) {
if (UpdateNode(node_index)) {
did_something = true;
} else {
if (did_something && !greedily_) {
return;
}
}
}
if (!did_something) return;
}
}
const GraphInfo* info_;
std::vector<NodeSubset>* node_subsets_;
std::vector<NodeSubset::Type> node_type_;
std::vector<int> tensor_epochs_;
std::vector<int> node_epochs_;
const bool greedily_;
ControlEdges control_edges_;
std::vector<int> num_incoming_control_edges_;
};
}
TfLiteStatus PartitionGraphIntoIndependentNodeSubsets(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets, bool greedily,
const ControlEdges* control_edges) {
ControlEdges my_control_edges;
if (control_edges == nullptr) {
control_edges = &my_control_edges;
if (greedily) {
for (int last_op_with_side_effect = -1, node_index = 0;
node_index < info->num_execution_nodes(); ++node_index) {
const auto& node = info->node(node_index);
if (node.might_have_side_effect) {
if (last_op_with_side_effect != -1) {
my_control_edges.emplace_back(last_op_with_side_effect, node_index);
}
last_op_with_side_effect = node_index;
}
}
}
}
PartitionGraphIntoIndependentNodeSubsetsImpl(
info, nodes_to_partition, node_subsets, greedily, *control_edges)
.Partition();
return kTfLiteOk;
}
} | #include "tensorflow/lite/graph_info.h"
#include <stddef.h>
#include <algorithm>
#include <memory>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
using ::testing::Eq;
using ::testing::ExplainMatchResult;
using ::testing::Pointwise;
using NodeSubsets = std::vector<NodeSubset>;
TfLiteIntArray* ConvertVector(const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
}
class SimpleTestGraph : public GraphInfo {
public:
SimpleTestGraph(
const std::vector<int>& inputs, const std::vector<int>& outputs,
const std::vector<std::tuple<std::vector<int>, std::vector<int>, bool>>&
nodes,
int node_index_offset = 0)
: inputs_(inputs),
outputs_(outputs),
node_index_offset_(node_index_offset) {
NeedsTensors(inputs_);
NeedsTensors(outputs_);
for (int i = 0; i < node_index_offset; ++i) AddNode({}, {}, false);
for (const auto& [inputs, outputs, might_have_side_effect] : nodes) {
AddNode(inputs, outputs, might_have_side_effect);
}
registrations_.resize(nodes.size());
}
~SimpleTestGraph() override {
for (auto& node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
}
}
size_t num_total_nodes() const override { return nodes_.size(); }
size_t num_execution_nodes() const override {
return nodes_.size() - node_index_offset_;
}
const TfLiteNode& node(size_t index) const override {
return nodes_[index + node_index_offset_];
}
size_t node_index(size_t index) const override {
return index + node_index_offset_;
}
size_t num_tensors() const override { return tensors_.size(); }
const TfLiteRegistration& registration(size_t index) const override {
return registrations_[index + node_index_offset_];
}
TfLiteTensor* tensor(size_t index) override { return &tensors_[index]; }
TfLiteTensor* tensors() override { return tensors_.data(); }
const std::vector<int>& inputs() const override { return inputs_; }
const std::vector<int>& outputs() const override { return outputs_; }
const std::vector<int>& variables() const override { return variables_; }
private:
void AddNode(const std::vector<int>& inputs, const std::vector<int>& outputs,
bool might_have_side_effect) {
NeedsTensors(inputs);
NeedsTensors(outputs);
nodes_.push_back(TfLiteNode());
TfLiteNode& node = nodes_.back();
node.inputs = ConvertVector(inputs);
node.outputs = ConvertVector(outputs);
node.might_have_side_effect = might_have_side_effect;
}
void NeedsTensors(const std::vector<int>& tensors) {
for (const int tensor : tensors)
tensors_.resize(std::max<int>(tensor + 1, tensors_.size()));
}
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
std::vector<TfLiteRegistration> registrations_;
size_t node_index_offset_;
};
void PartitionGraphOrDie(const SimpleTestGraph& graph,
const std::vector<int>& nodes_to_partition,
NodeSubsets* subgraphs, const bool greedily,
const ControlEdges* control_edges) {
TfLiteIntArray* nodes_to_partition_int_array =
ConvertVector(nodes_to_partition);
ASSERT_EQ(PartitionGraphIntoIndependentNodeSubsets(
&graph, nodes_to_partition_int_array, subgraphs, greedily,
control_edges),
kTfLiteOk);
TfLiteIntArrayFree(nodes_to_partition_int_array);
}
NodeSubsets PartitionGraph(const SimpleTestGraph& graph,
const std::vector<int>& nodes_to_partition,
const bool greedily = true,
const ControlEdges* control_edges = nullptr) {
NodeSubsets subgraphs;
PartitionGraphOrDie(graph, nodes_to_partition, &subgraphs, greedily,
control_edges);
return subgraphs;
}
MATCHER(EqNodeSubset, "") {
const NodeSubset& a = std::get<0>(arg);
const NodeSubset& b = std::get<1>(arg);
if (a.type != b.type) {
*result_listener << "mismatched .type ";
return ExplainMatchResult(Eq(b.type), a.type, result_listener);
}
if (a.nodes != b.nodes) {
*result_listener << "mismatched .nodes ";
return ExplainMatchResult(Pointwise(Eq(), b.nodes), a.nodes,
result_listener);
}
if (a.input_tensors != b.input_tensors) {
*result_listener << "mismatched .input_tensors ";
return ExplainMatchResult(Pointwise(Eq(), b.input_tensors), a.input_tensors,
result_listener);
}
if (a.output_tensors != b.output_tensors) {
*result_listener << "mismatched .output_tensors ";
return ExplainMatchResult(Pointwise(Eq(), b.output_tensors),
a.output_tensors, result_listener);
}
return true;
}
TEST(PartitionTest, Nodes0PartitionNodes0) {
EXPECT_THAT(PartitionGraph({
{},
{},
{},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({})));
}
TEST(PartitionTest, Nodes0PartitionNodes0Tensors1) {
EXPECT_THAT(PartitionGraph({
{0},
{0},
{},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({})));
}
TEST(PartitionTest, Nodes1PartitionNodes0) {
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{0},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes0_WithOffset) {
constexpr int node_index_offset = 17;
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
node_index_offset,
},
{}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{node_index_offset},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes0Inputs0) {
EXPECT_THAT(
PartitionGraph({
{},
{0},
{
{{}, {0}, false},
},
},
{0}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{},
{0},
},
})));
}
TEST(PartitionTest, Nodes1PartitionNodes1) {
EXPECT_THAT(
PartitionGraph({
{0},
{1},
{
{{0}, {1}, false},
},
},
{0}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes1) {
EXPECT_THAT(
PartitionGraph({
{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
},
{1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfPartition,
{1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes1_WithOffset) {
constexpr int node_index_offset = 17;
EXPECT_THAT(
PartitionGraph({{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
node_index_offset},
{node_index_offset + 1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfNonPartition,
{node_index_offset + 0},
{0},
{1},
},
{
NodeSubset::kTfPartition,
{node_index_offset + 1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes2PartitionNodes2) {
EXPECT_THAT(
PartitionGraph({
{0},
{2},
{
{{0}, {1}, false},
{{1}, {2}, false},
},
},
{0, 1}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{2},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2) {
EXPECT_THAT(
PartitionGraph({
{0},
{3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1, 2}, {3}, false},
},
},
{0, 2}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
{
NodeSubset::kTfPartition,
{2},
{1, 2},
{3},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2Greedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{2, 3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1}, {3}, false},
},
},
{0, 2}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 2},
{0},
{1, 3},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
})));
}
TEST(PartitionTest, Nodes3PartitionNodes2ClusteredNonGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{2, 3},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{1}, {3}, false},
},
},
{0, 2},
false),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0},
{0},
{1},
},
{
NodeSubset::kTfNonPartition,
{1},
{1},
{2},
},
{
NodeSubset::kTfPartition,
{2},
{1},
{3},
},
})));
}
TEST(PartitionTest, Nodes4PartitionNodes3_WithControlDependency) {
EXPECT_THAT(
PartitionGraph({
{0},
{4},
{
{{0}, {1}, true},
{{1}, {2}, true},
{{2}, {3}, false},
{{1, 3}, {}, true},
{{1}, {4}, true},
},
},
{0, 1, 3, 4}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{1, 2},
},
{
NodeSubset::kTfNonPartition,
{2},
{2},
{3},
},
{
NodeSubset::kTfPartition,
{3, 4},
{1, 3},
{4},
},
})));
}
TEST(PartitionTest, Nodes4PartitionNodes3_WithExternalControlDependency) {
const ControlEdges control_edges = {
{0, 1},
{1, 3},
{3, 4},
};
EXPECT_THAT(
PartitionGraph({
{0},
{4},
{
{{0}, {1}, false},
{{1}, {2}, false},
{{2}, {3}, false},
{{1, 3}, {}, false},
{{1}, {4}, false},
},
},
{0, 1, 3, 4},
true, &control_edges),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{1, 2},
},
{
NodeSubset::kTfNonPartition,
{2},
{2},
{3},
},
{
NodeSubset::kTfPartition,
{3, 4},
{1, 3},
{4},
},
})));
}
TEST(PartitionTest, ComplexGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{4, 7},
{
{{0}, {1}, false},
{{1}, {2, 5}, false},
{{2}, {3}, false},
{{3}, {4}, false},
{{5}, {6}, false},
{{6}, {7}, false},
},
},
{0, 1, 4, 5}),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1, 4, 5},
{0},
{2, 7},
},
{
NodeSubset::kTfNonPartition,
{2, 3},
{2},
{4},
},
})));
}
TEST(PartitionTest, ComplexNonGreedily) {
EXPECT_THAT(
PartitionGraph({
{0},
{4, 7},
{
{{0}, {1}, false},
{{1}, {2, 5}, false},
{{2}, {3}, false},
{{3}, {4}, false},
{{5}, {6}, false},
{{6}, {7}, false},
},
},
{0, 1, 4, 5},
false),
Pointwise(EqNodeSubset(), NodeSubsets({
{
NodeSubset::kTfPartition,
{0, 1},
{0},
{2, 5},
},
{
NodeSubset::kTfNonPartition,
{2, 3},
{2},
{4},
},
{
NodeSubset::kTfPartition,
{4, 5},
{5},
{7},
},
})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/graph_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/graph_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a8c62eca-f2f6-4cb4-a2bd-14bcec19ffd3 | cpp | tensorflow/tensorflow | mlir_bridge_pass_util | tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc | tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/tf2xla/tf2xla_defs.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/status.h"
namespace tensorflow {
using ::mlir::failure;
using ::mlir::LogicalResult;
using ::mlir::success;
namespace {
constexpr absl::string_view kPartitionedCall = "TPUPartitionedCall";
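// Returns success if `predicate` holds for `graph` itself or for the body of
// any function reachable from it through `function_library`; failure
// otherwise, or if a reachable function body cannot be instantiated.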
LogicalResult HasAttr(
const Graph& graph, const FunctionLibraryDefinition* function_library,
const std::function<bool(const Graph& graph)>& predicate) {
if (predicate(graph)) {
return success();
}
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (!function_library) return failure();
for (const std::string& func_name :
function_library->ReachableDefinitions(graph_def).ListFunctionNames()) {
const FunctionDef* func_def = function_library->Find(func_name);
std::unique_ptr<FunctionBody> func_body;
absl::Status status = FunctionDefToBodyHelper(
*func_def, AttrSlice(&func_def->attr()), function_library, &func_body);
if (!status.ok()) {
LOG(ERROR) << "Failed to parse " << func_name << ": "
<< absl::StatusMessageAsCStr(status);
return failure();
}
if (predicate(*func_body->graph)) {
return success();
}
}
return failure();
}
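// Returns true if the graph contains an `_Arg` node of type DT_RESOURCE that
// is assigned to a device on a "ps" (parameter server) job.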
bool HasPsWithResourceVariable(const Graph& graph) {
const std::string jobType = "ps";
const std::string nodeType = "_Arg";
const std::string attrKey = "T";
for (const Node* node : graph.nodes()) {
if (node->type_string() == nodeType) {
auto device_name = node->assigned_device_name();
DeviceNameUtils::ParsedName device;
if (DeviceNameUtils::ParseFullName(device_name, &device) &&
device.has_job && device.job == jobType) {
for (const auto& attr : node->attrs()) {
auto attr_key = attr.first;
auto attr_value = attr.second;
if (attr_key == attrKey &&
attr_value.value_case() == AttrValue::kType &&
attr_value.type() == DT_RESOURCE) {
              return true;
}
}
}
}
}
return false;
}
bool IsNonReplicatedGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
auto predicate = [](const Graph& graph) {
const std::string kStatefulPartitionedCallOp = "StatefulPartitionedCall";
for (const Node* node : graph.nodes()) {
auto node_op = node->type_string();
if (node_op == kStatefulPartitionedCallOp) {
auto attr = node->attrs().FindByString(std::string(kMustCompileAttr));
if (attr != nullptr && attr->b() == true) {
return true;
}
}
}
return false;
};
return HasAttr(graph, function_library, predicate).succeeded();
}
bool IsReplicatedGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
auto predicate = [](const Graph& graph) {
for (const Node* node : graph.nodes()) {
if (node->attrs().FindByString(std::string(kTpuReplicateAttr))) {
return true;
}
}
return false;
};
return HasAttr(graph, function_library, predicate).succeeded();
}
bool IsReplicatedGraph(mlir::ModuleOp module) {
auto walk_result = module.walk([&](mlir::Operation* op) {
const llvm::StringRef tpu_replicate_attr_name(kTpuReplicateAttr.data(),
kTpuReplicateAttr.size());
auto replicate_attr =
op->getAttrOfType<mlir::StringAttr>(tpu_replicate_attr_name);
if (replicate_attr) return mlir::WalkResult::interrupt();
return mlir::WalkResult::advance();
});
return walk_result.wasInterrupted();
}
bool DoesGraphContainTPUPartitionedCall(const Graph& graph) {
for (const Node* node : graph.nodes()) {
if (node->type_string() == kPartitionedCall) return true;
}
return false;
}
bool DoReachableFuncsContainTPUPartitionedCall(
const GraphDef& graph_def, const FunctionLibraryDefinition& flib_def) {
for (const std::string& func_name :
flib_def.ReachableDefinitions(graph_def).ListFunctionNames()) {
const FunctionDef* func_def = flib_def.Find(func_name);
std::unique_ptr<FunctionBody> func_body;
if (!FunctionDefToBodyHelper(*func_def, AttrSlice(&func_def->attr()),
&flib_def, &func_body)
.ok())
return false;
if (DoesGraphContainTPUPartitionedCall(*func_body->graph)) return true;
}
return false;
}
bool AreFunctionsFromFlibDefInference(
const FunctionLibraryDefinition& flib_def) {
for (const std::string& func_name : flib_def.ListFunctionNames()) {
const FunctionDef* func_def = flib_def.Find(func_name);
for (const NodeDef& node_def : func_def->node_def()) {
if (node_def.op() == kPartitionedCall) return true;
}
}
return false;
}
}
bool IsSupportedByNonReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library) {
return IsNonReplicatedGraph(graph, function_library) &&
HasPsWithResourceVariable(graph);
}
bool IsSupportedByReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library) {
return IsReplicatedGraph(graph, function_library);
}
bool IsSupportedByReplicatedBridge(mlir::ModuleOp module) {
return IsReplicatedGraph(module);
}
bool HasTPUPartitionedCallOpInModule(mlir::ModuleOp module) {
bool has_tpu_partitioned_call = false;
for (auto func_op : module.getOps<mlir::func::FuncOp>()) {
func_op->walk([&](mlir::TF::TPUPartitionedCallOp op) {
has_tpu_partitioned_call = true;
});
if (has_tpu_partitioned_call) break;
}
return has_tpu_partitioned_call;
}
bool IsInferenceGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
if (DoesGraphContainTPUPartitionedCall(graph)) return true;
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (DoReachableFuncsContainTPUPartitionedCall(graph_def, graph.flib_def()))
return true;
if (AreFunctionsFromFlibDefInference(graph.flib_def())) return true;
if (function_library == nullptr) return false;
if (DoReachableFuncsContainTPUPartitionedCall(graph_def, *function_library))
return true;
if (AreFunctionsFromFlibDefInference(*function_library)) return true;
return false;
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"
#include <vector>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/tpu_functional_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/tf2xla/tf2xla_defs.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/enable_tf2_utils.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
FunctionDef PassThroughResource() {
return FunctionDefHelper::Define(
"PassThroughResource",
{"in: resource"},
{"out: resource"},
{},
{{{"out"}, "Identity", {"in"}, {{"T", DataType::DT_RESOURCE}}}});
}
TEST(IsSupportedByNonReplicatedBridge, NonReplicatedGraph) {
const FunctionDef& fd = PassThroughResource();
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kEagerRuntime);
tensorflow::set_tf2_execution(true);
ConfigProto config = ConfigProto();
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::_Arg(root.WithOpName("A"), DT_RESOURCE, 0);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* call;
NameAttrList f_name_attr;
f_name_attr.set_name(fd.signature().name());
TF_ASSERT_OK(
NodeBuilder("B", "StatefulPartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_RESOURCE})
.Attr("Tout", {DT_RESOURCE})
.Attr("f", f_name_attr)
.Finalize(root.graph(), &call));
call->AddAttr(std::string(kMustCompileAttr), true);
TF_ASSERT_OK(root.ToGraph(&graph));
for (Node* node : graph.nodes()) {
node->set_assigned_device_name("/job:ps/replica:0/task:0/device:GPU:0");
}
EXPECT_TRUE(
IsSupportedByNonReplicatedBridge(graph, nullptr));
}
TEST(IsSupportedByReplicatedBridge, ReplicatedGraph) {
const FunctionDef& fd = test::function::XTimesTwo();
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kEagerRuntime);
tensorflow::set_tf2_execution(true);
ConfigProto config = ConfigProto();
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* call;
NameAttrList f_name_attr;
f_name_attr.set_name(fd.signature().name());
TF_ASSERT_OK(
NodeBuilder("B", "StatefulPartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_FLOAT})
.Attr("Tout", {DT_FLOAT})
.Attr("f", f_name_attr)
.Finalize(root.graph(), &call));
call->AddAttr(std::string(kTpuReplicateAttr), "cluster");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(
IsSupportedByReplicatedBridge(graph, nullptr));
}
TEST(IsSupportedByReplicatedBridge, ReplicatedModule) {
const char* const code = R"mlir(
func.func @entry_func_1(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.Identity"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_TRUE(IsSupportedByReplicatedBridge(*module));
}
TEST(HasTPUPartitionedCallOpInModule, HasTPUPartitionedCallModule) {
const char* const code = R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() {
%outputs_0 = "tf.TPUOrdinalSelector"() {device = ""} : () -> tensor<?xi32>
"tf.TPUPartitionedCall"(%outputs_0) {f = @reachable_func} : (tensor<?xi32>) -> ()
func.return
}
func.func @reachable_func() {
func.return
}
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_TRUE(HasTPUPartitionedCallOpInModule(*module));
}
TEST(HasTPUPartitionedCallOpInModule, HasNotTPUPartitionedCallModule) {
const char* const code = R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() {
"tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @reachable_func} : () -> ()
func.return
}
func.func @reachable_func() {
func.return
}
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_FALSE(HasTPUPartitionedCallOpInModule(*module));
}
TEST(IsInferenceGraph, GraphContrainsTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
ops::TPUPartitionedCall f(root.WithOpName("f"), {x}, 0,
{DT_FLOAT}, f_name_attr);
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(IsInferenceGraph(graph, nullptr));
}
TEST(IsInferenceGraph, GraphDoesNotContrainTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_FALSE(IsInferenceGraph(graph, nullptr));
}
TEST(IsInferenceGraph, FlibDefIsNotNullptrAndContainsTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
{{"tpu_op"}, "TPUPartitionedCall", {}, {{"Tout", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(IsInferenceGraph(graph, &flib_def));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2c1d0483-3721-4fbd-81ae-17a6f8237a77 | cpp | google/tensorstore | subprocess | tensorstore/internal/os/subprocess.h | tensorstore/internal/os/subprocess_test.cc | #ifndef TENSORSTORE_INTERNAL_OS_SUBPROCESS_H_
#define TENSORSTORE_INTERNAL_OS_SUBPROCESS_H_
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
class Subprocess;
struct SubprocessOptions {
std::string executable;
std::vector<std::string> args;
std::optional<absl::flat_hash_map<std::string, std::string>> env;
struct Inherit {};
struct Ignore {};
struct Redirect {
std::string filename;
};
std::variant<Ignore, Redirect> stdin_action = Ignore{};
std::variant<Inherit, Ignore, Redirect> stdout_action = Inherit{};
std::variant<Inherit, Ignore, Redirect> stderr_action = Inherit{};
};
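// Spawns a child process described by `options`. A minimal usage sketch —
// the executable path below is illustrative only, not part of the API:
//
//   SubprocessOptions opts;
//   opts.executable = "/bin/true";  // hypothetical path
//   auto child = SpawnSubprocess(opts);
//   if (child.ok()) {
//     auto exit_code = child->Join();  // blocks until the child exits
//   }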
Result<Subprocess> SpawnSubprocess(const SubprocessOptions& options);
class Subprocess {
public:
Subprocess(const Subprocess&) = default;
Subprocess& operator=(const Subprocess&) = default;
Subprocess(Subprocess&&) = default;
Subprocess& operator=(Subprocess&&) = default;
~Subprocess();
absl::Status Kill(int signal = 9) const;
Result<int> Join(bool block = true) const;
private:
friend Result<Subprocess> SpawnSubprocess(const SubprocessOptions& options);
struct Impl;
Subprocess(std::shared_ptr<Subprocess::Impl> impl) : impl_(std::move(impl)) {}
std::shared_ptr<Subprocess::Impl> impl_;
};
}
}
#endif  // TENSORSTORE_INTERNAL_OS_SUBPROCESS_H_ | #include "tensorstore/internal/os/subprocess.h"
#include <cstdio>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::internal::JoinPath;
using ::tensorstore::internal::SpawnSubprocess;
using ::tensorstore::internal::SubprocessOptions;
static std::string* program_name = nullptr;
const char kSubprocessArg[] = "--is_subprocess";
const char kSleepArg[] = "--sleep";
TEST(SubprocessTest, Join) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 33);
}
TEST(SubprocessTest, Kill) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSleepArg, kSubprocessArg};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
EXPECT_THAT(child->Join(false),
tensorstore::MatchesStatus(absl::StatusCode::kUnavailable));
child->Kill().IgnoreError();
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_NE(exit_code, 33);
}
TEST(SubprocessTest, DontInherit) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
opts.stdout_action = SubprocessOptions::Ignore();
opts.stderr_action = SubprocessOptions::Ignore();
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 33);
}
TEST(SubprocessTest, Redirects) {
::tensorstore::internal_testing::ScopedTemporaryDirectory temp_dir;
std::string out_file = JoinPath(temp_dir.path(), "stdout");
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
opts.env.emplace(::tensorstore::internal::GetEnvironmentMap());
opts.env->insert_or_assign("SUBPROCESS_TEST_ENV", "1");
opts.stdout_action = SubprocessOptions::Redirect{out_file};
opts.stderr_action = SubprocessOptions::Redirect{out_file};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 33);
std::string filedata;
TENSORSTORE_CHECK_OK(riegeli::ReadAll(riegeli::FdReader(out_file), filedata));
EXPECT_THAT(filedata, ::testing::HasSubstr("PASS"));
}
TEST(SubprocessTest, Drop) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {kSubprocessArg};
auto child = SpawnSubprocess(opts);
TENSORSTORE_ASSERT_OK(child.status());
child->Kill().IgnoreError();
}
TEST(SubprocessTest, Env) {
SubprocessOptions opts;
opts.executable = *program_name;
opts.args = {"--env=SUBPROCESS_TEST_ENV"};
opts.env = absl::flat_hash_map<std::string, std::string>({
#ifdef _WIN32
{"PATH", ::tensorstore::internal::GetEnv("PATH").value_or("")},
#endif
{"SUBPROCESS_TEST_ENV", "1"}});
auto child = SpawnSubprocess(opts);
ASSERT_TRUE(child.ok());
int exit_code;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(exit_code, child->Join());
EXPECT_EQ(exit_code, 41);
}
}
int main(int argc, char* argv[]) {
program_name = new std::string(argv[0]);
ABSL_LOG(INFO) << *program_name;
for (int i = 1; i < argc; i++) {
std::string_view argv_i(argv[i]);
if (argv_i == kSubprocessArg) {
printf("PASS\n");
return 33;
}
if (argv_i == kSleepArg) {
absl::SleepFor(absl::Seconds(1));
}
if (absl::StartsWith(argv_i, "--env=")) {
auto env_str = argv_i.substr(6);
if (env_str.empty()) {
return 40;
}
      if (tensorstore::internal::GetEnv(std::string(env_str).c_str())
              .has_value()) {
return 41;
}
return 42;
}
}
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/subprocess.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/subprocess_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9973afe3-dec3-41d3-a3be-739f6757b498 | cpp | google/tensorstore | uint64_sharded_decoder | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include <stddef.h>
#include <stdint.h>
#include <optional>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/cord_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
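// Decodes a (possibly compressed) minishard index. The decoded index holds
// `n` 24-byte entries stored column-wise as three little-endian uint64
// arrays: chunk-id deltas, byte-offset deltas (start of each chunk relative
// to the end of the previous one), and chunk sizes. The returned entries are
// sorted by chunk id.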
Result<std::vector<MinishardIndexEntry>> DecodeMinishardIndex(
const absl::Cord& input, ShardingSpec::DataEncoding encoding) {
absl::Cord decoded_input;
if (encoding != ShardingSpec::DataEncoding::raw) {
TENSORSTORE_ASSIGN_OR_RETURN(decoded_input, DecodeData(input, encoding));
} else {
decoded_input = input;
}
if ((decoded_input.size() % 24) != 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid minishard index length: ", decoded_input.size()));
}
std::vector<MinishardIndexEntry> result(decoded_input.size() / 24);
static_assert(sizeof(MinishardIndexEntry) == 24);
auto decoded_flat = decoded_input.Flatten();
ChunkId chunk_id{0};
uint64_t byte_offset = 0;
for (size_t i = 0; i < result.size(); ++i) {
auto& entry = result[i];
chunk_id.value += absl::little_endian::Load64(decoded_flat.data() + i * 8);
entry.chunk_id = chunk_id;
byte_offset += absl::little_endian::Load64(decoded_flat.data() + i * 8 +
8 * result.size());
entry.byte_range.inclusive_min = byte_offset;
byte_offset += absl::little_endian::Load64(decoded_flat.data() + i * 8 +
16 * result.size());
entry.byte_range.exclusive_max = byte_offset;
if (!entry.byte_range.SatisfiesInvariants()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid byte range in minishard index for chunk ",
entry.chunk_id.value, ": ", entry.byte_range));
}
}
absl::c_sort(result,
[](const MinishardIndexEntry& a, const MinishardIndexEntry& b) {
return a.chunk_id.value < b.chunk_id.value;
});
return result;
}
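// Binary-searches a sorted minishard index for `chunk_id`; returns the
// chunk's byte range, or std::nullopt if the chunk is not present.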
std::optional<ByteRange> FindChunkInMinishard(
span<const MinishardIndexEntry> minishard_index, ChunkId chunk_id) {
auto it =
absl::c_lower_bound(minishard_index, chunk_id,
[](const MinishardIndexEntry& e, ChunkId chunk_id) {
return e.chunk_id.value < chunk_id.value;
});
if (it == minishard_index.end() || it->chunk_id.value != chunk_id.value) {
return std::nullopt;
}
return it->byte_range;
}
Result<absl::Cord> DecodeData(const absl::Cord& input,
ShardingSpec::DataEncoding encoding) {
if (encoding == ShardingSpec::DataEncoding::raw) {
return input;
}
absl::Cord uncompressed;
TENSORSTORE_RETURN_IF_ERROR(
zlib::Decode(input, &uncompressed, true));
return uncompressed;
}
Result<ByteRange> DecodeShardIndexEntry(std::string_view input) {
if (input.size() != 16) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Expected 16 bytes, but received: ", input.size(), " bytes"));
}
ByteRange r;
r.inclusive_min = absl::little_endian::Load64(input.data());
r.exclusive_max = absl::little_endian::Load64(input.data() + 8);
if (!r.SatisfiesInvariants()) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Shard index specified invalid byte range: ", r));
}
return r;
}
Result<std::vector<MinishardIndexEntry>>
DecodeMinishardIndexAndAdjustByteRanges(const absl::Cord& encoded,
const ShardingSpec& sharding_spec) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_index,
DecodeMinishardIndex(encoded, sharding_spec.minishard_index_encoding));
for (auto& entry : minishard_index) {
auto result = GetAbsoluteShardByteRange(entry.byte_range, sharding_spec);
if (!result.ok()) {
return MaybeAnnotateStatus(
result.status(),
tensorstore::StrCat("Error decoding minishard index entry for chunk ",
entry.chunk_id.value));
}
entry.byte_range = std::move(result).value();
}
return minishard_index;
}
namespace {
absl::Status SplitMinishard(const ShardingSpec& sharding_spec,
const absl::Cord& shard_data, uint64_t minishard,
span<const MinishardIndexEntry> minishard_index,
std::vector<EncodedChunk>& chunks) {
std::optional<ChunkId> prev_chunk_id;
for (const auto& existing_entry : minishard_index) {
if (prev_chunk_id &&
existing_entry.chunk_id.value == prev_chunk_id->value) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Chunk ", existing_entry.chunk_id.value,
" occurs more than once in the minishard index "
"for minishard ",
minishard));
}
prev_chunk_id = existing_entry.chunk_id;
const auto GetChunkByteRange = [&]() -> Result<ByteRange> {
TENSORSTORE_RETURN_IF_ERROR(
OptionalByteRangeRequest(existing_entry.byte_range)
.Validate(shard_data.size()));
return existing_entry.byte_range;
};
TENSORSTORE_ASSIGN_OR_RETURN(
auto chunk_byte_range, GetChunkByteRange(),
tensorstore::MaybeAnnotateStatus(
_, tensorstore::StrCat("Invalid existing byte range for chunk ",
existing_entry.chunk_id.value)));
chunks.push_back(
EncodedChunk{{minishard, existing_entry.chunk_id},
internal::GetSubCord(shard_data, chunk_byte_range)});
}
return absl::OkStatus();
}
}
Result<std::vector<EncodedChunk>> SplitShard(const ShardingSpec& sharding_spec,
const absl::Cord& shard_data) {
std::vector<EncodedChunk> chunks;
if (shard_data.empty()) return chunks;
const uint64_t num_minishards = sharding_spec.num_minishards();
if (shard_data.size() < num_minishards * 16) {
return absl::FailedPreconditionError(
tensorstore::StrCat("Existing shard has size ", shard_data.size(),
", but expected at least: ", num_minishards * 16));
}
std::vector<char> shard_index(16 * num_minishards);
internal::CopyCordToSpan(shard_data, shard_index);
for (uint64_t minishard = 0; minishard < num_minishards; ++minishard) {
const auto GetMinishardIndexByteRange = [&]() -> Result<ByteRange> {
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_index_byte_range,
DecodeShardIndexEntry(
std::string_view(shard_index.data() + 16 * minishard, 16)));
TENSORSTORE_ASSIGN_OR_RETURN(
minishard_index_byte_range,
GetAbsoluteShardByteRange(minishard_index_byte_range, sharding_spec));
TENSORSTORE_RETURN_IF_ERROR(
OptionalByteRangeRequest(minishard_index_byte_range)
.Validate(shard_data.size()));
return minishard_index_byte_range;
};
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_ibr, GetMinishardIndexByteRange(),
tensorstore::MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Error decoding existing shard index entry for minishard ",
minishard)));
if (minishard_ibr.size() == 0) continue;
TENSORSTORE_ASSIGN_OR_RETURN(
auto minishard_index,
DecodeMinishardIndexAndAdjustByteRanges(
internal::GetSubCord(shard_data, minishard_ibr), sharding_spec),
tensorstore::MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Error decoding existing minishard index for minishard ",
minishard)));
TENSORSTORE_RETURN_IF_ERROR(SplitMinishard(
sharding_spec, shard_data, minishard, minishard_index, chunks));
}
return chunks;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace zlib = tensorstore::zlib;
using ::tensorstore::MatchesStatus;
using ::tensorstore::neuroglancer_uint64_sharded::DecodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::EncodeMinishardIndex;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
void TestEncodeMinishardRoundTrip(
std::vector<MinishardIndexEntry> minishard_index) {
auto out = EncodeMinishardIndex(minishard_index);
absl::Cord compressed;
zlib::Options options{9, true};
zlib::Encode(out, &compressed, options);
EXPECT_THAT(
DecodeMinishardIndex(out, ShardingSpec::DataEncoding::raw),
::testing::Optional(::testing::ElementsAreArray(minishard_index)));
EXPECT_THAT(
DecodeMinishardIndex(compressed, ShardingSpec::DataEncoding::gzip),
::testing::Optional(::testing::ElementsAreArray(minishard_index)));
}
TEST(DecodeMinishardIndexTest, Empty) {
TestEncodeMinishardRoundTrip({});
}
TEST(DecodeMinishardIndexTest, SingleEntry) {
TestEncodeMinishardRoundTrip({{{0x0123456789abcdef}, {0x11, 0x23}}});
}
TEST(DecodeMinishardIndexTest, MultipleEntries) {
TestEncodeMinishardRoundTrip({
{{1}, {3, 10}},
{{7}, {12, 15}},
});
}
TEST(DecodeMinishardIndexTest, InvalidGzip) {
EXPECT_THAT(
DecodeMinishardIndex(absl::Cord("abc"), ShardingSpec::DataEncoding::gzip),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error decoding zlib-compressed data"));
}
TEST(DecodeMinishardIndexTest, InvalidSizeRaw) {
EXPECT_THAT(
DecodeMinishardIndex(absl::Cord("abc"), ShardingSpec::DataEncoding::raw),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid minishard index length: 3"));
}
TEST(DecodeMinishardIndexTest, InvalidSizeGzip) {
absl::Cord temp;
zlib::Options options{9, true};
zlib::Encode(absl::Cord("abc"), &temp, options);
EXPECT_THAT(DecodeMinishardIndex(temp, ShardingSpec::DataEncoding::gzip),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid minishard index length: 3"));
}
TEST(DecodeMinishardIndexTest, InvalidInterval) {
std::vector<MinishardIndexEntry> minishard_index{{{3}, {1, 0}}};
auto encoded = EncodeMinishardIndex(minishard_index);
EXPECT_THAT(
DecodeMinishardIndex(encoded, ShardingSpec::DataEncoding::raw),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Invalid byte range in minishard index for chunk 3: \\[1, 0\\)"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e59be81d-78e8-4178-b423-a8826f3c6fff | cpp | tensorflow/tensorflow | scatter_simplifier | third_party/xla/xla/service/scatter_simplifier.cc | third_party/xla/xla/service/scatter_simplifier_test.cc | #include "xla/service/scatter_simplifier.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/permutation_util.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gather_scatter_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
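// Canonicalizes a scatter `updates` operand: moves all scatter (non-window)
// dimensions to the front, collapses them into a single leading dimension
// (inserting one if there are none), and re-inserts size-1 dimensions for
// every inserted window dimension.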
absl::StatusOr<HloInstruction*> FlattenAndTransposeUpdates(
HloInstruction* updates, absl::Span<const int64_t> update_window_dims,
absl::Span<const int64_t> inserted_window_dims,
int64_t scatter_indices_size) {
int64_t updates_rank = updates->shape().rank();
std::vector<int64_t> permutation;
const int64_t num_scatter_dims = updates_rank - update_window_dims.size();
permutation.reserve(updates_rank);
for (int i = 0; i < updates_rank; ++i) {
if (!absl::c_linear_search(update_window_dims, i)) {
permutation.push_back(i);
}
}
absl::c_copy(update_window_dims, std::back_inserter(permutation));
TF_ASSIGN_OR_RETURN(updates, MaybeTranspose(updates, permutation));
if (num_scatter_dims > 1) {
TF_ASSIGN_OR_RETURN(updates, CollapseFirstNDims(updates, num_scatter_dims));
} else if (num_scatter_dims == 0) {
TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, {0}));
}
if (!inserted_window_dims.empty()) {
std::vector<int64_t> new_dims;
new_dims.reserve(inserted_window_dims.size());
for (int64_t i : inserted_window_dims) {
new_dims.push_back(i + 1);
}
TF_ASSIGN_OR_RETURN(updates, InsertDegenerateDims(updates, new_dims));
}
return updates;
}
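// Derives the updates permutation from the operand permutation: dimension 0
// (the collapsed scatter dimension) stays first, and each operand dimension
// shifts up by one.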
std::vector<int64_t> MakeUpdatePermutation(
const std::vector<int64_t>& operand_permutation) {
std::vector<int64_t> update_permutation;
update_permutation.reserve(operand_permutation.size() + 1);
update_permutation.push_back(0);
for (auto& dim : operand_permutation) {
update_permutation.push_back(dim + 1);
}
return update_permutation;
}
absl::StatusOr<std::vector<HloInstruction*>> TransformScatterUpdates(
HloScatterInstruction* scatter,
const std::vector<int64_t>& update_permutation,
int64_t scatter_indices_size) {
std::vector<HloInstruction*> scatter_updates;
const auto& attrs = scatter->scatter_dimension_numbers();
scatter_updates.reserve(scatter->scatter_updates().size());
for (auto* update : scatter->scatter_updates()) {
TF_ASSIGN_OR_RETURN(
scatter_updates.emplace_back(),
FlattenAndTransposeUpdates(update, attrs.update_window_dims(),
attrs.inserted_window_dims(),
scatter_indices_size));
}
return MaybeTranspose(scatter_updates, update_permutation);
}
ScatterDimensionNumbers MakeScatterDimensionNumbers(
int64_t operand_rank, int64_t scatter_indices_vector_size) {
ScatterDimensionNumbers dim_numbers;
dim_numbers.mutable_update_window_dims()->Reserve(
static_cast<int>(operand_rank));
for (int i = 0; i < operand_rank; ++i) {
dim_numbers.add_update_window_dims(1 + i);
}
dim_numbers.mutable_scatter_dims_to_operand_dims()->Reserve(
static_cast<int>(scatter_indices_vector_size));
for (int i = 0; i < scatter_indices_vector_size; ++i) {
dim_numbers.add_scatter_dims_to_operand_dims(i);
}
dim_numbers.set_index_vector_dim(1);
return dim_numbers;
}
}
absl::StatusOr<HloInstruction*> ScatterSimplifier::ExpandInstruction(
HloInstruction* inst) {
auto* scatter = Cast<HloScatterInstruction>(inst);
if (scatter->called_computations().size() != 1) {
return InvalidArgumentStrCat(
"Expected scatter->called_computations() to have exactly one element, "
"got ",
scatter->called_computations().size());
}
HloComputation* called_computation = scatter->called_computations().front();
const auto& attrs = scatter->scatter_dimension_numbers();
const int operand_rank =
attrs.update_window_dims().size() + attrs.inserted_window_dims().size();
if (operand_rank == 0) {
absl::InlinedVector<HloInstruction*, 2> scatter_operands_and_updates;
scatter_operands_and_updates.reserve(2 * scatter->operand_count());
absl::c_copy(scatter->scatter_operands(),
std::back_inserter(scatter_operands_and_updates));
absl::c_copy(scatter->scatter_updates(),
std::back_inserter(scatter_operands_and_updates));
auto* call_op = scatter->AddInstruction(HloInstruction::CreateCall(
scatter->shape(), scatter_operands_and_updates, called_computation));
TF_RETURN_IF_ERROR(scatter->ReplaceAllUsesWith(call_op));
TF_ASSIGN_OR_RETURN(auto map, CallInliner::Inline(call_op));
return map[call_op];
}
auto [operand_permutation, operand_permutation_inverse] =
MakeOperandStartIndexPermutations(attrs.scatter_dims_to_operand_dims(),
operand_rank);
auto update_permutation = MakeUpdatePermutation(operand_permutation);
TF_ASSIGN_OR_RETURN(auto* scatter_indices,
TransformStartIndices(scatter->scatter_indices(),
attrs.index_vector_dim()));
TF_ASSIGN_OR_RETURN(
auto scatter_updates,
TransformScatterUpdates(scatter, update_permutation,
scatter_indices->shape().dimensions(0)));
TF_ASSIGN_OR_RETURN(
auto scatter_operands,
MaybeTranspose(scatter->scatter_operands(), operand_permutation));
auto dim_numbers = MakeScatterDimensionNumbers(
operand_rank, attrs.scatter_dims_to_operand_dims().size());
Shape output_shape;
if (scatter_operands.size() == 1) {
output_shape = scatter_operands.front()->shape();
} else {
std::vector<Shape> shapes;
shapes.reserve(scatter_operands.size());
for (auto* operand : scatter_operands) {
shapes.push_back(operand->shape());
}
output_shape = ShapeUtil::MakeTupleShape(shapes);
}
auto* result = scatter->AddInstruction(HloInstruction::CreateScatter(
output_shape, scatter_operands, scatter_indices, scatter_updates,
called_computation, dim_numbers,
scatter->indices_are_sorted(), scatter->unique_indices()));
if (IsIdentityPermutation(operand_permutation)) {
return result;
}
if (scatter->scatter_operands().size() == 1) {
return MaybeTranspose(result, operand_permutation_inverse);
}
std::vector<HloInstruction*> result_items;
result_items.reserve(scatter->scatter_operands().size());
for (int i = 0; i < scatter->scatter_operands().size(); ++i) {
TF_ASSIGN_OR_RETURN(result_items.emplace_back(),
MakeGetTupleElementHlo(result, i));
TF_ASSIGN_OR_RETURN(
result_items.back(),
MaybeTranspose(result_items.back(), operand_permutation_inverse));
}
return MaybeMakeTuple(result_items);
}
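// A scatter is already in simplified form when the operand is not rank-0,
// the index vector is the trailing dimension of the indices, the updates
// have at most one leading scatter dimension, scatter dims map identically
// to operand dims, the update window dims are sorted and exclude dimension
// 0, and there are no inserted window dims.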
bool ScatterSimplifier::IsSimplifiedScatter(
const HloScatterInstruction* scatter) {
const auto& dims = scatter->scatter_dimension_numbers();
auto operand_rank = scatter->scatter_operands().front()->shape().rank();
if (operand_rank == 0) return false;
bool nonstandard_index_vector_dim =
dims.index_vector_dim() != scatter->scatter_indices()->shape().rank() - 1;
int64_t num_scatter_dims =
scatter->scatter_updates().front()->shape().rank() -
dims.update_window_dims().size();
bool scatter_indices_reordered =
!IsIdentityPermutation(dims.scatter_dims_to_operand_dims());
bool scatter_dim_not_first =
absl::c_linear_search(dims.update_window_dims(), 0);
bool update_window_dims_sorted = absl::c_is_sorted(dims.update_window_dims());
return !(nonstandard_index_vector_dim || num_scatter_dims > 1 ||
scatter_indices_reordered || scatter_dim_not_first ||
!update_window_dims_sorted || !dims.inserted_window_dims().empty());
}
bool ScatterSimplifier::InstructionMatchesPattern(HloInstruction* inst) {
auto* scatter = DynCast<HloScatterInstruction>(inst);
return scatter && !IsSimplifiedScatter(scatter);
}
} | #include "xla/service/scatter_simplifier.h"
#include <optional>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ScatterSimplifierTest : public HloTestBase {};
TEST_F(ScatterSimplifierTest, InsertsIndexVectorAndWindowDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
p2 = f32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = f32[3,3] parameter(0)
operand1 = f32[3,3] parameter(1)
indices = s32[2] parameter(2)
update0 = f32[2,3] parameter(3)
update1 = f32[2,3] parameter(4)
ROOT scatter = (f32[3,3], f32[3,3]) scatter(operand0, operand1, indices,
update0, update1),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[SCATTER_DIMS_WITH_VECTOR:.*]] = s32[2,1]{1,0} reshape(%indices)
CHECK: %[[RESHAPED_UPDATES0:.*]] = f32[2,1,3]{2,1,0} reshape(%update0)
CHECK: %[[RESHAPED_UPDATES1:.*]] = f32[2,1,3]{2,1,0} reshape(%update1)
CHECK: ROOT %scatter = (f32[3,3]{1,0}, f32[3,3]{1,0}) scatter(
CHECK-SAME: %operand0, %operand1, %[[SCATTER_DIMS_WITH_VECTOR]],
CHECK-SAME: %[[RESHAPED_UPDATES0]], %[[RESHAPED_UPDATES1]]),
CHECK-SAME: update_window_dims={1,2},
CHECK-SAME: inserted_window_dims={},
CHECK-SAME: scatter_dims_to_operand_dims={0},
CHECK-SAME: index_vector_dim=1,
CHECK-SAME: to_apply=%scatter_computation
)");
}
TEST_F(ScatterSimplifierTest, CollapsesScatterDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,1,2] parameter(1)
update = f32[2,1,1,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={2, 3},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=2
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[RESHAPED_INDICES:.*]] = s32[2,2]{1,0} reshape(%indices)
CHECK: %[[RESHAPED_UPDATES:.*]] = f32[2,1,3]{2,1,0} reshape(%update)
CHECK: scatter(
CHECK-SAME: %[[RESHAPED_INDICES]]
CHECK-SAME: %[[RESHAPED_UPDATES]]
)");
}
TEST_F(ScatterSimplifierTest, NoOpForSimpleScatter) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
update = f32[2,1,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), std::nullopt);
}
TEST_F(ScatterSimplifierTest, MovesIndexVectorDim) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,3] parameter(0)
indices = s32[2,1] parameter(1)
update = f32[1,3,3] parameter(2)
ROOT scatter = f32[3,3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1, 2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[TRANSPOSED_INDICES:.*]] = s32[1,2]{1,0}
CHECK-SAME: transpose(%indices), dimensions={1,0}
CHECK: scatter(%operand, %[[TRANSPOSED_INDICES]], %update),
CHECK-SAME: index_vector_dim=1
)");
}
TEST_F(ScatterSimplifierTest, TransformsUpdatesAndOperandUsingScatterDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,4,5] parameter(0)
indices = s32[2,2] parameter(1)
update = f32[2,1,1,3] parameter(2)
ROOT scatter = f32[3,4,5] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={1, 2, 3},
inserted_window_dims={},
scatter_dims_to_operand_dims={2,0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[T_OPERAND:.*]] = f32[5,3,4]{2,1,0} transpose(%operand),
CHECK-SAME: dimensions={2,0,1}
CHECK: %[[T_UPDATES:.*]] = f32[2,3,1,1]{3,2,1,0} transpose(%update),
CHECK-SAME: dimensions={0,3,1,2}
CHECK: %[[SCATTER:.*]] = {{.*}} scatter(
CHECK-SAME: %[[T_OPERAND]], %indices, %[[T_UPDATES]])
CHECK-SAME: scatter_dims_to_operand_dims={0,1},
CHECK: ROOT %{{.*}} = f32[3,4,5]
CHECK-SAME: transpose(%[[SCATTER]]), dimensions={1,2,0}
)");
}
TEST_F(ScatterSimplifierTest, MakesScatterDimensionsLeadingInUpdates) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3] parameter(0)
indices = s32[1,1] parameter(1)
update = f32[2,1] parameter(2)
ROOT scatter = f32[3] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={0},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: %[[TRANSPOSED_UPDATES:.*]] = f32[1,2]{1,0}
CHECK-SAME: transpose(%update), dimensions={1,0}
CHECK: scatter(
CHECK-SAME: %[[TRANSPOSED_UPDATES]]
CHECK-SAME: update_window_dims={1},
)");
}
TEST_F(ScatterSimplifierTest, ZeroDimScatterIndices) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[4,4] parameter(0)
indices = s32[2] parameter(1)
update = f32[3,3] parameter(2)
ROOT scatter = f32[4,4]{1,0} scatter(operand, indices, update),
update_window_dims={0,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0,1},
index_vector_dim=0,
to_apply=scatter_computation
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: scatter(
)");
}
TEST_F(ScatterSimplifierTest,
IsSimplifiedScatterReturnsFalseForUnsortedWindowDims) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
%p0 = f32[] parameter(0)
ROOT result = f32[] parameter(1)
}
ENTRY kernel_entry {
operand = f32[3,2] parameter(0)
indices = s32[1,1] parameter(1)
update = f32[1,2,2] parameter(2)
ROOT scatter = f32[3,2] scatter(operand, indices, update),
to_apply=scatter_computation,
update_window_dims={2,1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
auto scatter = module->entry_computation()->root_instruction();
EXPECT_FALSE(ScatterSimplifier::IsSimplifiedScatter(
Cast<HloScatterInstruction>(scatter)));
}
TEST_F(ScatterSimplifierTest, ScatterIntoScalar) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY kernel_entry {
operand = s32[] parameter(0)
indices = s32[0]{0} parameter(1)
updates = s32[] parameter(2)
ROOT scatter = s32[] scatter(operand, indices, updates),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=scatter_computation
}
)";
auto module = ParseAndReturnUnverifiedModule(kModuleStr).value();
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: ENTRY
CHECK: %[[OPERAND:.*]] = s32[] parameter(0)
CHECK: %[[UPDATES:.*]] = s32[] parameter(2)
CHECK: ROOT %{{.*}} = s32[] add(%[[OPERAND]], %[[UPDATES]])
)");
}
TEST_F(ScatterSimplifierTest, VariadicScatterIntoScalar) {
constexpr absl::string_view kModuleStr = R"(
HloModule scatter_simplifier
scatter_computation {
p0 = f32[] parameter(0)
p1 = bf16[] parameter(1)
p2 = f32[] parameter(2)
p3 = bf16[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = f32[] parameter(0)
operand1 = bf16[] parameter(1)
indices = s32[0]{0} parameter(2)
updates0 = f32[] parameter(3)
updates1 = bf16[] parameter(4)
ROOT scatter = (f32[], bf16[]) scatter(operand0, operand1, indices, updates0, updates1),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=scatter_computation
})";
RunAndFilecheckHloRewrite(kModuleStr, ScatterSimplifier(), R"(
CHECK: ENTRY
CHECK: %[[UPDATES0:.*]] = f32[] parameter(3)
CHECK: %[[UPDATES1:.*]] = bf16[] parameter(4)
CHECK: ROOT %{{.*}} = (f32[], bf16[]) tuple(%[[UPDATES0]], %[[UPDATES1]])
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6e96f8c-48b7-4a54-83c3-09a699070b82 | cpp | tensorflow/tensorflow | identity_op | tensorflow/compiler/tf2xla/kernels/identity_op.cc | tensorflow/core/kernels/identity_op_test.cc | #include "absl/log/check.h"
#include "tensorflow/compiler/tf2xla/kernels/tensor_list_utils.h"
#include "tensorflow/compiler/tf2xla/mlir_xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
class IdentityOp : public XlaOpKernel {
public:
explicit IdentityOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* ctx) override {
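// Forward every input to the corresponding output. TensorList inputs go
// through SetTensorListOutput; all other inputs are passed through the
// underlying OpKernelContext unchanged.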
for (int i = 0; i < ctx->num_inputs(); ++i) {
if (IsTensorListInput(ctx, i)) {
ctx->SetTensorListOutput(i, ctx->Input(i));
} else {
DCHECK(ctx->input_type(i) != DT_VARIANT);
ctx->op_kernel_context()->set_output(
i, ctx->op_kernel_context()->input(i));
}
}
}
private:
IdentityOp(const IdentityOp&) = delete;
void operator=(const IdentityOp&) = delete;
};
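// Identity-like pass-through ops all share this kernel; PreventGradient is
// lowered via MlirXlaOpKernel instead.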
REGISTER_XLA_OP(
Name("Identity").AllowResourceTypes().AllowVariantTypes().CompilationOnly(),
IdentityOp);
REGISTER_XLA_OP(Name("IdentityN")
.AllowResourceTypes()
.AllowVariantTypes()
.CompilationOnly(),
IdentityOp);
REGISTER_XLA_OP(Name("PlaceholderWithDefault"), IdentityOp);
REGISTER_XLA_OP(Name("PreventGradient"), MlirXlaOpKernel);
REGISTER_XLA_OP(Name("StopGradient").AllowVariantTypes(), IdentityOp);
REGISTER_XLA_OP(Name("Snapshot"), IdentityOp);
REGISTER_XLA_OP(Name("_EagerConst"), IdentityOp);
}
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class IdentityOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "Identity")
.Input(FakeInput(input_type))
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IdentityOpTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(IdentityOpTest, RefInputError) { TF_ASSERT_OK(Init(DT_INT32_REF)); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/identity_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/identity_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c1927ee1-1ede-4759-a812-191e2251d57c | cpp | tensorflow/tensorflow | dlpack | tensorflow/c/eager/dlpack.cc | tensorflow/c/eager/dlpack_test.cc | #include "tensorflow/c/eager/dlpack.h"
#include <string>
#include "include/dlpack/dlpack.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace {
struct TfDlManagedTensorCtx {
TensorReference reference;
std::vector<int64_t> shape;
std::vector<int64_t> strides;
DLManagedTensor tensor;
explicit TfDlManagedTensorCtx(const TensorReference& ref) : reference(ref) {}
};
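// Returns the tensorflow::Tensor behind a TFE_TensorHandle, failing for null
// or non-local (e.g. remote) handles, which DLPack cannot represent.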
const Tensor* GetTensorFromHandle(TFE_TensorHandle* h, TF_Status* status) {
if (h == nullptr) {
status->status = tensorflow::errors::InvalidArgument("Invalid handle");
return nullptr;
}
tensorflow::TensorHandle* handle =
tensorflow::TensorHandleFromInterface(tensorflow::unwrap(h));
if (handle->Type() != TensorHandle::LOCAL) {
status->status = tensorflow::errors::InvalidArgument(
"DLPack doesn't support ", handle->TypeString(), " tensor");
return nullptr;
}
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return nullptr;
}
return tensor;
}
void DLManagedTensorDeleter(DLManagedTensor* arg) {
TfDlManagedTensorCtx* owner =
static_cast<TfDlManagedTensorCtx*>(arg->manager_ctx);
owner->reference.Unref();
delete owner;
}
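// Maps a TF_DataType onto the equivalent DLPack DLDataType; the bit width is
// derived from TF_DataTypeSize, and unsupported types set an error status.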
DLDataType GetDlDataType(TF_DataType data_type, TF_Status* status) {
DLDataType dtype;
dtype.lanes = 1;
dtype.bits = TF_DataTypeSize(data_type) * 8;
switch (data_type) {
case TF_DataType::TF_BOOL:
dtype.code = DLDataTypeCode::kDLBool;
break;
case TF_DataType::TF_HALF:
case TF_DataType::TF_FLOAT:
case TF_DataType::TF_DOUBLE:
dtype.code = DLDataTypeCode::kDLFloat;
break;
case TF_DataType::TF_INT8:
case TF_DataType::TF_INT16:
case TF_DataType::TF_INT32:
case TF_DataType::TF_INT64:
dtype.code = DLDataTypeCode::kDLInt;
break;
case TF_DataType::TF_UINT8:
case TF_DataType::TF_UINT16:
case TF_DataType::TF_UINT32:
case TF_DataType::TF_UINT64:
dtype.code = DLDataTypeCode::kDLUInt;
break;
case TF_DataType::TF_BFLOAT16:
dtype.code = DLDataTypeCode::kDLBfloat;
break;
case TF_DataType::TF_COMPLEX64:
case TF_DataType::TF_COMPLEX128:
dtype.code = DLDataTypeCode::kDLComplex;
break;
default:
status->status = tensorflow::errors::InvalidArgument(
DataType_Name(static_cast<DataType>(data_type)),
" is not supported by dlpack");
break;
}
return dtype;
}
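// Derives the DLPack device from the handle's backing device name: CPU maps
// to kDLCPU and GPU to kDLCUDA (or kDLROCM when built with ROCm).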
DLDevice GetDlContext(TFE_TensorHandle* h, TF_Status* status) {
DLDevice ctx;
const char* device_name =
tensorflow::unwrap(h)->BackingDeviceName(&status->status);
DeviceNameUtils::ParsedName parsed_name;
tensorflow::DeviceNameUtils::ParseFullName(device_name, &parsed_name);
std::string device_type = parsed_name.type;
int device_id = 0;
if (parsed_name.has_id) {
device_id = parsed_name.id;
}
ctx.device_id = device_id;
if (device_type == "CPU") {
ctx.device_type = DLDeviceType::kDLCPU;
} else if (device_type == "GPU") {
#if TENSORFLOW_USE_ROCM
ctx.device_type = DLDeviceType::kDLROCM;
#else
ctx.device_type = DLDeviceType::kDLCUDA;
#endif
} else {
status->status = tensorflow::errors::InvalidArgument(
"Unsupported Device Type for dlpack");
}
return ctx;
}
absl::optional<std::string> DeviceNameFromDlContext(const DLDevice& ctx,
TF_Status* status) {
switch (ctx.device_type) {
case DLDeviceType::kDLCPU:
return "CPU:0";
case DLDeviceType::kDLCUDA:
return absl::StrCat("GPU:", ctx.device_id);
case DLDeviceType::kDLROCM:
return absl::StrCat("GPU:", ctx.device_id);
default:
return absl::nullopt;
}
}
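// Inverse of GetDlDataType: maps a DLPack dtype back to a TF_DataType.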
Status TfDataTypeFromDlDataType(const DLDataType& dtype,
TF_DataType* tf_dtype) {
switch (dtype.code) {
case DLDataTypeCode::kDLBool:
if (dtype.bits != 8) {
return tensorflow::errors::InvalidArgument(
"Only DLPack bools of bitwidth 8 are supported, got: ", dtype.bits);
}
*tf_dtype = TF_DataType::TF_BOOL;
return absl::OkStatus();
case DLDataTypeCode::kDLUInt:
switch (dtype.bits) {
case 8:
*tf_dtype = TF_DataType::TF_UINT8;
return absl::OkStatus();
case 16:
*tf_dtype = TF_DataType::TF_UINT16;
return absl::OkStatus();
case 32:
*tf_dtype = TF_DataType::TF_UINT32;
return absl::OkStatus();
case 64:
*tf_dtype = TF_DataType::TF_UINT64;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument("Unsupported UInt bits: ",
dtype.bits);
}
return absl::OkStatus();
case DLDataTypeCode::kDLInt:
switch (dtype.bits) {
case 8:
*tf_dtype = TF_DataType::TF_INT8;
return absl::OkStatus();
case 16:
*tf_dtype = TF_DataType::TF_INT16;
return absl::OkStatus();
case 32:
*tf_dtype = TF_DataType::TF_INT32;
return absl::OkStatus();
case 64:
*tf_dtype = TF_DataType::TF_INT64;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument("Unsupported Int bits: ",
dtype.bits);
}
return absl::OkStatus();
case DLDataTypeCode::kDLFloat:
switch (dtype.bits) {
case 16:
*tf_dtype = TF_DataType::TF_HALF;
return absl::OkStatus();
case 32:
*tf_dtype = TF_DataType::TF_FLOAT;
return absl::OkStatus();
case 64:
*tf_dtype = TF_DataType::TF_DOUBLE;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument("Unsupported Float bits: ",
dtype.bits);
}
break;
case DLDataTypeCode::kDLBfloat:
switch (dtype.bits) {
case 16:
*tf_dtype = TF_DataType::TF_BFLOAT16;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument(
"Unsupported BFloat bits: ", dtype.bits);
}
break;
case DLDataTypeCode::kDLComplex:
switch (dtype.bits) {
case 64:
*tf_dtype = TF_DataType::TF_COMPLEX64;
return absl::OkStatus();
case 128:
*tf_dtype = TF_DataType::TF_COMPLEX128;
return absl::OkStatus();
default:
return tensorflow::errors::InvalidArgument(
"Unsupported Complex bits: ", dtype.bits);
}
break;
default:
return tensorflow::errors::InvalidArgument("Unsupported Type Codes: ",
dtype.code);
}
}
void DeallocatorWrapperFunc(void* data, size_t len, void* dlmt_vptr) {
TFE_CallDLManagedTensorDeleter(dlmt_vptr);
}
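// Returns true iff the strides describe a compact row-major layout. Walks
// from the innermost dimension outward; size-1 dimensions may carry any
// stride, and a size-0 dimension makes every stride vector valid.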
bool IsValidStrideCompactRowMajorData(int64_t* shape_arr, int64_t* stride_arr,
int ndim) {
bool valid = true;
int64_t expected_stride = 1;
for (int i = ndim - 1; i >= 0; --i) {
if (shape_arr[i] == 0) return true;
if (shape_arr[i] != 1 && stride_arr[i] != expected_stride) {
valid = false;
}
expected_stride *= shape_arr[i];
}
return valid;
}
}
void TFE_CallDLManagedTensorDeleter(void* dlm_ptr) {
DLManagedTensor* dlMTensor = static_cast<DLManagedTensor*>(dlm_ptr);
if (dlMTensor->deleter != nullptr) {
dlMTensor->deleter(dlMTensor);
}
}
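// Wraps the tensor behind `h` in a DLManagedTensor. The DLPack tensor borrows
// the buffer; a TensorReference held in the manager context keeps the buffer
// alive until the DLPack deleter runs.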
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) {
auto tf_dlm_context = GetDlContext(h, status);
if (!status->status.ok()) {
return nullptr;
}
auto* tf_dlm_data = TFE_TensorHandleDevicePointer(h, status);
if (!status->status.ok()) {
return nullptr;
}
const Tensor* tensor = GetTensorFromHandle(h, status);
TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype());
auto tf_dlm_type = GetDlDataType(data_type, status);
if (!status->status.ok()) {
return nullptr;
}
TensorReference tensor_ref(*tensor);
auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref);
tf_dlm_tensor_ctx->reference = tensor_ref;
DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor;
dlm_tensor->manager_ctx = tf_dlm_tensor_ctx;
dlm_tensor->deleter = &DLManagedTensorDeleter;
dlm_tensor->dl_tensor.device = tf_dlm_context;
int ndim = tensor->dims();
dlm_tensor->dl_tensor.ndim = ndim;
dlm_tensor->dl_tensor.data = tf_dlm_data;
dlm_tensor->dl_tensor.dtype = tf_dlm_type;
std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape;
std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides;
shape_arr->resize(ndim);
stride_arr->resize(ndim, 1);
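// Copy the shape, then fill in compact row-major strides; the innermost
// stride is already 1 from the resize above.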
for (int i = 0; i < ndim; i++) {
(*shape_arr)[i] = tensor->dim_size(i);
}
for (int i = ndim - 2; i >= 0; --i) {
(*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1];
}
dlm_tensor->dl_tensor.shape = shape_arr->data();
dlm_tensor->dl_tensor.strides = stride_arr->data();
dlm_tensor->dl_tensor.byte_offset = 0;
return static_cast<void*>(dlm_tensor);
}
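// Builds a TFE_TensorHandle around the DLPack buffer without copying; the
// DLManagedTensor's deleter runs once TensorFlow releases the memory.
// Nonzero byte offsets and non-compact strides are rejected.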
TFE_TensorHandle* TFE_HandleFromDLPack(void* dlm, TF_Status* status,
TFE_Context* ctx) {
DLManagedTensor* dlmt = static_cast<DLManagedTensor*>(dlm);
DLTensor* dl_tensor = &dlmt->dl_tensor;
absl::optional<std::string> device_name =
DeviceNameFromDlContext(dl_tensor->device, status);
if (!device_name.has_value()) {
status->status =
tensorflow::errors::InvalidArgument("Unsupported Device Type");
return nullptr;
}
TF_DataType dtype;
Status s = TfDataTypeFromDlDataType(dl_tensor->dtype, &dtype);
if (!s.ok()) {
status->status = std::move(s);
return nullptr;
}
int num_dims = dl_tensor->ndim;
const int64_t* dims = dl_tensor->shape;
void* data = dl_tensor->data;
if (dl_tensor->byte_offset != 0) {
status->status = tensorflow::errors::InvalidArgument(
"Unsupported byte_offset (", dl_tensor->byte_offset,
") from DLPack, must be zero");
return nullptr;
}
size_t total_bytes = dl_tensor->dtype.bits / 8;
for (int i = 0; i < num_dims; i++) {
total_bytes *= dims[i];
}
if (dl_tensor->strides != nullptr &&
!IsValidStrideCompactRowMajorData(dl_tensor->shape, dl_tensor->strides,
num_dims)) {
status->status = tensorflow::errors::InvalidArgument(
"Invalid strides array from DLPack");
return nullptr;
}
TFE_TensorHandle* handle = TFE_NewTensorHandleFromDeviceMemory(
ctx, device_name.value().c_str(), dtype, dims, num_dims, data,
total_bytes, &DeallocatorWrapperFunc, dlmt, status);
return handle;
}
} | #include "tensorflow/c/eager/dlpack.h"
#include <vector>
#include "absl/strings/str_join.h"
#include "include/dlpack/dlpack.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
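// Round-trips a float tensor through TFE_HandleFromDLPack and back via
// TFE_HandleToDLPack, checking device, dtype, shape, strides, and payload.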
void TestHandleFromDLPack(TF_Status* status, TFE_Context* ctx,
std::vector<int64_t> shape,
std::vector<int64_t> strides) {
size_t num_elements = 1;
for (int i = 0; i < static_cast<int32_t>(shape.size()); ++i) {
num_elements *= shape[i];
}
std::vector<float> data(num_elements);
for (size_t j = 0; j < num_elements; ++j) {
data[j] = j;
}
DLManagedTensor dlm_in = {};
DLTensor* dltensor_in = &dlm_in.dl_tensor;
dltensor_in->data = data.data();
dltensor_in->device = {kDLCPU, 0};
dltensor_in->ndim = static_cast<int32_t>(shape.size());
dltensor_in->dtype = {kDLFloat, 32, 1};
dltensor_in->shape = shape.data();
dltensor_in->strides = strides.data();
TFE_TensorHandle* handle = TFE_HandleFromDLPack(&dlm_in, status, ctx);
ASSERT_NE(handle, nullptr)
<< TF_Message(status) << " (shape=[" << absl::StrJoin(shape, ",")
<< "], strides=[" << absl::StrJoin(strides, ",") << "])";
auto* dlm_out =
static_cast<DLManagedTensor*>(TFE_HandleToDLPack(handle, status));
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
const DLTensor* dltensor_out = &dlm_out->dl_tensor;
EXPECT_EQ(dltensor_out->device.device_type, dltensor_in->device.device_type);
EXPECT_EQ(dltensor_out->device.device_id, dltensor_in->device.device_id);
EXPECT_EQ(dltensor_out->ndim, dltensor_in->ndim);
EXPECT_EQ(dltensor_out->dtype.code, dltensor_in->dtype.code);
EXPECT_EQ(dltensor_out->dtype.bits, dltensor_in->dtype.bits);
EXPECT_EQ(dltensor_out->dtype.lanes, dltensor_in->dtype.lanes);
for (int i = 0; i < dltensor_in->ndim; ++i) {
EXPECT_EQ(dltensor_out->shape[i], dltensor_in->shape[i]);
if (dltensor_out->strides) {
if (i == dltensor_in->ndim - 1) {
EXPECT_EQ(dltensor_out->strides[i], 1);
} else {
EXPECT_EQ(dltensor_out->strides[i],
dltensor_out->shape[i + 1] * dltensor_out->strides[i + 1]);
}
}
}
const float* data_in = static_cast<const float*>(dltensor_in->data);
const float* data_out = static_cast<const float*>(dltensor_out->data);
for (size_t j = 0; j < num_elements; ++j) {
EXPECT_EQ(data_out[j], data_in[j]);
}
TFE_CallDLManagedTensorDeleter(dlm_out);
TFE_DeleteTensorHandle(handle);
}
TEST(DLPack, HandleFromDLPackStrides) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TestHandleFromDLPack(status, ctx, {}, {});
TestHandleFromDLPack(status, ctx, {4}, {});
TestHandleFromDLPack(status, ctx, {4}, {1});
TestHandleFromDLPack(status, ctx, {4, 3, 2}, {});
TestHandleFromDLPack(status, ctx, {4, 3, 2}, {6, 2, 1});
TestHandleFromDLPack(status, ctx, {1}, {1});
TestHandleFromDLPack(status, ctx, {1}, {0});
TestHandleFromDLPack(status, ctx, {4, 1, 2}, {2, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 1, 2}, {2, 0, 1});
TestHandleFromDLPack(status, ctx, {4, 3, 1}, {3, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 3, 1}, {3, 1, 0});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 2, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 1, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 0, 1});
TestHandleFromDLPack(status, ctx, {4, 0, 2}, {0, 2, 0});
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/dlpack.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/dlpack_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b5213f53-4bff-4a65-a833-0de7b1b67a3f | cpp | google/cel-cpp | source_position | eval/public/source_position.cc | eval/public/source_position_test.cc | #include "eval/public/source_position.h"
#include <utility>
namespace google {
namespace api {
namespace expr {
namespace runtime {
using google::api::expr::v1alpha1::SourceInfo;
namespace {
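// Returns the 1-based line number containing `position` together with the
// character offset at which that line begins; defaults to line 1, offset 0
// when no source info is available.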
std::pair<int, int32_t> GetLineAndLineOffset(const SourceInfo* source_info,
int32_t position) {
int line = 0;
int32_t line_offset = 0;
if (source_info != nullptr) {
for (const auto& curr_line_offset : source_info->line_offsets()) {
if (curr_line_offset > position) {
break;
}
line_offset = curr_line_offset;
line++;
}
}
if (line == 0) {
line++;
}
return std::pair<int, int32_t>(line, line_offset);
}
}
int32_t SourcePosition::line() const {
return GetLineAndLineOffset(source_info_, character_offset()).first;
}
int32_t SourcePosition::column() const {
int32_t position = character_offset();
std::pair<int, int32_t> line_and_offset =
GetLineAndLineOffset(source_info_, position);
return 1 + (position - line_and_offset.second);
}
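// Character offset recorded for this expression id, or 0 when unknown.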
int32_t SourcePosition::character_offset() const {
if (source_info_ == nullptr) {
return 0;
}
auto position_it = source_info_->positions().find(expr_id_);
return position_it != source_info_->positions().end() ? position_it->second
: 0;
}
}
}
}
} | #include "eval/public/source_position.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "internal/testing.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using ::testing::Eq;
using google::api::expr::v1alpha1::SourceInfo;
class SourcePositionTest : public testing::Test {
protected:
void SetUp() override {
source_info_.add_line_offsets(0);
source_info_.add_line_offsets(1);
source_info_.add_line_offsets(2);
(*source_info_.mutable_positions())[1] = 2;
source_info_.add_line_offsets(4);
(*source_info_.mutable_positions())[2] = 4;
(*source_info_.mutable_positions())[3] = 7;
source_info_.add_line_offsets(9);
source_info_.add_line_offsets(10);
(*source_info_.mutable_positions())[4] = 10;
(*source_info_.mutable_positions())[5] = 13;
}
SourceInfo source_info_;
};
TEST_F(SourcePositionTest, TestNullSourceInfo) {
SourcePosition position(3, nullptr);
EXPECT_THAT(position.character_offset(), Eq(0));
EXPECT_THAT(position.line(), Eq(1));
EXPECT_THAT(position.column(), Eq(1));
}
TEST_F(SourcePositionTest, TestNoNewlines) {
source_info_.clear_line_offsets();
SourcePosition position(3, &source_info_);
EXPECT_THAT(position.character_offset(), Eq(7));
EXPECT_THAT(position.line(), Eq(1));
EXPECT_THAT(position.column(), Eq(8));
}
TEST_F(SourcePositionTest, TestPosition) {
SourcePosition position(3, &source_info_);
EXPECT_THAT(position.character_offset(), Eq(7));
}
TEST_F(SourcePositionTest, TestLine) {
SourcePosition position1(1, &source_info_);
EXPECT_THAT(position1.line(), Eq(3));
SourcePosition position2(2, &source_info_);
EXPECT_THAT(position2.line(), Eq(4));
SourcePosition position3(3, &source_info_);
EXPECT_THAT(position3.line(), Eq(4));
SourcePosition position4(5, &source_info_);
EXPECT_THAT(position4.line(), Eq(6));
}
TEST_F(SourcePositionTest, TestColumn) {
SourcePosition position1(1, &source_info_);
EXPECT_THAT(position1.column(), Eq(1));
SourcePosition position2(2, &source_info_);
EXPECT_THAT(position2.column(), Eq(1));
SourcePosition position3(3, &source_info_);
EXPECT_THAT(position3.column(), Eq(4));
SourcePosition position4(5, &source_info_);
EXPECT_THAT(position4.column(), Eq(4));
}
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/source_position.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/source_position_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3ffa5816-7f4b-47b4-83f7-108b7e00ea6c | cpp | tensorflow/tensorflow | lower_while_op | tensorflow/core/common_runtime/lower_while_op.cc | tensorflow/core/common_runtime/lower_while_op_test.cc | #include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
using NodeOut = NodeBuilder::NodeOut;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
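// Helper which rewrites a single While op into the equivalent lowered graph:
// Enter nodes for every input, Merge/Switch/NextIteration/Exit nodes for the
// non-resource loop variables, and function call nodes for the cond and body
// wired through a LoopCond node.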
class LowerWhileHelper {
public:
static Status Run(Node* while_op, const NameAttrList& cond_fn,
const NameAttrList& body_fn, int parallel_iterations,
Graph* graph, const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable) {
LowerWhileHelper helper(while_op, cond_fn, body_fn, parallel_iterations,
graph, flib_def, keep_node_fetchable);
return helper.RunInternal();
}
private:
LowerWhileHelper(Node* while_op, const NameAttrList& cond_fn,
const NameAttrList& body_fn, int parallel_iterations,
Graph* graph, const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable);
Status RunInternal();
void InitializeInputOutputToLoweredNodeMap();
Status CreateEnterNodes();
Status CreateMergeNodes();
Status CreateCondFuncCallNode();
Status CreateSwitchNodes();
Status CreateBodyFuncCallNode();
Status CreateExitNodes();
Status CreateNextIterationNodes();
Status UpdateMergeNodes();
Status UpdateConsumers();
string NewName(const string& infix);
bool IsLoopCarriedResource(int index);
Node* while_op_;
Node* cond_call_node_;
Node* loop_cond_node_;
Node* body_call_node_;
Node* lowered_while_output_;
Node* lowered_while_executed_;
Graph* graph_;
const FunctionLibraryDefinition* flib_def_;
string name_;
const int parallel_iterations_;
bool keep_node_fetchable_;
NodeDebugInfo debug_info_;
NodeBuilder cond_call_builder_;
NodeBuilder body_call_builder_;
std::vector<Node*> enter_nodes_;
std::vector<Node*> merge_nodes_;
std::vector<Node*> switch_nodes_;
std::vector<Node*> exit_nodes_;
std::vector<Node*> next_iterations_nodes_;
std::vector<int> op_input_output_to_lowered_node_;
bool propagate_colocation_key_;
size_t num_loop_inputs_;
};
LowerWhileHelper::LowerWhileHelper(Node* while_op, const NameAttrList& cond_fn,
const NameAttrList& body_fn,
int parallel_iterations, Graph* graph,
const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable)
: while_op_(while_op),
graph_(graph),
flib_def_(flib_def),
name_(while_op->name()),
parallel_iterations_(parallel_iterations),
keep_node_fetchable_(keep_node_fetchable),
debug_info_(*while_op_),
cond_call_builder_(NewName("cond"), cond_fn.name(), flib_def,
&debug_info_),
body_call_builder_(NewName("body"), body_fn.name(), flib_def,
&debug_info_),
num_loop_inputs_(while_op_->num_inputs()) {
cond_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : cond_fn.attr()) {
cond_call_builder_.Attr(i.first, i.second);
}
body_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : body_fn.attr()) {
body_call_builder_.Attr(i.first, i.second);
}
enter_nodes_.resize(num_loop_inputs_);
merge_nodes_.reserve(num_loop_inputs_);
switch_nodes_.reserve(num_loop_inputs_);
exit_nodes_.reserve(num_loop_inputs_);
next_iterations_nodes_.reserve(num_loop_inputs_);
op_input_output_to_lowered_node_.resize(num_loop_inputs_, -1);
propagate_colocation_key_ =
flags::Global()
.enable_colocation_key_propagation_in_while_op_lowering.value();
}
Status LowerWhileHelper::RunInternal() {
InitializeInputOutputToLoweredNodeMap();
TF_RETURN_IF_ERROR(CreateEnterNodes());
TF_RETURN_IF_ERROR(CreateMergeNodes());
TF_RETURN_IF_ERROR(CreateCondFuncCallNode());
TF_RETURN_IF_ERROR(CreateSwitchNodes());
TF_RETURN_IF_ERROR(CreateBodyFuncCallNode());
TF_RETURN_IF_ERROR(CreateExitNodes());
TF_RETURN_IF_ERROR(CreateNextIterationNodes());
TF_RETURN_IF_ERROR(UpdateMergeNodes());
TF_RETURN_IF_ERROR(UpdateConsumers());
return absl::OkStatus();
}
void LowerWhileHelper::InitializeInputOutputToLoweredNodeMap() {
int counter = 0;
for (int i = 0; i < num_loop_inputs_; i++) {
if (!IsLoopCarriedResource(i)) {
op_input_output_to_lowered_node_[i] = counter++;
}
}
}
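// Creates one Enter node per While input; loop-carried resources are marked
// is_constant so they bypass the Merge/Switch/Exit machinery. Any control
// inputs of the While are funneled through a single NoOp feeding every Enter.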
Status LowerWhileHelper::CreateEnterNodes() {
std::vector<const Edge*> edges;
TF_RETURN_IF_ERROR(while_op_->input_edges(&edges));
for (const Edge* edge : edges) {
Node* enter_node;
NodeBuilder builder =
NodeBuilder(NewName("enter"), "Enter", flib_def_, &debug_info_)
.Input(NodeOut(edge->src(), edge->src_output()))
.Attr("frame_name", name_)
.Attr("parallel_iterations", parallel_iterations_)
.Device(edge->src()->requested_device())
.AssignedDevice(edge->src()->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = edge->src()->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
if (IsLoopCarriedResource(edge->dst_input())) {
builder.Attr("is_constant", true);
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &enter_node));
enter_nodes_[edge->dst_input()] = enter_node;
}
std::vector<Node*> control_inputs;
for (const Edge* e : while_op_->in_edges()) {
if (e->IsControlEdge()) {
control_inputs.push_back(e->src());
}
}
if (!control_inputs.empty()) {
Node* incoming_control_node;
NodeBuilder builder = NodeBuilder(NewName("LoopControlInputs"), "NoOp",
flib_def_, &debug_info_)
.ControlInputs(control_inputs)
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &incoming_control_node));
for (Node* n : enter_nodes_) {
graph_->AddControlEdge(incoming_control_node, n);
}
}
return absl::OkStatus();
}
Status LowerWhileHelper::CreateMergeNodes() {
for (Node* enter_node : enter_nodes_) {
bool is_constant = enter_node->attrs().FindByString("is_constant")->b();
if (is_constant && enter_node->output_type(0) == DT_RESOURCE) {
continue;
}
Node* merge_node;
NodeBuilder builder =
NodeBuilder(NewName("merge"), "Merge", flib_def_, &debug_info_)
.Input({NodeOut(enter_node, 0), NodeOut(enter_node, 0)})
.Device(enter_node->requested_device())
.AssignedDevice(enter_node->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = enter_node->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &merge_node));
merge_nodes_.emplace_back(merge_node);
}
return absl::OkStatus();
}
Status LowerWhileHelper::CreateCondFuncCallNode() {
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
cond_call_builder_.Input(NodeOut(enter_nodes_[i], 0));
} else {
cond_call_builder_.Input(
NodeOut(merge_nodes_[op_input_output_to_lowered_node_[i]], 0));
}
}
cond_call_builder_.Device(while_op_->requested_device());
TF_RETURN_IF_ERROR(cond_call_builder_.Finalize(graph_, &cond_call_node_));
graph_->AddControlEdge(merge_nodes_[0], cond_call_node_);
NodeBuilder builder =
NodeBuilder(NewName("LoopCond"), "LoopCond", flib_def_, &debug_info_)
.Input(NodeOut(cond_call_node_, 0))
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &loop_cond_node_));
return absl::OkStatus();
}
Status LowerWhileHelper::CreateSwitchNodes() {
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
continue;
}
string op_name;
{
const Node* input_node;
TF_RETURN_IF_ERROR(while_op_->input_node(i, &input_node));
op_name = strings::StrCat(input_node->name(), "_switch");
}
Node* merge_node = merge_nodes_[op_input_output_to_lowered_node_[i]];
Node* switch_node;
string op_type = "Switch";
if (IsRefType(merge_node->output_type(0))) {
op_type = "RefSwitch";
}
NodeBuilder builder =
NodeBuilder(NewName(op_name), op_type, flib_def_, &debug_info_)
.Input(NodeOut(merge_node, 0))
.Input(NodeOut(loop_cond_node_, 0))
.Device(merge_node->requested_device())
.AssignedDevice(merge_node->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = merge_node->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &switch_node));
switch_nodes_.emplace_back(switch_node);
}
return absl::OkStatus();
}
Status LowerWhileHelper::CreateBodyFuncCallNode() {
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
body_call_builder_.Input(NodeOut(enter_nodes_[i], 0));
} else {
body_call_builder_.Input(
NodeOut(switch_nodes_[op_input_output_to_lowered_node_[i]], 1));
}
}
body_call_builder_.Device(while_op_->requested_device());
TF_RETURN_IF_ERROR(body_call_builder_.Finalize(graph_, &body_call_node_));
Node* body_control_node;
string op_type = "Identity";
if (IsRefType(switch_nodes_[0]->output_type(1))) {
op_type = "RefIdentity";
}
NodeBuilder builder = NodeBuilder(NewName("loop_body_control"), op_type,
flib_def_, &debug_info_)
.Input(NodeOut(switch_nodes_[0], 1))
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &body_control_node));
graph_->AddControlEdge(body_control_node, body_call_node_);
return absl::OkStatus();
}
Status LowerWhileHelper::CreateExitNodes() {
std::vector<NodeOut> outputs;
outputs.reserve(num_loop_inputs_);
for (int i = 0; i < num_loop_inputs_; i++) {
if (IsLoopCarriedResource(i)) {
OutputTensor resource_tensor;
TF_RETURN_IF_ERROR(enter_nodes_[i]->input_tensor(0, &resource_tensor));
outputs.emplace_back(resource_tensor);
} else {
Node* exit_node;
NodeBuilder builder =
NodeBuilder(NewName("exit"), "Exit", flib_def_, &debug_info_)
.Input(NodeOut(switch_nodes_[op_input_output_to_lowered_node_[i]],
0))
.Device(switch_nodes_[op_input_output_to_lowered_node_[i]]
->requested_device())
.AssignedDevice(switch_nodes_[op_input_output_to_lowered_node_[i]]
->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr =
switch_nodes_[op_input_output_to_lowered_node_[i]]->attrs().Find(
kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &exit_node));
exit_nodes_.emplace_back(exit_node);
outputs.emplace_back(NodeOut(exit_node, 0));
}
}
NodeBuilder builder = NodeBuilder(NewName("LoopExecuted"), "NoOp",
OpRegistry::Global(), &debug_info_)
.ControlInputs(exit_nodes_)
.Device(while_op_->requested_device());
if (propagate_colocation_key_) {
auto colocation_attr = while_op_->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &lowered_while_executed_));
if (keep_node_fetchable_) {
TF_RETURN_IF_ERROR(
NodeBuilder(name_, "IdentityN", OpRegistry::Global(), &debug_info_)
.Input(outputs)
.Device(while_op_->requested_device())
.Finalize(graph_, &lowered_while_output_));
} else {
TF_RETURN_IF_ERROR(
NodeBuilder(name_, "NoOp", OpRegistry::Global(), &debug_info_)
.ControlInput(lowered_while_executed_)
.Device(while_op_->requested_device())
.Finalize(graph_, &lowered_while_output_));
}
return absl::OkStatus();
}
Status LowerWhileHelper::CreateNextIterationNodes() {
for (int i = 0; i < num_loop_inputs_; i++) {
Node* next_iteration;
if (IsLoopCarriedResource(i)) {
continue;
}
Node* merge_node = merge_nodes_[op_input_output_to_lowered_node_[i]];
NodeBuilder builder =
NodeBuilder(NewName("next_iteration"), "NextIteration", flib_def_,
&debug_info_)
.Input(NodeOut(body_call_node_, i))
.ControlInput(body_call_node_)
.Device(merge_node->requested_device())
.AssignedDevice(merge_node->assigned_device_name());
if (propagate_colocation_key_) {
auto colocation_attr = merge_node->attrs().Find(kColocationAttrName);
if (colocation_attr) {
builder.Attr(kColocationAttrName, *colocation_attr);
}
}
TF_RETURN_IF_ERROR(builder.Finalize(graph_, &next_iteration));
next_iterations_nodes_.emplace_back(next_iteration);
}
return absl::OkStatus();
}
Status LowerWhileHelper::UpdateMergeNodes() {
for (int i = 0; i < merge_nodes_.size(); i++) {
TF_RETURN_IF_ERROR(
graph_->UpdateEdge(next_iterations_nodes_[i], 0, merge_nodes_[i], 1));
}
return absl::OkStatus();
}
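// Redirects consumers of the original While op: control edges hang off the
// LoopExecuted NoOp, loop-carried resource outputs are wired straight to the
// original resource tensor, and everything else reads from its Exit node.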
Status LowerWhileHelper::UpdateConsumers() {
for (const Edge* e : while_op_->out_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(lowered_while_executed_, e->dst());
} else {
if (IsLoopCarriedResource(e->src_output())) {
OutputTensor resource;
TF_RETURN_IF_ERROR(
enter_nodes_[e->src_output()]->input_tensor(0, &resource));
graph_->AddEdge(resource.node, resource.index, e->dst(),
e->dst_input());
} else {
int exit_node_index = op_input_output_to_lowered_node_[e->src_output()];
if (exit_node_index < 0) {
return errors::Internal(
"Expecting an Exit node for a Resource tensor.");
}
graph_->AddEdge(exit_nodes_[exit_node_index], 0, e->dst(),
e->dst_input());
}
}
}
return absl::OkStatus();
}
string LowerWhileHelper::NewName(const string& infix) {
return graph_->NewName(strings::StrCat(name_, "/", infix));
}
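// A DT_RESOURCE input is loop-carried when the body function passes the
// corresponding argument straight through to a return value; such inputs stay
// constant across iterations and need no Merge/Switch/Exit nodes.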
bool LowerWhileHelper::IsLoopCarriedResource(int index) {
if (while_op_->input_type(index) != DT_RESOURCE) return false;
auto body_func_name = while_op_->attrs().Find("body")->func().name();
auto body_func = flib_def_->Find(body_func_name);
auto arg_name = body_func->signature().input_arg(index).name();
for (auto& ret : body_func->ret())
if (ret.second == arg_name) return true;
return false;
}
}
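// Validates the While node's cond/body/parallel_iterations attributes, lowers
// it via LowerWhileHelper, and removes the original node from the graph.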
Status RewriteWhileNode(Node* n, Graph* g,
const FunctionLibraryDefinition* flib_def,
bool keep_node_fetchable) {
VLOG(2) << "Lower While node (keep_node_fetchable=" << keep_node_fetchable
<< "): " << SummarizeNode(*n);
const AttrValue* cond_attr = n->attrs().Find("cond");
if (cond_attr == nullptr) {
return errors::InvalidArgument("While cond function missing");
}
const AttrValue* body_attr = n->attrs().Find("body");
if (body_attr == nullptr) {
return errors::InvalidArgument("While body function missing");
}
const AttrValue* parallel_iterations_attr =
n->attrs().Find("parallel_iterations");
if (parallel_iterations_attr == nullptr) {
return errors::InvalidArgument("parallel_iterations attr missing");
}
if (parallel_iterations_attr->i() < 1) {
return errors::InvalidArgument("parallel_iterations must be > 0");
}
TF_RETURN_IF_ERROR(LowerWhileHelper::Run(
n, cond_attr->func(), body_attr->func(), parallel_iterations_attr->i(), g,
flib_def, keep_node_fetchable));
g->RemoveNode(n);
return absl::OkStatus();
}
} | #include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
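// Runs the LowerFunctionalOpsPass over the graph with function inlining
// enabled, which lowers While nodes into their control-flow primitive form.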
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerWhileOpTest, Simple) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(while_node)),
Output(while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_while_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
if (op->name() == "while") {
node_called_while_count++;
}
}
ASSERT_EQ(node_called_while_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
int enter_count = 0;
int exit_count = 0;
int switch_count = 0;
int merge_count = 0;
int next_iteration_count = 0;
node_called_while_count = 0;
int less_than_or_equan_to_n_count = 0;
int x_times_two_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsEnter()) {
++enter_count;
ASSERT_EQ(op->attrs().Find("parallel_iterations")->i(), 100);
}
if (op->IsExit()) {
++exit_count;
}
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
if (op->IsNextIteration()) {
++next_iteration_count;
}
if (op->name() == "while") {
node_called_while_count++;
}
if (op->type_string() == "LessThanOrEqualToN") {
less_than_or_equan_to_n_count++;
}
if (op->type_string() == "XTimesTwo") {
x_times_two_count++;
}
if (op->name() == "C") {
ASSERT_EQ(op->in_edges().size(), 2);
}
ASSERT_NE(op->type_string(), "While");
}
ASSERT_EQ(enter_count, 1);
ASSERT_EQ(exit_count, 1);
ASSERT_EQ(switch_count, 1);
ASSERT_EQ(merge_count, 1);
ASSERT_EQ(next_iteration_count, 1);
ASSERT_EQ(node_called_while_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(3));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
static void DanglingNodeTestHelper(int expected_count) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() =
test::function::XTimesTwoWithDanglingFloorDivNode();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwoWithDanglingFloorDivNode");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
auto c = ops::Identity(
root.WithOpName("C").WithControlDependencies(Output(while_node)),
Output(while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int mul_count = 0;
int floor_div_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == "Mul") {
mul_count++;
}
if (op->type_string() == "FloorDiv") {
floor_div_count++;
}
}
ASSERT_EQ(mul_count, 1);
ASSERT_EQ(floor_div_count, expected_count);
}
TEST(LowerWhileOpTest, DanglingNode) { DanglingNodeTestHelper(1); }
TEST(LowerWhileOpTest, DanglingNodeWithPruning) {
flags::Global().enable_function_pruning_before_inlining.reset(true);
DanglingNodeTestHelper(0);
flags::Global().enable_function_pruning_before_inlining.reset(false);
}
TEST(LowerWhileOpTest, ForwardAssignedInputDevice) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
TF_ASSERT_OK(graph->AddFunctionLibrary(f_lib_proto));
auto type = DT_FLOAT;
Node* placeholder;
TF_CHECK_OK(NodeBuilder("placed_node", "Placeholder")
.Attr("dtype", type)
.Finalize(graph.get(), &placeholder));
const string assigned_device_name = "/job:localhost/replica:0/task:0/gpu:0";
placeholder->set_assigned_device_name(assigned_device_name);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(placeholder)});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &graph->flib_def())
.Input(inputs)
.Attr("T", {type})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(graph.get(), &while_node));
TF_ASSERT_OK(Rewrite(&graph));
const Node* placeholder_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "placed_node") {
placeholder_node = op;
}
}
ASSERT_NE(placeholder_node, nullptr);
int enter_consumers = 0;
const Node* enter_node = nullptr;
for (const Node* consumer : placeholder_node->out_nodes()) {
if (consumer->type_string() == "Enter") {
enter_consumers += 1;
enter_node = consumer;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(enter_consumers, 1);
int merge_consumers = 0;
const Node* merge_node = nullptr;
for (const Node* consumer : enter_node->out_nodes()) {
if (consumer->type_string() == "Merge") {
merge_consumers += 1;
merge_node = consumer;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(merge_consumers, 1);
int next_iteration_consumers = 0;
for (const Node* consumer : merge_node->in_nodes()) {
if (consumer->type_string() == "NextIteration") {
next_iteration_consumers += 1;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(next_iteration_consumers, 1);
int switch_consumers = 0;
const Node* switch_node = nullptr;
for (const Node* consumer : merge_node->out_nodes()) {
if (consumer->type_string() == "Switch") {
switch_consumers += 1;
switch_node = consumer;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(switch_consumers, 1);
int exit_consumers = 0;
for (const Node* consumer : switch_node->out_nodes()) {
if (consumer->type_string() == "Exit") {
exit_consumers += 1;
ASSERT_EQ(consumer->assigned_device_name(), assigned_device_name);
}
}
ASSERT_EQ(exit_consumers, 1);
}
TEST(LowerWhileOpTest, ForwardRequestedInputDevice) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
TF_ASSERT_OK(graph->AddFunctionLibrary(f_lib_proto));
auto type = DT_FLOAT;
const string gpu_0_device = "/job:localhost/replica:0/task:0/gpu:0";
const string gpu_1_device = "/job:localhost/replica:0/task:0/gpu:1";
const string gpu_2_device = "/job:localhost/replica:0/task:0/gpu:2";
Node* gpu_0_ph;
TF_CHECK_OK(NodeBuilder("placed_node", "Placeholder")
.Attr("dtype", type)
.Device(gpu_0_device)
.Finalize(graph.get(), &gpu_0_ph));
Node* control_in;
TF_CHECK_OK(NodeBuilder("control_in", "Placeholder")
.Attr("dtype", type)
.Device(gpu_1_device)
.Finalize(graph.get(), &control_in));
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(gpu_0_ph)});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &graph->flib_def())
.Input(inputs)
.ControlInput(control_in)
.Device(gpu_2_device)
.Attr("T", {type})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(graph.get(), &while_node));
Node* control_out;
TensorProto proto;
proto.set_dtype(DT_FLOAT);
TensorShape empty_shape({0});
empty_shape.AsProto(proto.mutable_tensor_shape());
TF_ASSERT_OK(NodeBuilder("control_out", "Const")
.ControlInput(while_node)
.Attr("dtype", DT_FLOAT)
.Attr("value", proto)
.Finalize(graph.get(), &control_out));
TF_ASSERT_OK(Rewrite(&graph));
const Node* placeholder_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "placed_node") {
placeholder_node = op;
}
}
ASSERT_NE(placeholder_node, nullptr);
int enter_consumers = 0;
const Node* enter_node = nullptr;
for (const Node* consumer : placeholder_node->out_nodes()) {
if (consumer->type_string() == "Enter") {
enter_consumers += 1;
enter_node = consumer;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(enter_consumers, 1);
int merge_consumers = 0;
const Node* merge_node = nullptr;
for (const Node* consumer : enter_node->out_nodes()) {
if (consumer->type_string() == "Merge") {
merge_consumers += 1;
merge_node = consumer;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(merge_consumers, 1);
int next_iteration_consumers = 0;
for (const Node* consumer : merge_node->in_nodes()) {
if (consumer->type_string() == "NextIteration") {
next_iteration_consumers += 1;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(next_iteration_consumers, 1);
int switch_consumers = 0;
const Node* switch_node = nullptr;
for (const Node* consumer : merge_node->out_nodes()) {
if (consumer->type_string() == "Switch") {
switch_consumers += 1;
switch_node = consumer;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(switch_consumers, 1);
int exit_consumers = 0;
for (const Node* consumer : switch_node->out_nodes()) {
if (consumer->type_string() == "Exit") {
exit_consumers += 1;
ASSERT_EQ(consumer->requested_device(), gpu_0_device);
}
}
ASSERT_EQ(exit_consumers, 1);
const Node* loop_control_inputs_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopControlInputs")) {
loop_control_inputs_node = op;
}
}
ASSERT_NE(loop_control_inputs_node, nullptr);
ASSERT_EQ(loop_control_inputs_node->requested_device(), gpu_2_device);
const Node* loop_executed_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopExecuted")) {
loop_executed_node = op;
}
}
ASSERT_NE(loop_executed_node, nullptr);
ASSERT_EQ(loop_executed_node->requested_device(), gpu_2_device);
}
TEST(LowerWhileOpTest, ForwardColocationKeyAttribute) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = test::function::XTimesTwo();
*f_lib_proto.add_function() = test::function::LessThanOrEqualToN(8);
TF_ASSERT_OK(graph->AddFunctionLibrary(f_lib_proto));
auto type = DT_FLOAT;
const string gpu_0_device = "/job:localhost/replica:0/task:0/gpu:0";
const string gpu_1_device = "/job:localhost/replica:0/task:0/gpu:1";
const string gpu_2_device = "/job:localhost/replica:0/task:0/gpu:2";
Node* gpu_0_ph;
AttrValue gpu_0_colocation_attr;
gpu_0_colocation_attr.mutable_list()->add_s("loc@:some_op_on_gpu_0_device");
AttrValue gpu_1_colocation_attr;
gpu_1_colocation_attr.mutable_list()->add_s("loc@:some_op_on_gpu_1_device");
AttrValue gpu_2_colocation_attr;
gpu_2_colocation_attr.mutable_list()->add_s("loc@:some_op_on_gpu_2_device");
TF_CHECK_OK(NodeBuilder("placed_node", "Placeholder")
.Attr("dtype", type)
.Attr(kColocationAttrName, gpu_0_colocation_attr)
.Finalize(graph.get(), &gpu_0_ph));
Node* control_in;
TF_CHECK_OK(NodeBuilder("control_in", "Placeholder")
.Attr("dtype", type)
.Attr(kColocationAttrName, gpu_1_colocation_attr)
.Finalize(graph.get(), &control_in));
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(gpu_0_ph)});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &graph->flib_def())
.Input(inputs)
.ControlInput(control_in)
.Attr(kColocationAttrName, gpu_2_colocation_attr)
.Attr("T", {type})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(graph.get(), &while_node));
Node* control_out;
TensorProto proto;
proto.set_dtype(DT_FLOAT);
TensorShape empty_shape({0});
empty_shape.AsProto(proto.mutable_tensor_shape());
TF_ASSERT_OK(NodeBuilder("control_out", "Const")
.ControlInput(while_node)
.Attr("dtype", DT_FLOAT)
.Attr("value", proto)
.Finalize(graph.get(), &control_out));
TF_ASSERT_OK(Rewrite(&graph));
const Node* placeholder_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (op->name() == "placed_node") {
placeholder_node = op;
}
}
ASSERT_NE(placeholder_node, nullptr);
int enter_consumers = 0;
const Node* enter_node = nullptr;
for (const Node* consumer : placeholder_node->out_nodes()) {
if (consumer->type_string() == "Enter") {
enter_consumers += 1;
enter_node = consumer;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(enter_consumers, 1);
int merge_consumers = 0;
const Node* merge_node = nullptr;
for (const Node* consumer : enter_node->out_nodes()) {
if (consumer->type_string() == "Merge") {
merge_consumers += 1;
merge_node = consumer;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(merge_consumers, 1);
int next_iteration_consumers = 0;
  // As above: NextIteration nodes are inputs to the Merge node.
  for (const Node* consumer : merge_node->in_nodes()) {
if (consumer->type_string() == "NextIteration") {
next_iteration_consumers += 1;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(next_iteration_consumers, 1);
int switch_consumers = 0;
const Node* switch_node = nullptr;
for (const Node* consumer : merge_node->out_nodes()) {
if (consumer->type_string() == "Switch") {
switch_consumers += 1;
switch_node = consumer;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(switch_consumers, 1);
int exit_consumers = 0;
for (const Node* consumer : switch_node->out_nodes()) {
if (consumer->type_string() == "Exit") {
exit_consumers += 1;
auto* coloc_attr = consumer->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_0_device");
}
}
ASSERT_EQ(exit_consumers, 1);
const Node* loop_control_inputs_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopControlInputs")) {
loop_control_inputs_node = op;
}
}
ASSERT_NE(loop_control_inputs_node, nullptr);
auto* coloc_attr =
loop_control_inputs_node->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_2_device");
const Node* loop_executed_node = nullptr;
for (const auto* op : graph->op_nodes()) {
if (absl::StrContains(op->name(), "LoopExecuted")) {
loop_executed_node = op;
}
}
ASSERT_NE(loop_executed_node, nullptr);
coloc_attr = loop_executed_node->attrs().Find(kColocationAttrName);
ASSERT_NE(coloc_attr, nullptr);
ASSERT_EQ(coloc_attr->list().s_size(), 1);
ASSERT_EQ(coloc_attr->list().s(0), "loc@:some_op_on_gpu_2_device");
}
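// Lowering a While op with two loop variables: each variable should get its
// own Enter/Merge/Switch/Exit/NextIteration node (hence the expected counts
// of 2 below), and running the lowered graph must match the functional
// semantics of the cond/body functions.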
TEST(LowerWhileOpTest, MultipleInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XPlusOneXTimesY();
*(f_lib_proto.add_function()) = test::function::XYXLessThanOrEqualToN(4);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(root.WithOpName("B"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(a.node()), NodeBuilder::NodeOut(b.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("XYXLessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XPlusOneXTimesY");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32, DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsEnter());
ASSERT_FALSE(op->IsExit());
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
ASSERT_FALSE(op->IsNextIteration());
ASSERT_FALSE(op->IsLoopCond());
}
TF_ASSERT_OK(Rewrite(&graph));
int enter_count = 0;
int exit_count = 0;
int switch_count = 0;
int merge_count = 0;
int next_iteration_count = 0;
int x_plus_one_x_times_y_count = 0;
int x_y_x_less_than_equal_to_n_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsEnter()) {
++enter_count;
}
if (op->IsExit()) {
++exit_count;
}
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
if (op->IsNextIteration()) {
++next_iteration_count;
}
if (op->type_string() == "XPlusOneXTimesY") {
x_plus_one_x_times_y_count++;
}
if (op->type_string() == "XYXLessThanOrEqualToN") {
x_y_x_less_than_equal_to_n_count++;
}
ASSERT_NE(op->type_string(), "While");
}
ASSERT_EQ(enter_count, 2);
ASSERT_EQ(exit_count, 2);
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 2);
ASSERT_EQ(next_iteration_count, 2);
ASSERT_EQ(x_plus_one_x_times_y_count, 0);
ASSERT_EQ(x_y_x_less_than_equal_to_n_count, 0);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1));
feeds.emplace(Output(b.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(
feeds, {Output(while_node, 0), Output(while_node, 1)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 5);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 24);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(3));
feeds.emplace(Output(b.node()), Input::Initializer(5));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(
feeds, {Output(while_node, 0), Output(while_node, 1)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 5);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 60);
}
}
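// When the cond/body functions carry the `_noinline` attribute, lowering the
// While op must still leave exactly one call node per function in the graph
// rather than inlining their bodies.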
TEST(LowerWhileOpTest, DoNotInlineLoweredFunctions) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef x_times_two = test::function::XTimesTwo();
FunctionDef less_than_or_eq = test::function::LessThanOrEqualToN(8);
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
(*less_than_or_eq.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary f_lib_proto;
*f_lib_proto.add_function() = x_times_two;
*f_lib_proto.add_function() = less_than_or_eq;
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* while_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue cond_func;
cond_func.mutable_func()->set_name("LessThanOrEqualToN");
AttrValue body_func;
body_func.mutable_func()->set_name("XTimesTwo");
TF_ASSERT_OK(
NodeBuilder("while", "While", &root.graph()->flib_def())
.Input(inputs)
.Attr("T", {DT_INT32})
.Attr("cond", cond_func)
.Attr("body", body_func)
.Attr("parallel_iterations", 100)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Finalize(root.graph(), &while_node));
TF_ASSERT_OK(root.DoShapeInference(while_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int x_times_two_count = 0;
int less_than_or_eq_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == x_times_two.signature().name()) {
x_times_two_count++;
}
if (op->type_string() == less_than_or_eq.signature().name()) {
less_than_or_eq_count++;
}
ASSERT_NE(op->type_string(), "While");
}
ASSERT_EQ(x_times_two_count, 1);
ASSERT_EQ(less_than_or_eq_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(1));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 16);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(3));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(while_node)}, &out_tensors));
ASSERT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
}
}
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_while_op.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_while_op_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ID: 96433603-bbb8-48f6-b6cb-ac5eef4b8ec9 | Language: cpp | Repository Name: google/arolla | File Name: jagged_shape | File Path in Repository: arolla/jagged_shape/dense_array/jagged_shape.cc | File Path for Unit Test: arolla/jagged_shape/jagged_shape_test.cc
Code:
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include <sstream>
#include <utility>
#include "arolla/jagged_shape/util/repr.h"
#include "arolla/util/repr.h"
#include "arolla/util/string.h"
namespace arolla {
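// Renders a JaggedDenseArrayShape as "JaggedShape(...)", with one entry per
// dimension. Each edge's split points are summarized by
// CompactSplitPointsAsSizesRepr: a single number when the row sizes are
// uniform, otherwise a list of sizes elided with "..." (here capped at 3
// items per side).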
ReprToken ReprTraits<JaggedDenseArrayShape>::operator()(
const JaggedDenseArrayShape& value) const {
std::ostringstream result;
result << "JaggedShape(";
bool first = true;
for (const auto& edge : value.edges()) {
result << NonFirstComma(first)
<< CompactSplitPointsAsSizesRepr(edge.edge_values().values.span(),
3);
}
result << ")";
return ReprToken{std::move(result).str()};
}
}
Unit Test - (Ground Truth):
#include "arolla/jagged_shape/jagged_shape.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/jagged_shape/array/jagged_shape.h"
#include "arolla/jagged_shape/dense_array/jagged_shape.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
using ::absl_testing::StatusIs;
using ::arolla::testing::ReprTokenEq;
using ::testing::ElementsAre;
namespace arolla {
namespace {
class JaggedArrayShapeHelper {
public:
using Shape = JaggedArrayShape;
using Edge = Shape::Edge;
static absl::string_view ReprName() { return "JaggedArrayShape"; }
static absl::StatusOr<ArrayEdge> EdgeFromSplitPoints(
absl::Span<const OptionalValue<int64_t>> split_points) {
return ArrayEdge::FromSplitPoints(CreateArray<int64_t>(split_points));
}
static absl::StatusOr<ArrayEdge> EdgeFromMapping(
absl::Span<const OptionalValue<int64_t>> mapping, int64_t parent_size) {
return ArrayEdge::FromMapping(CreateArray<int64_t>(mapping), parent_size);
}
static const Buffer<int64_t>& GetSplitPoints(const ArrayEdge& edge) {
return edge.edge_values().dense_data().values;
}
};
class JaggedDenseArrayShapeHelper {
public:
using Shape = JaggedDenseArrayShape;
using Edge = Shape::Edge;
static absl::string_view ReprName() { return "JaggedShape"; }
static absl::StatusOr<DenseArrayEdge> EdgeFromSplitPoints(
absl::Span<const OptionalValue<int64_t>> split_points) {
return DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>(split_points));
}
static absl::StatusOr<DenseArrayEdge> EdgeFromMapping(
absl::Span<const OptionalValue<int64_t>> mapping, int64_t parent_size) {
return DenseArrayEdge::FromMapping(CreateDenseArray<int64_t>(mapping),
parent_size);
}
static const Buffer<int64_t>& GetSplitPoints(const DenseArrayEdge& edge) {
return edge.edge_values().values;
}
};
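// Both helpers expose the same static interface (EdgeFromSplitPoints,
// EdgeFromMapping, GetSplitPoints), which lets the typed test suite below
// run every test case against both JaggedArrayShape and
// JaggedDenseArrayShape.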
template <typename JaggedShapeHelper>
class JaggedShapeTest : public ::testing::Test {
public:
using Helper = JaggedShapeHelper;
using Shape = typename JaggedShapeHelper::Shape;
};
using JaggedShapeTestTypes =
::testing::Types<JaggedArrayShapeHelper, JaggedDenseArrayShapeHelper>;
TYPED_TEST_SUITE(JaggedShapeTest, JaggedShapeTestTypes);
TYPED_TEST(JaggedShapeTest, Empty) {
auto shape = TestFixture::Shape::Empty();
EXPECT_EQ(shape.rank(), 0);
EXPECT_EQ(shape.size(), 1);
EXPECT_TRUE(shape.edges().empty());
}
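// A shape is defined by a list of edges, one per dimension: edge i
// partitions the rows of dimension i-1. Worked example from the cases below:
// split points {0, 1, 3} describe two parent rows with sizes 1 and 2, and a
// mapping edge {0, 0, 1, 1, 3, 5} over 8 parents assigns children to parents
// and is normalized to the split points {0, 2, 4, 4, 5, 5, 6, 6, 6}. The
// shape's rank is the number of edges and its size is the last edge's child
// count.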
TYPED_TEST(JaggedShapeTest, FromEdges) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({}));
EXPECT_EQ(shape.rank(), 0);
EXPECT_EQ(shape.size(), 1);
EXPECT_TRUE(shape.edges().empty());
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({std::move(edge)}));
EXPECT_EQ(shape.rank(), 1);
EXPECT_EQ(shape.size(), 2);
auto edges = shape.edges();
EXPECT_EQ(edges.size(), 1);
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape,
Shape::FromEdges({std::move(edge1), std::move(edge2),
std::move(edge3)}));
EXPECT_EQ(shape.rank(), 3);
EXPECT_EQ(shape.size(), 4);
auto edges = shape.edges();
EXPECT_EQ(edges.size(), 3);
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 1, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 1, 2, 4));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 8}));
ASSERT_OK_AND_ASSIGN(auto edge2,
Helper::EdgeFromMapping({0, 0, 1, 1, 3, 5}, 8));
ASSERT_OK_AND_ASSIGN(
auto shape, Shape::FromEdges({std::move(edge1), std::move(edge2)}));
EXPECT_EQ(shape.rank(), 2);
EXPECT_EQ(shape.size(), 6);
auto edges = shape.edges();
EXPECT_EQ(edges.size(), 2);
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 8));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]),
ElementsAre(0, 2, 4, 4, 5, 5, 6, 6, 6));
}
}
TYPED_TEST(JaggedShapeTest, FromEdgesErrors) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2, 3}));
EXPECT_THAT(Shape::FromEdges({std::move(edge)}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incompatible dimensions - edges[0].parent_size "
"!= 1 (prior edge's child_size)"));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 4}));
EXPECT_THAT(Shape::FromEdges(
{std::move(edge1), std::move(edge2), std::move(edge3)}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incompatible dimensions - edges[2].parent_size "
"!= 3 (prior edge's child_size)"));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 1}));
ASSERT_OK_AND_ASSIGN(auto edge2,
Helper::EdgeFromMapping({0, std::nullopt}, 1));
EXPECT_THAT(Shape::FromEdges({std::move(edge1), std::move(edge2)}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected a full mapping"));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromMapping({1, 0}, 2));
EXPECT_THAT(Shape::FromEdges({std::move(edge1), std::move(edge2)}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected a sorted mapping"));
}
}
TYPED_TEST(JaggedShapeTest, FromEdges_BufferFactory) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromMapping({0, 0}, 1));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({std::move(edge)}));
EXPECT_TRUE(Helper::GetSplitPoints(shape.edges()[0]).is_owner());
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromMapping({0, 0}, 1));
UnsafeArenaBufferFactory arena{128};
ASSERT_OK_AND_ASSIGN(auto shape,
Shape::FromEdges({std::move(edge)}, arena));
EXPECT_FALSE(Helper::GetSplitPoints(shape.edges()[0]).is_owner());
}
}
TYPED_TEST(JaggedShapeTest, FlatFromSize) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
auto shape = Shape::FlatFromSize(3);
EXPECT_EQ(shape.rank(), 1);
EXPECT_EQ(shape.size(), 3);
auto edges = shape.edges();
EXPECT_EQ(edges.size(), 1);
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 3));
}
{
auto shape = Shape::FlatFromSize(3);
EXPECT_TRUE(Helper::GetSplitPoints(shape.edges()[0]).is_owner());
}
{
UnsafeArenaBufferFactory arena{128};
auto shape = Shape::FlatFromSize(3, arena);
EXPECT_FALSE(Helper::GetSplitPoints(shape.edges()[0]).is_owner());
}
}
TYPED_TEST(JaggedShapeTest, AddDims) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({std::move(edge)}));
ASSERT_OK_AND_ASSIGN(shape, shape.AddDims({}));
EXPECT_EQ(shape.rank(), 1);
EXPECT_EQ(shape.size(), 2);
EXPECT_THAT(Helper::GetSplitPoints(shape.edges()[0]), ElementsAre(0, 2));
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({std::move(edge)}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromMapping({0, 1, 2, 2}, 3));
ASSERT_OK_AND_ASSIGN(shape, shape.AddDims({edge2, edge3}));
EXPECT_EQ(shape.rank(), 3);
EXPECT_EQ(shape.size(), 4);
auto edges = shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 1, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 1, 2, 4));
}
{
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({}));
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromMapping({0, 0}, 1));
ASSERT_OK_AND_ASSIGN(shape, shape.AddDims({edge}));
EXPECT_TRUE(Helper::GetSplitPoints(shape.edges()[0]).is_owner());
}
{
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({}));
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromMapping({0, 0}, 1));
UnsafeArenaBufferFactory arena{128};
ASSERT_OK_AND_ASSIGN(shape, shape.AddDims({edge}, arena));
EXPECT_FALSE(Helper::GetSplitPoints(shape.edges()[0]).is_owner());
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({edge}));
EXPECT_THAT(shape.AddDims({edge}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incompatible dimensions - edges[1].parent_size "
"!= 2 (prior edge's child_size)"));
}
}
TYPED_TEST(JaggedShapeTest, RemoveDims) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({}));
shape = shape.RemoveDims(0);
EXPECT_EQ(shape.rank(), 0);
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({edge}));
shape = shape.RemoveDims(1);
EXPECT_EQ(shape.rank(), 1);
EXPECT_THAT(Helper::GetSplitPoints(shape.edges()[0]), ElementsAre(0, 2));
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({edge, edge2}));
shape = shape.RemoveDims(0);
EXPECT_EQ(shape.rank(), 0);
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({edge, edge2, edge3}));
shape = shape.RemoveDims(1);
EXPECT_EQ(shape.rank(), 1);
EXPECT_THAT(Helper::GetSplitPoints(shape.edges()[0]), ElementsAre(0, 2));
}
}
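// FlattenDims(from, to) composes the edges in the half-open range [from, to)
// into a single edge, so the rank becomes rank - (to - from) + 1; when
// from == to, a unit dimension is inserted at that position instead and the
// rank increases by one.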
TYPED_TEST(JaggedShapeTest, FlattenDims_RankDecrease) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto edge4,
Helper::EdgeFromSplitPoints({0, 3, 4, 11, 12}));
ASSERT_OK_AND_ASSIGN(auto shape,
Shape::FromEdges({edge1, edge2, edge3, edge4}));
{
auto new_shape = shape.FlattenDims(0, 1);
EXPECT_EQ(new_shape.rank(), 4);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 1, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 1, 2, 4));
EXPECT_THAT(Helper::GetSplitPoints(edges[3]), ElementsAre(0, 3, 4, 11, 12));
}
{
auto new_shape = shape.FlattenDims(0, 4);
EXPECT_EQ(new_shape.rank(), 1);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 12));
}
{
auto new_shape = shape.FlattenDims(1, 3);
EXPECT_EQ(new_shape.rank(), 3);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 1, 4));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 3, 4, 11, 12));
}
{
auto new_shape = shape.FlattenDims(0, 4);
EXPECT_TRUE(Helper::GetSplitPoints(new_shape.edges()[0]).is_owner());
}
{
UnsafeArenaBufferFactory arena{128};
auto new_shape = shape.FlattenDims(0, 4, arena);
EXPECT_FALSE(Helper::GetSplitPoints(new_shape.edges()[0]).is_owner());
}
}
TYPED_TEST(JaggedShapeTest, FlattenDims_RankIncrease) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto edge4,
Helper::EdgeFromSplitPoints({0, 3, 4, 11, 12}));
ASSERT_OK_AND_ASSIGN(auto shape,
Shape::FromEdges({edge1, edge2, edge3, edge4}));
{
auto new_shape = shape.FlattenDims(0, 0);
EXPECT_EQ(new_shape.rank(), 5);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 1));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 1, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[3]), ElementsAre(0, 1, 2, 4));
EXPECT_THAT(Helper::GetSplitPoints(edges[4]), ElementsAre(0, 3, 4, 11, 12));
}
{
auto new_shape = shape.FlattenDims(2, 2);
EXPECT_EQ(new_shape.rank(), 5);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 1, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 1, 2, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[3]), ElementsAre(0, 1, 2, 4));
EXPECT_THAT(Helper::GetSplitPoints(edges[4]), ElementsAre(0, 3, 4, 11, 12));
}
{
auto new_shape = shape.FlattenDims(4, 4);
EXPECT_EQ(new_shape.rank(), 5);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
EXPECT_THAT(Helper::GetSplitPoints(edges[1]), ElementsAre(0, 1, 3));
EXPECT_THAT(Helper::GetSplitPoints(edges[2]), ElementsAre(0, 1, 2, 4));
EXPECT_THAT(Helper::GetSplitPoints(edges[3]), ElementsAre(0, 3, 4, 11, 12));
EXPECT_THAT(Helper::GetSplitPoints(edges[4]),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12));
}
{
auto empty_shape = Shape::Empty();
auto new_shape = empty_shape.FlattenDims(0, 0);
EXPECT_EQ(new_shape.rank(), 1);
auto edges = new_shape.edges();
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 1));
}
{
auto new_shape = shape.FlattenDims(0, 0);
EXPECT_TRUE(Helper::GetSplitPoints(new_shape.edges()[0]).is_owner());
}
{
UnsafeArenaBufferFactory arena{128};
auto new_shape = shape.FlattenDims(0, 0, arena);
EXPECT_FALSE(Helper::GetSplitPoints(new_shape.edges()[0]).is_owner());
}
}
TYPED_TEST(JaggedShapeTest, MovableAndCopyableClass) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
static_assert(std::is_copy_constructible_v<Shape> &&
std::is_copy_assignable_v<Shape>);
static_assert(std::is_move_constructible_v<Shape> &&
std::is_move_assignable_v<Shape>);
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({std::move(edge)}));
auto shape_cpy = shape;
EXPECT_EQ(shape_cpy.rank(), 1);
EXPECT_EQ(shape_cpy.size(), 2);
auto edges = shape_cpy.edges();
EXPECT_EQ(edges.size(), 1);
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
}
{
ASSERT_OK_AND_ASSIGN(auto edge, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({std::move(edge)}));
auto shape_move = std::move(shape);
EXPECT_EQ(shape_move.rank(), 1);
EXPECT_EQ(shape_move.size(), 2);
auto edges = shape_move.edges();
EXPECT_EQ(edges.size(), 1);
EXPECT_THAT(Helper::GetSplitPoints(edges[0]), ElementsAre(0, 2));
}
}
TYPED_TEST(JaggedShapeTest, Fingerprint) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 2}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2}));
EXPECT_EQ(FingerprintHasher("salt").Combine(shape1).Finish(),
FingerprintHasher("salt").Combine(shape2).Finish());
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape3, Shape::FromEdges({edge1, edge3}));
EXPECT_NE(FingerprintHasher("salt").Combine(shape1).Finish(),
FingerprintHasher("salt").Combine(shape3).Finish());
ASSERT_OK_AND_ASSIGN(auto shape4, Shape::FromEdges({edge1, edge2, edge3}));
EXPECT_NE(FingerprintHasher("salt").Combine(shape1).Finish(),
FingerprintHasher("salt").Combine(shape4).Finish());
}
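// FastEquivalenceCheck is a cheap three-valued comparison: kNotEq means
// provably different, kEq is returned only when full equality can be proven
// cheaply (the same object, or rank <= 1 in the cases below), and kSizesEq
// means the per-dimension sizes match without proving the partitions are
// equal -- as the "False negative" case shows, structurally different shapes
// can still report kSizesEq.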
TYPED_TEST(JaggedShapeTest, FastEquivalenceCheck) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
JaggedShapeFastEquivalenceResult kEqSizes(
JaggedShapeFastEquivalenceResult::kSizesEq);
JaggedShapeFastEquivalenceResult kNotEq(
JaggedShapeFastEquivalenceResult::kNotEq);
JaggedShapeFastEquivalenceResult kEq(JaggedShapeFastEquivalenceResult::kEq);
{
SCOPED_TRACE("Empty is fully equal.");
auto shape = Shape::Empty();
EXPECT_EQ(shape.FastEquivalenceCheck(shape), kEq);
}
{
SCOPED_TRACE("Rank 1 is fully equal.");
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape, Shape::FromEdges({edge1}));
EXPECT_EQ(shape.FastEquivalenceCheck(shape), kEq);
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1}));
EXPECT_EQ(shape.FastEquivalenceCheck(shape2), kEq);
EXPECT_EQ(shape2.FastEquivalenceCheck(shape), kEq);
ASSERT_OK_AND_ASSIGN(auto edge1_new, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape2_new, Shape::FromEdges({edge1_new}));
EXPECT_EQ(shape.FastEquivalenceCheck(shape2_new), kEq);
EXPECT_EQ(shape2_new.FastEquivalenceCheck(shape), kEq);
}
{
SCOPED_TRACE("Equal shapes.");
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2, edge3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
EXPECT_EQ(shape1.FastEquivalenceCheck(shape1), kEq)
<< "the same pointer must be exact equal";
EXPECT_EQ(shape1.FastEquivalenceCheck(shape2), kEqSizes);
EXPECT_EQ(shape2.FastEquivalenceCheck(shape1), kEqSizes);
}
{
SCOPED_TRACE("Different shapes.");
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge2}));
EXPECT_EQ(shape1.FastEquivalenceCheck(shape2), kNotEq);
EXPECT_EQ(shape2.FastEquivalenceCheck(shape1), kNotEq);
}
{
SCOPED_TRACE("Different ranks.");
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1}));
EXPECT_EQ(shape1.FastEquivalenceCheck(shape2), kNotEq);
EXPECT_EQ(shape2.FastEquivalenceCheck(shape1), kNotEq);
}
{
SCOPED_TRACE("False negative.");
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge4, Helper::EdgeFromSplitPoints({0, 2, 3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge3, edge4}));
EXPECT_EQ(shape1.FastEquivalenceCheck(shape2), kEqSizes);
EXPECT_EQ(shape2.FastEquivalenceCheck(shape1), kEqSizes);
}
}
TYPED_TEST(JaggedShapeTest, EqOp) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2, edge3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
EXPECT_TRUE(shape1.IsEquivalentTo(shape2));
EXPECT_TRUE(shape2.IsEquivalentTo(shape1));
EXPECT_TRUE(shape1.IsEquivalentTo(shape1));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge2}));
EXPECT_FALSE(shape1.IsEquivalentTo(shape2));
EXPECT_FALSE(shape2.IsEquivalentTo(shape1));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1}));
EXPECT_FALSE(shape1.IsEquivalentTo(shape2));
EXPECT_FALSE(shape2.IsEquivalentTo(shape1));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge4, Helper::EdgeFromSplitPoints({0, 2, 3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge3, edge4}));
EXPECT_FALSE(shape1.IsEquivalentTo(shape2));
EXPECT_FALSE(shape2.IsEquivalentTo(shape1));
}
}
TYPED_TEST(JaggedShapeTest, IsBroadcastableTo) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2, edge3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
EXPECT_TRUE(shape1.IsBroadcastableTo(shape2));
EXPECT_TRUE(shape2.IsBroadcastableTo(shape1));
EXPECT_TRUE(shape1.IsBroadcastableTo(shape1));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge2}));
EXPECT_FALSE(shape1.IsBroadcastableTo(shape2));
EXPECT_FALSE(shape2.IsBroadcastableTo(shape1));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 2, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 2, 4, 6}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
EXPECT_TRUE(shape1.IsBroadcastableTo(shape2));
EXPECT_FALSE(shape2.IsBroadcastableTo(shape1));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2_1, Helper::EdgeFromSplitPoints({0, 2, 3}));
ASSERT_OK_AND_ASSIGN(auto edge2_2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 2, 4, 6}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2_1}));
ASSERT_OK_AND_ASSIGN(auto shape2,
Shape::FromEdges({edge1, edge2_2, edge3}));
EXPECT_FALSE(shape1.IsBroadcastableTo(shape2));
EXPECT_FALSE(shape2.IsBroadcastableTo(shape1));
}
}
TYPED_TEST(JaggedShapeTest, GetBroadcastEdge) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
auto edge = shape1.GetBroadcastEdge(shape2);
EXPECT_TRUE(edge.IsEquivalentTo(*Helper::EdgeFromSplitPoints({0, 1, 4})));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2, edge3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
auto edge = shape1.GetBroadcastEdge(shape2);
EXPECT_TRUE(
edge.IsEquivalentTo(*Helper::EdgeFromSplitPoints({0, 1, 2, 3, 4})));
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
auto edge = shape1.GetBroadcastEdge(shape2);
EXPECT_TRUE(Helper::GetSplitPoints(edge).is_owner());
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
UnsafeArenaBufferFactory arena{128};
auto edge = shape1.GetBroadcastEdge(shape2, arena);
EXPECT_FALSE(Helper::GetSplitPoints(edge).is_owner());
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2, edge3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
auto edge = shape1.GetBroadcastEdge(shape2);
EXPECT_TRUE(Helper::GetSplitPoints(edge).is_owner());
}
{
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 1, 3}));
ASSERT_OK_AND_ASSIGN(auto edge3, Helper::EdgeFromSplitPoints({0, 1, 2, 4}));
ASSERT_OK_AND_ASSIGN(auto shape1, Shape::FromEdges({edge1, edge2, edge3}));
ASSERT_OK_AND_ASSIGN(auto shape2, Shape::FromEdges({edge1, edge2, edge3}));
UnsafeArenaBufferFactory arena{128};
auto edge = shape1.GetBroadcastEdge(shape2, arena);
EXPECT_FALSE(Helper::GetSplitPoints(edge).is_owner());
}
}
TYPED_TEST(JaggedShapeTest, EdgeT) {
using Edge = typename TestFixture::Shape::Edge;
using TestEdge = typename TestFixture::Helper::Edge;
static_assert(std::is_same_v<Edge, TestEdge>);
}
TYPED_TEST(JaggedShapeTest, Repr) {
using Shape = typename TestFixture::Shape;
using Helper = typename TestFixture::Helper;
ASSERT_OK_AND_ASSIGN(auto edge1, Helper::EdgeFromSplitPoints({0, 2}));
ASSERT_OK_AND_ASSIGN(auto edge2, Helper::EdgeFromSplitPoints({0, 2, 7}));
ASSERT_OK_AND_ASSIGN(auto edge3,
Helper::EdgeFromSplitPoints({0, 1, 2, 3, 4, 5, 6, 7}));
ASSERT_OK_AND_ASSIGN(auto edge4,
Helper::EdgeFromSplitPoints({0, 2, 3, 4, 5, 6, 7, 8}));
ASSERT_OK_AND_ASSIGN(auto shape,
Shape::FromEdges({edge1, edge2, edge3, edge4}));
std::string expected_repr = absl::StrCat(
Helper::ReprName(), "(2, [2, 5], 1, [2, 1, 1, ..., 1, 1, 1])");
EXPECT_THAT(GenReprToken(shape), ReprTokenEq(expected_repr));
}
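// Benchmark helpers. GetShape(rank, num_children) builds a shape whose i-th
// edge has num_children^i parents with num_children children each, so the
// flat size is num_children^rank. The ArgPair/Args values below are
// (rank, num_children), plus the (from, to) range for FlattenDims.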
template <typename ShapeHelper>
typename ShapeHelper::Shape ShapeFromEdgesBM(
const typename ShapeHelper::Shape::EdgeVec& edges,
benchmark::State& state) {
state.PauseTiming();
auto edges_cpy = edges;
state.ResumeTiming();
return ShapeHelper::Shape::FromEdges(std::move(edges_cpy)).value();
}
template <typename ShapeHelper>
typename ShapeHelper::Edge GetSplitPointsEdge(int64_t parent_size,
int64_t children) {
std::vector<OptionalValue<int64_t>> split_points;
split_points.reserve(parent_size + 1);
for (int64_t i = 0; i <= parent_size; ++i) {
split_points.push_back(i * children);
}
return ShapeHelper::EdgeFromSplitPoints(std::move(split_points)).value();
}
template <typename ShapeHelper>
typename ShapeHelper::Edge GetMappingEdge(int64_t parent_size,
int64_t children) {
std::vector<OptionalValue<int64_t>> mapping;
mapping.reserve(parent_size * children);
for (int64_t i = 0; i < parent_size; ++i) {
for (int64_t j = 0; j < children; ++j) {
mapping.push_back(i);
}
}
return ShapeHelper::EdgeFromMapping(std::move(mapping), parent_size).value();
}
template <typename ShapeHelper>
typename ShapeHelper::Shape GetShape(int64_t rank, int64_t num_children) {
typename ShapeHelper::Shape::EdgeVec edges;
edges.reserve(rank);
for (int i = 0; i < rank; ++i) {
edges.push_back(GetSplitPointsEdge<ShapeHelper>(std::pow(num_children, i),
num_children));
}
return ShapeHelper::Shape::FromEdges(std::move(edges)).value();
}
template <typename ShapeHelper>
void BM_JaggedShape_EmptyCreation(benchmark::State& state) {
for (auto _ : state) {
auto shape = ShapeHelper::Shape::Empty();
benchmark::DoNotOptimize(shape);
}
}
BENCHMARK(BM_JaggedShape_EmptyCreation<JaggedArrayShapeHelper>);
BENCHMARK(BM_JaggedShape_EmptyCreation<JaggedDenseArrayShapeHelper>);
template <typename ShapeHelper>
void BM_JaggedShape_FromSplitPointEdges(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
typename ShapeHelper::Shape::EdgeVec edges;
edges.reserve(rank);
for (int i = 0; i < rank; ++i) {
edges.push_back(GetSplitPointsEdge<ShapeHelper>(std::pow(num_children, i),
num_children));
}
for (auto _ : state) {
benchmark::DoNotOptimize(edges);
auto shape = ShapeFromEdgesBM<ShapeHelper>(edges, state);
benchmark::DoNotOptimize(shape);
}
}
BENCHMARK(BM_JaggedShape_FromSplitPointEdges<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_FromSplitPointEdges<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
template <typename ShapeHelper>
void BM_JaggedShape_FromMappingEdges(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
typename ShapeHelper::Shape::EdgeVec edges;
edges.reserve(rank);
for (int i = 0; i < rank; ++i) {
edges.push_back(
GetMappingEdge<ShapeHelper>(std::pow(num_children, i), num_children));
}
for (auto _ : state) {
benchmark::DoNotOptimize(edges);
auto shape = ShapeFromEdgesBM<ShapeHelper>(edges, state);
benchmark::DoNotOptimize(shape);
}
}
BENCHMARK(BM_JaggedShape_FromMappingEdges<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_FromMappingEdges<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
template <typename ShapeHelper>
void BM_JaggedShape_FastEquivalenceCheck(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
auto shape1 = GetShape<ShapeHelper>(rank, num_children);
auto shape2 = GetShape<ShapeHelper>(rank, num_children);
for (auto _ : state) {
benchmark::DoNotOptimize(shape1);
benchmark::DoNotOptimize(shape2);
auto eq = shape1.FastEquivalenceCheck(shape2);
benchmark::DoNotOptimize(eq);
}
}
BENCHMARK(BM_JaggedShape_FastEquivalenceCheck<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_FastEquivalenceCheck<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
template <typename ShapeHelper>
void BM_JaggedShape_IsEquivalentTo(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
auto shape1 = GetShape<ShapeHelper>(rank, num_children);
auto shape2 = GetShape<ShapeHelper>(rank, num_children);
for (auto _ : state) {
benchmark::DoNotOptimize(shape1);
benchmark::DoNotOptimize(shape2);
auto eq = shape1.IsEquivalentTo(shape2);
benchmark::DoNotOptimize(eq);
}
}
BENCHMARK(BM_JaggedShape_IsEquivalentTo<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_IsEquivalentTo<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
template <typename ShapeHelper>
void BM_JaggedShape_IsEquivalentTo_SameObj(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
auto shape1 = GetShape<ShapeHelper>(rank, num_children);
auto shape2 = shape1;
for (auto _ : state) {
benchmark::DoNotOptimize(shape1);
benchmark::DoNotOptimize(shape2);
auto eq = shape1.IsEquivalentTo(shape2);
benchmark::DoNotOptimize(eq);
}
}
BENCHMARK(BM_JaggedShape_IsEquivalentTo_SameObj<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_IsEquivalentTo_SameObj<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(4, 100);
template <typename ShapeHelper>
void BM_JaggedShape_FlattenDims(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
auto shape = GetShape<ShapeHelper>(rank, num_children);
int from = state.range(2);
int to = state.range(3);
for (auto _ : state) {
benchmark::DoNotOptimize(shape);
benchmark::DoNotOptimize(from);
benchmark::DoNotOptimize(to);
auto flattened_shape = shape.FlattenDims(from, to);
benchmark::DoNotOptimize(flattened_shape);
}
}
BENCHMARK(BM_JaggedShape_FlattenDims<JaggedArrayShapeHelper>)
->Args({6, 10, 0, 6})
->Args({6, 10, 0, 2})
->Args({6, 10, 2, 4})
->Args({6, 10, 4, 6})
->Args({6, 10, 5, 6})
->Args({6, 10, 0, 0})
->Args({6, 10, 2, 2})
->Args({6, 10, 4, 4})
->Args({6, 10, 6, 6});
BENCHMARK(BM_JaggedShape_FlattenDims<JaggedDenseArrayShapeHelper>)
->Args({6, 10, 0, 6})
->Args({6, 10, 0, 2})
->Args({6, 10, 2, 4})
->Args({6, 10, 4, 6})
->Args({6, 10, 5, 6})
->Args({6, 10, 0, 0})
->Args({6, 10, 2, 2})
->Args({6, 10, 4, 4})
->Args({6, 10, 6, 6});
template <typename ShapeHelper>
void BM_JaggedShape_IsBroadcastableTo(benchmark::State& state) {
const int rank_1 = state.range(0);
const int rank_2 = state.range(1);
const int num_children = state.range(2);
auto shape1 = GetShape<ShapeHelper>(rank_1, num_children);
auto shape2 = GetShape<ShapeHelper>(rank_2, num_children);
for (auto _ : state) {
benchmark::DoNotOptimize(shape1);
benchmark::DoNotOptimize(shape2);
auto is_broadcastable = shape1.IsBroadcastableTo(shape2);
benchmark::DoNotOptimize(is_broadcastable);
}
}
BENCHMARK(BM_JaggedShape_IsBroadcastableTo<JaggedArrayShapeHelper>)
->Args({1, 1, 1})
->Args({1, 5, 5})
->Args({4, 5, 5});
BENCHMARK(BM_JaggedShape_IsBroadcastableTo<JaggedDenseArrayShapeHelper>)
->Args({1, 1, 1})
->Args({1, 5, 5})
->Args({4, 5, 5});
template <typename ShapeHelper>
void BM_JaggedShape_IsBroadcastableTo_SameObj(benchmark::State& state) {
const int rank_1 = state.range(0);
const int num_children = state.range(1);
auto shape1 = GetShape<ShapeHelper>(rank_1, num_children);
auto shape2 = shape1;
for (auto _ : state) {
benchmark::DoNotOptimize(shape1);
benchmark::DoNotOptimize(shape2);
auto is_broadcastable = shape1.IsBroadcastableTo(shape2);
benchmark::DoNotOptimize(is_broadcastable);
}
}
BENCHMARK(BM_JaggedShape_IsBroadcastableTo_SameObj<JaggedArrayShapeHelper>)
->Args({1, 1})
->Args({1, 5})
->Args({4, 5});
BENCHMARK(BM_JaggedShape_IsBroadcastableTo_SameObj<JaggedDenseArrayShapeHelper>)
->Args({1, 1})
->Args({1, 5})
->Args({4, 5});
template <typename ShapeHelper>
void BM_JaggedShape_Copying(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
auto shape = GetShape<ShapeHelper>(rank, num_children);
for (auto _ : state) {
benchmark::DoNotOptimize(shape);
auto shape_copy = shape;
benchmark::DoNotOptimize(shape_copy);
}
}
BENCHMARK(BM_JaggedShape_Copying<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_Copying<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
template <typename ShapeHelper>
void BM_JaggedShape_Repr(benchmark::State& state) {
const int rank = state.range(0);
const int num_children = state.range(1);
auto shape = GetShape<ShapeHelper>(rank, num_children);
for (auto _ : state) {
benchmark::DoNotOptimize(shape);
auto repr = Repr(shape);
benchmark::DoNotOptimize(repr);
}
}
BENCHMARK(BM_JaggedShape_Repr<JaggedArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
BENCHMARK(BM_JaggedShape_Repr<JaggedDenseArrayShapeHelper>)
->ArgPair(1, 1)
->ArgPair(100, 1)
->ArgPair(1, 100)
->ArgPair(4, 100);
}
}
Code Url: https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/dense_array/jagged_shape.cc
Test Code Url: https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/jagged_shape/jagged_shape_test.cc
Commit Hash: 1ca990dbeca224035efdabffecc7f3738df6b52c
ID: 49ddf85c-a7a2-47ba-973b-ea18e73dc05d | Language: cpp | Repository Name: tensorflow/tensorflow | File Name: get_options_op | File Path in Repository: tensorflow/core/kernels/data/get_options_op.cc | File Path for Unit Test: tensorflow/core/kernels/data/get_options_op_test.cc
Code:
#include "tensorflow/core/kernels/data/get_options_op.h"
#include "absl/memory/memory.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
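// Extracts the DatasetBase from the input variant tensor and emits its
// Options proto, serialized, as a scalar string tensor.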
void GetOptionsOp::Compute(OpKernelContext* ctx) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
if (ctx->status().ok()) {
Tensor* string_handle_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({}), &string_handle_t));
string_handle_t->scalar<tstring>()() = input->options().SerializeAsString();
}
}
string GetOptionsOp::TraceString(const OpKernelContext& ctx,
bool verbose) const {
return tsl::profiler::TraceMeOp(name_view(), type_string_view());
}
namespace {
REGISTER_KERNEL_BUILDER(Name("GetOptions").Device(DEVICE_CPU).Priority(2),
GetOptionsOp);
REGISTER_KERNEL_BUILDER(Name("GetOptions")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("serialized_options")
.Priority(1),
GetOptionsOp);
}
}
}
Unit Test - (Ground Truth):
#include "tensorflow/core/kernels/data/get_options_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptions[] = R"proto(
deterministic: true
slack: true
optimization_options { apply_default_optimizations: true autotune: true }
distribute_options {}
)proto";
class GetOptionsParams : public DatasetParams {
public:
template <typename T>
GetOptionsParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(OptionsDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
return absl::OkStatus();
}
string dataset_type() const override { return "GetOptions"; }
string op_name() const override { return dataset_type(); }
private:
string serialized_options_;
};
class GetOptionsOpTest : public DatasetOpsTestBase {};
OptionsDatasetParams OptionsDatasetParams0() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
GetOptionsParams GetOptionsParams0() {
return GetOptionsParams(OptionsDatasetParams0(),
{DT_INT64},
{PartialTensorShape({})},
"get_options_0");
}
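// End-to-end check: a RangeDataset wrapped in an OptionsDataset carrying
// kOptions is fed through GetOptions; the serialized string it returns must
// parse back into an Options proto and match the serialization of kOptions.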
TEST_F(GetOptionsOpTest, Compute) {
auto test_case_params = GetOptionsParams0();
TF_ASSERT_OK(InitializeRuntime(test_case_params));
std::vector<Tensor> output;
TF_ASSERT_OK(RunDatasetOp(test_case_params, &output));
EXPECT_EQ(1, output.size());
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
Tensor expected_tensor =
CreateTensor<tstring>(TensorShape({}), {options.SerializeAsString()});
Tensor result_tensor = output[0];
string serialized_options = result_tensor.scalar<tstring>()();
Options result_options;
result_options.ParseFromString(serialized_options);
TF_EXPECT_OK(ExpectEqual(expected_tensor, result_tensor));
}
}
}
}
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/get_options_op.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/get_options_op_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ID: 5c94ebb6-18e0-4adb-b0a7-957d43bac92d | Language: cpp | Repository Name: tensorflow/tensorflow | File Name: import_tensorflow | File Path in Repository: tensorflow/lite/toco/import_tensorflow.cc | File Path for Unit Test: tensorflow/lite/toco/import_tensorflow_test.cc
Code:
#include "tensorflow/lite/toco/import_tensorflow.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/map.h"
#include "google/protobuf/text_format.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/model_flags.pb.h"
#include "tensorflow/lite/toco/tensorflow_graph_matching/resolve_cluster.h"
#include "tensorflow/lite/toco/tensorflow_util.h"
#include "tensorflow/lite/toco/tooling_util.h"
using tensorflow::AttrValue;
using tensorflow::DT_BOOL;
using tensorflow::DT_COMPLEX64;
using tensorflow::DT_FLOAT;
using tensorflow::DT_INT16;
using tensorflow::DT_INT32;
using tensorflow::DT_INT64;
using tensorflow::DT_QUINT8;
using tensorflow::DT_STRING;
using tensorflow::DT_UINT16;
using tensorflow::DT_UINT32;
using tensorflow::DT_UINT8;
using tensorflow::GraphDef;
using tensorflow::NodeDef;
using tensorflow::TensorProto;
using tensorflow::TensorShapeProto;
namespace toco {
namespace {
bool HasAttr(const NodeDef& node, const std::string& attr_name) {
return node.attr().count(attr_name) > 0;
}
bool HasWildcardDimension(const TensorShapeProto& shape) {
for (const auto& dim : shape.dim()) {
if (dim.size() == -1) return true;
}
return false;
}
const std::string& GetStringAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kS);
return attr.s();
}
int64_t GetIntAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name)) << attr_name << " not found in:\n"
<< node.DebugString();
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kI);
return attr.i();
}
float GetFloatAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kF);
return attr.f();
}
bool GetBoolAttr(const NodeDef& node, const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kB);
return attr.b();
}
tensorflow::DataType GetDataTypeAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kType);
return attr.type();
}
const TensorShapeProto& GetShapeAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kShape);
return attr.shape();
}
const TensorProto& GetTensorAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name)) << "No attr named '" << attr_name << "'";
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kTensor);
return attr.tensor();
}
const AttrValue::ListValue& GetListAttr(const NodeDef& node,
const std::string& attr_name) {
CHECK(HasAttr(node, attr_name));
const auto& attr = node.attr().at(attr_name);
CHECK_EQ(attr.value_case(), AttrValue::kList);
return attr.list();
}
tensorflow::Status CheckOptionalAttr(const NodeDef& node,
const std::string& attr_name,
const std::string& expected_value) {
if (HasAttr(node, attr_name)) {
const std::string& value = GetStringAttr(node, attr_name);
if (value != expected_value) {
return tensorflow::errors::InvalidArgument(
"Unexpected value for attribute '" + attr_name + "'. Expected '" +
expected_value + "'");
}
}
return absl::OkStatus();
}
tensorflow::Status CheckOptionalAttr(
const NodeDef& node, const std::string& attr_name,
const tensorflow::DataType& expected_value) {
if (HasAttr(node, attr_name)) {
const tensorflow::DataType& value = GetDataTypeAttr(node, attr_name);
if (value != expected_value) {
return tensorflow::errors::InvalidArgument(
"Unexpected value for attribute '" + attr_name + "'. Expected '" +
tensorflow::DataType_Name(expected_value) + "'");
}
}
return absl::OkStatus();
}
template <typename T1, typename T2>
tensorflow::Status ExpectValue(const T1& v1, const T2& v2,
const std::string& description) {
if (v1 == v2) return absl::OkStatus();
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Unexpected ", description, ": got ", v1, ", expected ", v2));
}
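// Maps a TensorFlow DataType to the toco ArrayDataType used in the Model.
// Unsupported types are logged and mapped to ArrayDataType::kNone.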
ArrayDataType ConvertDataType(tensorflow::DataType dtype) {
if (dtype == DT_UINT8)
return ArrayDataType::kUint8;
else if (dtype == DT_FLOAT)
return ArrayDataType::kFloat;
else if (dtype == DT_BOOL)
return ArrayDataType::kBool;
else if (dtype == DT_INT16)
return ArrayDataType::kInt16;
else if (dtype == DT_UINT16)
return ArrayDataType::kUint16;
else if (dtype == DT_INT32)
return ArrayDataType::kInt32;
else if (dtype == DT_UINT32)
return ArrayDataType::kUint32;
else if (dtype == DT_INT64)
return ArrayDataType::kInt64;
else if (dtype == DT_STRING)
return ArrayDataType::kString;
else if (dtype == DT_COMPLEX64)
return ArrayDataType::kComplex64;
else
LOG(INFO) << "Unsupported data type in placeholder op: " << dtype;
return ArrayDataType::kNone;
}
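// Copies the proto dimensions into `shape` and, if `input_flat_size` is
// non-null, computes the total element count. A shape containing any
// zero-sized dimension is imported with cleared dims and a flat size of 0.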
tensorflow::Status ImportShape(
const TFLITE_PROTO_NS::RepeatedPtrField<tensorflow::TensorShapeProto_Dim>&
input_dims,
int* input_flat_size, Shape* shape) {
std::vector<int> input_dims_only_sizes;
bool zero_sized_shape = false;
for (auto& d : input_dims) {
if (d.size() > std::numeric_limits<int>::max()) {
return tensorflow::errors::InvalidArgument("Shape element overflows");
}
if (d.size() == 0) {
zero_sized_shape = true;
}
input_dims_only_sizes.push_back(d.size());
}
if (zero_sized_shape) {
shape->mutable_dims()->clear();
if (input_flat_size != nullptr) *input_flat_size = 0;
return absl::OkStatus();
}
*shape->mutable_dims() = input_dims_only_sizes;
if (input_flat_size == nullptr) return absl::OkStatus();
return NumElements(input_dims_only_sizes, input_flat_size);
}
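// TensorTraits<T> abstracts over the per-type repeated fields of TensorProto
// (float_val, int_val, ...) so that ImportTensorData can be written once for
// all element types.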
template <typename T>
struct TensorTraits;
template <>
struct TensorTraits<float> {
static int size(const TensorProto& p) { return p.float_val_size(); }
static float get(const TensorProto& p, int i) { return p.float_val(i); }
static std::string accessor_name() { return "float_val"; }
static std::string type_name() { return "float"; }
static void CopyFromContent(const TensorProto& p, std::vector<float>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<uint8_t> {
static int size(const TensorProto& p) { return p.int_val_size(); }
static uint8_t get(const TensorProto& p, int i) { return p.int_val(i); }
static std::string accessor_name() { return "int_val"; }
static std::string type_name() { return "uint8"; }
static void CopyFromContent(const TensorProto& p,
std::vector<uint8_t>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<std::complex<float>> {
static int size(const TensorProto& p) { return p.scomplex_val_size() / 2; }
static std::complex<float> get(const TensorProto& p, int i) {
return std::complex<float>(p.scomplex_val(2 * i),
p.scomplex_val(2 * i + 1));
}
static std::string accessor_name() { return "scomplex_val"; }
static std::string type_name() { return "complex64"; }
static void CopyFromContent(const TensorProto& p,
std::vector<std::complex<float>>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<int32> {
static int size(const TensorProto& p) { return p.int_val_size(); }
static int32 get(const TensorProto& p, int i) { return p.int_val(i); }
static std::string accessor_name() { return "int_val"; }
static std::string type_name() { return "int32"; }
static void CopyFromContent(const TensorProto& p, std::vector<int32>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<uint32> {
static int size(const TensorProto& p) { return p.uint32_val_size(); }
  static uint32 get(const TensorProto& p, int i) { return p.uint32_val(i); }
static std::string accessor_name() { return "uint32_val"; }
static std::string type_name() { return "uint32"; }
static void CopyFromContent(const TensorProto& p, std::vector<uint32>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<int64_t> {
static int size(const TensorProto& p) { return p.int64_val_size(); }
static int64_t get(const TensorProto& p, int i) { return p.int64_val(i); }
static std::string accessor_name() { return "int64_val"; }
static std::string type_name() { return "int64"; }
static void CopyFromContent(const TensorProto& p,
std::vector<int64_t>* data) {
toco::port::CopyToBuffer(p.tensor_content(),
reinterpret_cast<char*>(data->data()));
}
};
template <>
struct TensorTraits<bool> {
static int size(const TensorProto& p) { return p.bool_val_size(); }
static bool get(const TensorProto& p, int i) { return p.bool_val(i); }
static std::string accessor_name() { return "bool_val"; }
static std::string type_name() { return "bool"; }
static void CopyFromContent(const TensorProto& p, std::vector<bool>* data) {
std::vector<char> buf(p.tensor_content().size());
toco::port::CopyToBuffer(p.tensor_content(), buf.data());
    for (size_t i = 0; i < p.tensor_content().size(); i++) {
(*data)[i] = static_cast<bool>(buf[i]);
}
}
};
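// Fills `output_data`, already sized to hold `input_flat_size` elements, from
// `input_tensor`. Three encodings are accepted: a fully populated repeated
// field, a packed `tensor_content` blob, and a partially populated repeated
// field whose last element is replicated to fill the remainder.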
template <typename T>
tensorflow::Status ImportTensorData(const TensorProto& input_tensor,
int input_flat_size,
std::vector<T>* output_data) {
CHECK_GE(output_data->size(), input_flat_size);
int num_elements_in_tensor = TensorTraits<T>::size(input_tensor);
if (num_elements_in_tensor == input_flat_size) {
for (int i = 0; i < num_elements_in_tensor; i++) {
(*output_data)[i] = TensorTraits<T>::get(input_tensor, i);
}
} else if (input_tensor.tensor_content().size() ==
input_flat_size * sizeof(T)) {
TensorTraits<T>::CopyFromContent(input_tensor, output_data);
} else if (num_elements_in_tensor >= 0 &&
num_elements_in_tensor < input_flat_size) {
int i = 0;
for (; i < num_elements_in_tensor; ++i) {
(*output_data)[i] = TensorTraits<T>::get(input_tensor, i);
}
auto last = i == 0 ? T(0) : (*output_data)[i - 1];
for (; i < input_flat_size; ++i) {
(*output_data)[i] = last;
}
} else {
std::string accessor_name = TensorTraits<T>::accessor_name();
std::string type_name = TensorTraits<T>::type_name();
return tensorflow::errors::InvalidArgument(
absl::StrCat("Neither input_content (",
input_tensor.tensor_content().size() / sizeof(T), ") nor ",
accessor_name, " (", num_elements_in_tensor,
") have the right dimensions (", input_flat_size,
") for this ", type_name, " tensor"));
}
return absl::OkStatus();
}
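// Per-dtype importers for Const tensors. Each one checks the dtype and rank,
// resizes the destination buffer to the shape's flat size, and then copies
// the elements, in most cases via ImportTensorData.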
tensorflow::Status ImportFloatArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_FLOAT);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_float_data =
output_array->GetMutableBuffer<ArrayDataType::kFloat>().data;
output_float_data.resize(RequiredBufferSizeForShape(output_array->shape()),
0.f);
return ImportTensorData<float>(input_tensor, input_flat_size,
&output_float_data);
}
tensorflow::Status ImportComplex64Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_COMPLEX64);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 4);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_complex_data =
output_array->GetMutableBuffer<ArrayDataType::kComplex64>().data;
output_complex_data.resize(RequiredBufferSizeForShape(output_array->shape()),
std::complex<float>(0.f, 0.f));
return ImportTensorData<std::complex<float>>(input_tensor, input_flat_size,
&output_complex_data);
}
tensorflow::Status ImportQuint8Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_QUINT8);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kUint8>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<uint8_t>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportInt32Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_INT32);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kInt32>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<int32>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportUint32Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_UINT32);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kUint32>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<uint32>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportInt64Array(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_INT64);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_int_data =
output_array->GetMutableBuffer<ArrayDataType::kInt64>().data;
output_int_data.resize(RequiredBufferSizeForShape(output_array->shape()), 0);
return ImportTensorData<int64_t>(input_tensor, input_flat_size,
&output_int_data);
}
tensorflow::Status ImportBoolArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_BOOL);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
auto& output_bool_data =
output_array->GetMutableBuffer<ArrayDataType::kBool>().data;
output_bool_data.resize(RequiredBufferSizeForShape(output_array->shape()),
false);
status =
ImportTensorData<bool>(input_tensor, input_flat_size, &output_bool_data);
if (!status.ok() && output_bool_data.size() == 1) {
output_bool_data[0] = false;
return absl::OkStatus();
}
return status;
}
tensorflow::Status ImportStringArray(const TensorProto& input_tensor,
Array* output_array) {
CHECK_EQ(input_tensor.dtype(), DT_STRING);
const auto& input_shape = input_tensor.tensor_shape();
CHECK_LE(input_shape.dim_size(), 6);
int input_flat_size;
auto status = ImportShape(input_shape.dim(), &input_flat_size,
output_array->mutable_shape());
if (!status.ok()) return status;
if (input_flat_size != input_tensor.string_val_size()) {
return tensorflow::errors::InvalidArgument(
"Input_content string_val doesn't have the right dimensions "
"for this string tensor");
}
auto& output_string_data =
output_array->GetMutableBuffer<ArrayDataType::kString>().data;
output_string_data.resize(RequiredBufferSizeForShape(output_array->shape()));
CHECK_GE(output_string_data.size(), input_flat_size);
for (int i = 0; i < input_flat_size; ++i) {
output_string_data[i] = input_tensor.string_val(i);
}
return absl::OkStatus();
}
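// Returns the number of data inputs of `node`. When drop_control_dependency
// is set, control-dependency inputs (names starting with '^') are excluded;
// the scan assumes they always follow the data inputs.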
int GetInputsCount(const NodeDef& node,
const TensorFlowImportFlags& tf_import_flags) {
if (tf_import_flags.drop_control_dependency) {
    for (int i = 0; i < node.input_size(); ++i) {
if (node.input(i)[0] == '^') {
return i;
}
}
}
return node.input_size();
}
tensorflow::Status CheckInputsCount(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
int expected_input_count) {
if (GetInputsCount(node, tf_import_flags) != expected_input_count) {
return tensorflow::errors::FailedPrecondition(
node.op(), " node expects ", expected_input_count,
" input(s) other than control dependencies: ", node.DebugString());
}
return absl::OkStatus();
}
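// Creates a 1-D constant array of element type T in `model`, under a fresh
// name derived from `name`, and returns that name.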
template <ArrayDataType T>
std::string CreateConstArray(
Model* model, std::string const& name,
std::vector<typename toco::DataType<T>> const& data) {
std::string array_name = toco::AvailableArrayName(*model, name);
auto& array = model->GetOrCreateArray(array_name);
array.data_type = T;
array.mutable_shape()->mutable_dims()->emplace_back(
static_cast<int>(data.size()));
array.GetMutableBuffer<T>().data = data;
return array_name;
}
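// Serializes the original NodeDef into the operator so it can be re-emitted
// verbatim later (used for unsupported and Flex-delegated ops).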
void RetainTensorFlowNodeDef(const NodeDef& node, Operator* op) {
node.SerializeToString(&op->tensorflow_node_def);
}
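// Derives the output array names ("name", "name:1", ...) of an unsupported
// op from its OpDef, expanding number_attr and type_list_attr output args.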
void GetOutputNamesFromNodeDef(const NodeDef& node,
const tensorflow::OpDef& op_def,
TensorFlowUnsupportedOperator* op) {
int next_output = 0;
auto add_output = [&node, &next_output, op]() {
if (next_output == 0) {
op->outputs.push_back(node.name());
} else {
op->outputs.push_back(absl::StrCat(node.name(), ":", next_output));
}
++next_output;
};
for (int i = 0; i < op_def.output_arg_size(); ++i) {
std::string multiples = op_def.output_arg(i).number_attr();
if (!multiples.empty()) {
CHECK(HasAttr(node, multiples)) << "No attr named " << multiples;
int num_outputs = GetIntAttr(node, multiples);
for (int j = 0; j < num_outputs; ++j) {
add_output();
}
} else {
std::string list = op_def.output_arg(i).type_list_attr();
if (!list.empty()) {
CHECK(HasAttr(node, list)) << "No attr named " << list;
const AttrValue::ListValue& list_value = GetListAttr(node, list);
for (int j = 0; j < list_value.type_size(); ++j) {
add_output();
}
} else {
add_output();
}
}
}
}
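// Mirrors GetOutputNamesFromNodeDef for output data types, clearing the
// accumulated types whenever one of them cannot be determined.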
void GetOutputTypesFromNodeDef(const NodeDef& node,
const tensorflow::OpDef& op_def,
TensorFlowUnsupportedOperator* op) {
auto add_type = [&node, op](tensorflow::DataType type) {
if (type == tensorflow::DT_INVALID) {
LOG(WARNING) << "Op node missing output type attribute: " << node.name();
op->output_data_types.clear();
} else {
op->output_data_types.push_back(ConvertDataType(type));
}
};
auto get_type = [&node](const tensorflow::OpDef::ArgDef& a) {
if (a.type() != tensorflow::DT_INVALID) {
return a.type();
} else if (HasAttr(node, a.type_attr())) {
return GetDataTypeAttr(node, a.type_attr());
} else {
return tensorflow::DT_INVALID;
}
};
for (int i = 0; i < op_def.output_arg_size(); ++i) {
std::string multiples = op_def.output_arg(i).number_attr();
if (!multiples.empty()) {
CHECK(HasAttr(node, multiples)) << "No attr named " << multiples;
int num_outputs = GetIntAttr(node, multiples);
auto type = get_type(op_def.output_arg(i));
for (int j = 0; j < num_outputs; ++j) {
add_type(type);
}
} else {
std::string list = op_def.output_arg(i).type_list_attr();
if (!list.empty()) {
CHECK(HasAttr(node, list)) << "No attr named " << list;
const AttrValue::ListValue& list_value = GetListAttr(node, list);
for (int j = 0; j < list_value.type_size(); ++j) {
add_type(list_value.type(j));
}
} else {
add_type(get_type(op_def.output_arg(i)));
}
}
}
}
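// Imports `node` as a TensorFlowUnsupportedOperator, retaining its serialized
// NodeDef together with whatever output names, types and shapes can be
// recovered from the OpDef and the _output_* attributes.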
tensorflow::Status ConvertUnsupportedOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
static constexpr char kAttrOutputQuantized[] = "_output_quantized";
static constexpr char kAttrOutputTypes[] = "_output_types";
static constexpr char kAttrOutputShapes[] = "_output_shapes";
static constexpr char kAttrSupportOutputTypeFloatInQuantizedOp[] =
"_support_output_type_float_in_quantized_op";
LOG(INFO) << "Converting unsupported operation: " << node.op();
auto* op = new TensorFlowUnsupportedOperator;
op->tensorflow_op = node.op();
RetainTensorFlowNodeDef(node, op);
model->operators.emplace_back(op);
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
const tensorflow::OpDef* op_def = nullptr;
if (tensorflow::OpRegistry::Global()->LookUpOpDef(node.op(), &op_def).ok()) {
GetOutputNamesFromNodeDef(node, *op_def, op);
} else {
op->outputs.push_back(node.name());
}
if (HasAttr(node, kAttrOutputQuantized)) {
op->quantized = GetBoolAttr(node, kAttrOutputQuantized);
}
if (HasAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp)) {
op->support_output_type_float_in_quantized_op =
GetBoolAttr(node, kAttrSupportOutputTypeFloatInQuantizedOp);
}
if (HasAttr(node, kAttrOutputTypes)) {
const auto& output_types = GetListAttr(node, kAttrOutputTypes);
for (int i = 0; i < output_types.type_size(); ++i) {
op->output_data_types.push_back(ConvertDataType(output_types.type(i)));
}
} else if (HasAttr(node, "Tout")) {
const auto& output_type = GetDataTypeAttr(node, "Tout");
op->output_data_types.push_back(ConvertDataType(output_type));
} else if (op_def != nullptr) {
GetOutputTypesFromNodeDef(node, *op_def, op);
} else {
LOG(INFO) << "Unable to determine output type for op: " << node.op();
}
if (HasAttr(node, kAttrOutputShapes)) {
const auto& output_shapes = GetListAttr(node, kAttrOutputShapes);
Shape output_shape;
for (int i = 0; i < output_shapes.shape_size(); ++i) {
const auto& shape = output_shapes.shape(i);
if (HasWildcardDimension(shape)) {
LOG(INFO) << "Skipping wildcard output shape(s) for node: "
<< node.name();
op->output_shapes.clear();
break;
}
      const auto status = ImportShape(shape.dim(), nullptr, &output_shape);
if (!status.ok()) {
return status;
}
op->output_shapes.push_back(output_shape);
}
}
return absl::OkStatus();
}
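// Imports a Const node as a typed constant array. dtypes without a dedicated
// importer yield an array of type kNone.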
tensorflow::Status ConvertConstOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Const");
const auto& tensor = GetTensorAttr(node, "value");
const auto dtype = GetDataTypeAttr(node, "dtype");
tensorflow::Status status = absl::OkStatus();
auto& array = model->GetOrCreateArray(node.name());
switch (dtype) {
case DT_FLOAT:
array.data_type = ArrayDataType::kFloat;
status = ImportFloatArray(tensor, &array);
break;
case DT_INT32:
array.data_type = ArrayDataType::kInt32;
status = ImportInt32Array(tensor, &array);
break;
case DT_UINT32:
array.data_type = ArrayDataType::kUint32;
status = ImportUint32Array(tensor, &array);
break;
case DT_QUINT8:
array.data_type = ArrayDataType::kUint8;
status = ImportQuint8Array(tensor, &array);
break;
case DT_INT64:
array.data_type = ArrayDataType::kInt64;
status = ImportInt64Array(tensor, &array);
break;
case DT_STRING:
array.data_type = ArrayDataType::kString;
status = ImportStringArray(tensor, &array);
break;
case DT_BOOL:
array.data_type = ArrayDataType::kBool;
status = ImportBoolArray(tensor, &array);
break;
case DT_COMPLEX64:
array.data_type = ArrayDataType::kComplex64;
status = ImportComplex64Array(tensor, &array);
break;
default:
array.data_type = ArrayDataType::kNone;
array.GetMutableBuffer<ArrayDataType::kNone>();
break;
}
TF_RETURN_WITH_CONTEXT_IF_ERROR(
status, " (while processing node '" + node.name() + "')");
return absl::OkStatus();
}
tensorflow::Status ConvertConvOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Conv2D");
TF_RETURN_IF_ERROR(CheckInputsCount(node, tf_import_flags, 2));
TF_RETURN_IF_ERROR(CheckOptionalAttr(node, "data_format", "NHWC"));
TF_RETURN_IF_ERROR(CheckOptionalAttr(node, "T", DT_FLOAT));
const auto& input_name = node.input(0);
const auto& weights_name = node.input(1);
const auto& reordered_weights_name =
AvailableArrayName(*model, weights_name + "_reordered");
const Operator* existing_reorder =
GetOpWithOutput(*model, reordered_weights_name);
if (existing_reorder) {
CHECK(existing_reorder->type == OperatorType::kReorderAxes);
} else {
auto* reorder = new ReorderAxesOperator;
reorder->inputs = {weights_name};
reorder->outputs = {reordered_weights_name};
reorder->input_axes_order = AxesOrder::kHWIO;
reorder->output_axes_order = AxesOrder::kOHWI;
model->operators.emplace_back(reorder);
}
if (!HasAttr(node, "strides")) {
return tensorflow::errors::InvalidArgument("Missing attribute 'strides'");
}
const auto& strides = GetListAttr(node, "strides");
TF_RETURN_IF_ERROR(ExpectValue(strides.i_size(), 4, "number of strides"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(0), 1, "strides(0)"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(3), 1, "strides(3)"));
int dilation_height_factor;
int dilation_width_factor;
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
TF_RETURN_IF_ERROR(
ExpectValue(dilations.i_size(), 4, "number of dilations"));
if (dilations.i(0) != 1 || dilations.i(3) != 1) {
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Can only import Conv ops with dilation along the height "
"(1st) or width (2nd) axis. TensorFlow op \"",
node.name(), "\" had dilations:[ ", dilations.i(0), ", ",
dilations.i(1), ", ", dilations.i(2), ", ", dilations.i(3), "]."));
}
dilation_height_factor = dilations.i(1);
dilation_width_factor = dilations.i(2);
} else {
dilation_height_factor = 1;
dilation_width_factor = 1;
}
const auto& padding = GetStringAttr(node, "padding");
PaddingType padding_type;
if (padding == "SAME") {
padding_type = PaddingType::kSame;
} else if (padding == "VALID") {
padding_type = PaddingType::kValid;
} else {
return tensorflow::errors::InvalidArgument(
"Bad padding (only SAME and VALID are supported)");
}
auto* conv = new ConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
conv->dilation_height_factor = dilation_height_factor;
conv->dilation_width_factor = dilation_width_factor;
conv->padding.type = padding_type;
model->operators.emplace_back(conv);
return absl::OkStatus();
}
tensorflow::Status ConvertDepthwiseConvOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "DepthwiseConv2dNative");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
if (HasAttr(node, "data_format")) {
CHECK_EQ(GetStringAttr(node, "data_format"), "NHWC");
}
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
const auto& input_name = node.input(0);
const auto& weights_name = node.input(1);
const auto& reordered_weights_name = weights_name + "_reordered";
const Operator* existing_reorder =
GetOpWithOutput(*model, reordered_weights_name);
if (existing_reorder) {
CHECK(existing_reorder->type == OperatorType::kReorderAxes);
} else {
auto* reorder = new ReorderAxesOperator;
reorder->inputs = {weights_name};
reorder->outputs = {reordered_weights_name};
reorder->input_axes_order = AxesOrder::kHWIM;
reorder->output_axes_order = AxesOrder::k1HWO;
model->operators.emplace_back(reorder);
}
const auto& strides = GetListAttr(node, "strides");
TF_RETURN_IF_ERROR(ExpectValue(strides.i_size(), 4, "number of strides"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(0), 1, "strides(0)"));
TF_RETURN_IF_ERROR(ExpectValue(strides.i(3), 1, "strides(3)"));
int dilation_height_factor;
int dilation_width_factor;
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
TF_RETURN_IF_ERROR(
ExpectValue(dilations.i_size(), 4, "number of dilations"));
if (dilations.i(0) != 1 || dilations.i(3) != 1) {
return tensorflow::errors::InvalidArgument(absl::StrCat(
"Can only import Conv ops with dilation along the height "
"(1st) or width (2nd) axis. TensorFlow op \"",
node.name(), "\" had dilations:[ ", dilations.i(0), ", ",
dilations.i(1), ", ", dilations.i(2), ", ", dilations.i(3), "]."));
}
dilation_height_factor = dilations.i(1);
dilation_width_factor = dilations.i(2);
} else {
dilation_height_factor = 1;
dilation_width_factor = 1;
}
const auto& padding = GetStringAttr(node, "padding");
PaddingType padding_type;
if (padding == "SAME") {
padding_type = PaddingType::kSame;
} else if (padding == "VALID") {
padding_type = PaddingType::kValid;
} else {
return tensorflow::errors::InvalidArgument(
"Bad padding (only SAME and VALID are supported)");
}
auto* conv = new DepthwiseConvOperator;
conv->inputs = {input_name, reordered_weights_name};
conv->outputs = {node.name()};
conv->stride_height = strides.i(1);
conv->stride_width = strides.i(2);
conv->dilation_height_factor = dilation_height_factor;
conv->dilation_width_factor = dilation_width_factor;
conv->padding.type = padding_type;
model->operators.emplace_back(conv);
return absl::OkStatus();
}
tensorflow::Status ConvertDepthToSpaceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "DepthToSpace");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
tensorflow::DataType dtype = GetDataTypeAttr(node, "T");
if (dtype != DT_FLOAT && dtype != DT_UINT8 && dtype != DT_INT32 &&
dtype != DT_INT64) {
const auto* enum_descriptor = tensorflow::DataType_descriptor();
LOG(FATAL) << "TFLite does not support DepthToSpace with type T:"
<< enum_descriptor->FindValueByNumber(dtype)->name() << ". "
<< "T must be one of {DT_FLOAT, DT_UINT8, DT_INT32, DT_INT64}.";
}
auto* op = new DepthToSpaceOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
op->block_size = GetIntAttr(node, "block_size");
QCHECK_GE(op->block_size, 2);
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSpaceToDepthOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SpaceToDepth");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
tensorflow::DataType dtype = GetDataTypeAttr(node, "T");
if (dtype != DT_FLOAT && dtype != DT_UINT8 && dtype != DT_INT32 &&
dtype != DT_INT64) {
const auto* enum_descriptor = tensorflow::DataType_descriptor();
LOG(FATAL) << "TFLite does not support SpaceToDepth with type T:"
<< enum_descriptor->FindValueByNumber(dtype)->name() << ". "
<< "T must be one of {DT_FLOAT, DT_UINT8, DT_INT32, DT_INT64}.";
}
auto* op = new SpaceToDepthOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
op->block_size = GetIntAttr(node, "block_size");
QCHECK_GE(op->block_size, 2);
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertBiasAddOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "BiasAdd");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto& input_name = node.input(0);
const auto& bias_name = node.input(1);
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
auto* biasadd = new AddOperator;
biasadd->inputs.push_back(input_name);
biasadd->inputs.push_back(bias_name);
biasadd->outputs.push_back(node.name());
model->operators.emplace_back(biasadd);
return absl::OkStatus();
}
tensorflow::Status ConvertRandomUniform(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "RandomUniform");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_INT32);
auto op = std::make_unique<RandomUniformOperator>();
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
op->dtype = ConvertDataType(GetDataTypeAttr(node, "dtype"));
op->seed = GetIntAttr(node, "seed");
op->seed2 = GetIntAttr(node, "seed2");
CHECK(model != nullptr);
model->operators.emplace_back(std::move(op));
return absl::OkStatus();
}
tensorflow::Status ConvertIdentityOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "Identity" || node.op() == "CheckNumerics" ||
node.op() == "PlaceholderWithDefault" || node.op() == "StopGradient" ||
node.op() == "Snapshot" || node.op() == "EnsureShape");
auto* op = new TensorFlowIdentityOperator;
QCHECK_GE(node.input_size(), 1)
<< node.op()
<< " node expects at least 1 input other than control dependencies: "
<< node.DebugString();
const auto& input_name = node.input(0);
op->inputs.push_back(input_name);
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertIdentityNOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "IdentityN");
for (int i = 0; i < node.input_size(); ++i) {
auto* op = new TensorFlowIdentityOperator;
const auto& input_name = node.input(i);
std::string output_name = node.name();
if (i > 0) {
output_name = output_name + ":" + std::to_string(i);
}
op->inputs.push_back(input_name);
op->outputs.push_back(output_name);
model->operators.emplace_back(op);
}
return absl::OkStatus();
}
tensorflow::Status ConvertFakeQuantWithMinMaxArgs(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "FakeQuantWithMinMaxArgs");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
auto* op = new FakeQuantOperator;
op->inputs.push_back(node.input(0));
op->minmax = std::make_unique<MinMax>();
auto& minmax = *op->minmax;
minmax.min = GetFloatAttr(node, "min");
minmax.max = GetFloatAttr(node, "max");
op->outputs.push_back(node.name());
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
if (HasAttr(node, "narrow_range")) {
op->narrow_range = GetBoolAttr(node, "narrow_range");
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertFakeQuantWithMinMaxVars(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "FakeQuantWithMinMaxVars");
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK(num_inputs == 3 || num_inputs == 4)
<< "FakeQuantWithMinMaxVars node expects 3 or 4 inputs other than "
"control dependencies: "
<< node.DebugString();
auto* op = new FakeQuantOperator;
for (int i = 0; i < 3; i++) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
op->num_bits = HasAttr(node, "num_bits") ? GetIntAttr(node, "num_bits") : 8;
if (HasAttr(node, "narrow_range")) {
op->narrow_range = GetBoolAttr(node, "narrow_range");
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSqueezeOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Squeeze");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
auto* op = new SqueezeOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
if (HasAttr(node, "squeeze_dims")) {
const auto& squeeze_dims = GetListAttr(node, "squeeze_dims");
for (int i = 0; i < squeeze_dims.i_size(); ++i) {
op->squeeze_dims.push_back(squeeze_dims.i(i));
}
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSplitOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Split");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new TensorFlowSplitOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
const int num_split = GetIntAttr(node, "num_split");
op->outputs.push_back(node.name());
for (int i = 1; i < num_split; i++) {
op->outputs.push_back(absl::StrCat(node.name(), ":", i));
}
op->num_split = num_split;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSplitVOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SplitV");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
auto* op = new TensorFlowSplitVOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
const int num_split = GetIntAttr(node, "num_split");
op->outputs.push_back(node.name());
for (int i = 1; i < num_split; i++) {
op->outputs.push_back(absl::StrCat(node.name(), ":", i));
}
op->num_split = num_split;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSwitchOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Switch");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new TensorFlowSwitchOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
op->outputs.push_back(node.name() + ":1");
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSoftmaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Softmax");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
auto* softmax = new SoftmaxOperator;
softmax->inputs.push_back(input_name);
softmax->outputs.push_back(node.name());
CHECK(!node.attr().count("beta"));
if (node.attr().count("_softmax_beta")) {
softmax->beta = GetFloatAttr(node, "_softmax_beta");
} else {
softmax->beta = 1.f;
}
model->operators.emplace_back(softmax);
return absl::OkStatus();
}
tensorflow::Status ConvertLRNOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "LRN");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
auto* lrn = new LocalResponseNormalizationOperator;
lrn->inputs.push_back(input_name);
lrn->outputs.push_back(node.name());
lrn->range = GetIntAttr(node, "depth_radius");
lrn->bias = GetFloatAttr(node, "bias");
lrn->alpha = GetFloatAttr(node, "alpha");
lrn->beta = GetFloatAttr(node, "beta");
model->operators.emplace_back(lrn);
return absl::OkStatus();
}
tensorflow::Status ConvertMaxPoolOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "MaxPool");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
if (node.attr().count("data_format")) {
CHECK_EQ(GetStringAttr(node, "data_format"), "NHWC");
}
if (HasAttr(node, "T")) {
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
} else {
LOG(WARNING) << "Found MaxPool operator missing 'T' attribute";
}
auto* maxpool = new MaxPoolOperator;
maxpool->inputs.push_back(input_name);
maxpool->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4);
CHECK_EQ(strides.i(0), 1);
CHECK_EQ(strides.i(3), 1);
maxpool->stride_height = strides.i(1);
maxpool->stride_width = strides.i(2);
const auto& ksize = GetListAttr(node, "ksize");
CHECK_EQ(ksize.i_size(), 4);
CHECK_EQ(ksize.i(0), 1);
CHECK_EQ(ksize.i(3), 1);
maxpool->kheight = ksize.i(1);
maxpool->kwidth = ksize.i(2);
const auto& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
maxpool->padding.type = PaddingType::kSame;
} else if (padding == "VALID") {
maxpool->padding.type = PaddingType::kValid;
} else {
LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
}
model->operators.emplace_back(maxpool);
return absl::OkStatus();
}
tensorflow::Status ConvertAvgPoolOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "AvgPool");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto& input_name = node.input(0);
if (node.attr().count("data_format")) {
CHECK_EQ(GetStringAttr(node, "data_format"), "NHWC");
}
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
auto* avgpool = new AveragePoolOperator;
avgpool->inputs.push_back(input_name);
avgpool->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4);
CHECK_EQ(strides.i(0), 1);
CHECK_EQ(strides.i(3), 1);
avgpool->stride_height = strides.i(1);
avgpool->stride_width = strides.i(2);
const auto& ksize = GetListAttr(node, "ksize");
CHECK_EQ(ksize.i_size(), 4);
CHECK_EQ(ksize.i(0), 1);
CHECK_EQ(ksize.i(3), 1);
avgpool->kheight = ksize.i(1);
avgpool->kwidth = ksize.i(2);
const auto& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
avgpool->padding.type = PaddingType::kSame;
} else if (padding == "VALID") {
avgpool->padding.type = PaddingType::kValid;
} else {
LOG(FATAL) << "Bad padding (only SAME and VALID are supported)";
}
model->operators.emplace_back(avgpool);
return absl::OkStatus();
}
tensorflow::Status ConvertBatchMatMulOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* batch_matmul = new BatchMatMulOperator;
if (HasAttr(node, "adj_x")) {
batch_matmul->adj_x = GetBoolAttr(node, "adj_x");
}
if (HasAttr(node, "adj_y")) {
batch_matmul->adj_y = GetBoolAttr(node, "adj_y");
}
batch_matmul->inputs = {node.input(0), node.input(1)};
batch_matmul->outputs = {node.name()};
RetainTensorFlowNodeDef(node, batch_matmul);
model->operators.emplace_back(batch_matmul);
return absl::OkStatus();
}
tensorflow::Status ConvertMatMulOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
CHECK(!HasAttr(node, "adjoint_a") ||
(GetBoolAttr(node, "adjoint_a") == false));
CHECK(!HasAttr(node, "adjoint_b") ||
(GetBoolAttr(node, "adjoint_b") == false));
auto* matmul = new TensorFlowMatMulOperator;
if (HasAttr(node, "transpose_a")) {
matmul->transpose_a = GetBoolAttr(node, "transpose_a");
}
if (HasAttr(node, "transpose_b")) {
matmul->transpose_b = GetBoolAttr(node, "transpose_b");
}
matmul->inputs = {node.input(0), node.input(1)};
matmul->outputs = {node.name()};
model->operators.emplace_back(matmul);
return absl::OkStatus();
}
tensorflow::Status ConvertConcatOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
Operator* op = nullptr;
if (node.op() == "Concat") {
op = new TensorFlowConcatOperator;
} else if (node.op() == "ConcatV2") {
op = new TensorFlowConcatV2Operator;
} else {
LOG(FATAL) << "Expected Concat or ConcatV2";
}
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_GE(num_inputs, 2)
<< node.op()
<< " node expects at least 2 inputs other than control dependencies: "
<< node.DebugString();
CHECK_EQ(num_inputs, 1 + GetIntAttr(node, "N"));
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertMirrorPadOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
if (node.op() != "MirrorPad") {
LOG(FATAL) << "Expected MirrorPad.";
}
const int num_inputs = GetInputsCount(node, tf_import_flags);
CHECK_EQ(num_inputs, 2);
auto* op = new MirrorPadOperator;
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
const auto mode = GetStringAttr(node, "mode");
if (mode == "REFLECT") {
op->mode = toco::MirrorPadMode::kReflect;
} else if (mode == "SYMMETRIC") {
op->mode = toco::MirrorPadMode::kSymmetric;
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
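// Generic importer for ops that map 1:1 onto a toco Operator: it copies
// NumInputs inputs (all of them for kAnyNumInputs), fabricates NumOutputs
// output names, and optionally retains the NodeDef for Flex delegation.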
static constexpr int kAnyNumInputs = -1;
enum FlexSupport { kFlexOk, kFlexNotOk };
template <typename Op, int NumInputs, int NumOutputs, FlexSupport flex>
tensorflow::Status ConvertSimpleOperatorGeneric(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
if (NumInputs != kAnyNumInputs) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, NumInputs));
}
auto* op = new Op;
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
if (NumOutputs > 1) {
for (int i = 1; i < NumOutputs; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i));
}
}
if (flex == kFlexOk) {
RetainTensorFlowNodeDef(node, op);
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
template <typename Op, int NumInputs, int NumOutputs>
tensorflow::Status ConvertSimpleOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
return ConvertSimpleOperatorGeneric<Op, NumInputs, NumOutputs, kFlexNotOk>(
node, tf_import_flags, model_flags, model);
}
template <typename Op, int NumInputs, int NumOutputs>
tensorflow::Status ConvertSimpleOperatorFlexOk(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
return ConvertSimpleOperatorGeneric<Op, NumInputs, NumOutputs, kFlexOk>(
node, tf_import_flags, model_flags, model);
}
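// Imports a Const node only if its shape is fully known and its dtype has a
// dedicated importer; otherwise it falls back to the unsupported-op path.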
tensorflow::Status ConditionallyConvertConstOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
const auto& tensor = GetTensorAttr(node, "value");
const auto& shape = tensor.tensor_shape();
for (const auto& dim : shape.dim()) {
if (dim.size() <= 0) {
return ConvertUnsupportedOperator(node, tf_import_flags, model_flags,
model);
}
}
switch (GetDataTypeAttr(node, "dtype")) {
case DT_FLOAT:
case DT_INT32:
case DT_QUINT8:
case DT_INT64:
case DT_STRING:
case DT_BOOL:
case DT_COMPLEX64:
return ConvertConstOperator(node, tf_import_flags, model_flags, model);
default:
return ConvertUnsupportedOperator(node, tf_import_flags, model_flags,
model);
}
}
tensorflow::Status ConvertStridedSliceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "StridedSlice");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 4));
auto* op = new StridedSliceOperator;
for (const auto& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name());
op->begin_mask =
HasAttr(node, "begin_mask") ? GetIntAttr(node, "begin_mask") : 0;
op->ellipsis_mask =
HasAttr(node, "ellipsis_mask") ? GetIntAttr(node, "ellipsis_mask") : 0;
op->end_mask = HasAttr(node, "end_mask") ? GetIntAttr(node, "end_mask") : 0;
op->new_axis_mask =
HasAttr(node, "new_axis_mask") ? GetIntAttr(node, "new_axis_mask") : 0;
op->shrink_axis_mask = HasAttr(node, "shrink_axis_mask")
? GetIntAttr(node, "shrink_axis_mask")
: 0;
model->operators.emplace_back(op);
return absl::OkStatus();
}
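// Placeholder/LegacyFedInput nodes become model arrays, picking up dtype and
// any fully-known shape from the node attributes. Placeholders not listed in
// the model flags' input_arrays are recorded via Model::AddInvalidInputArray.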
tensorflow::Status ConvertPlaceholderOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "Placeholder" || node.op() == "LegacyFedInput");
if (node.op() == "Placeholder") {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 0));
}
bool inside_input_arrays = false;
for (const auto& input_array : model_flags.input_arrays()) {
if (node.name() == input_array.name()) {
inside_input_arrays = true;
break;
}
}
if (!inside_input_arrays) {
model->AddInvalidInputArray(node.name());
}
auto& array = model->GetOrCreateArray(node.name());
if (node.attr().count("dtype")) {
array.data_type = ConvertDataType(GetDataTypeAttr(node, "dtype"));
}
if (node.attr().count("shape")) {
const auto& shape = GetShapeAttr(node, "shape");
auto num_dims = shape.dim_size();
if (num_dims > 0 && !HasWildcardDimension(shape)) {
auto& dst_array_dims = *array.mutable_shape()->mutable_dims();
dst_array_dims.resize(num_dims);
      for (int i = 0; i < num_dims; i++) {
dst_array_dims[i] = shape.dim(i).size();
}
}
}
return absl::OkStatus();
}
tensorflow::Status ConvertNoOpOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
return absl::OkStatus();
}
tensorflow::Status ConvertCastOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Cast");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto tf_src_dtype = GetDataTypeAttr(node, "SrcT");
const auto tf_dst_dtype = GetDataTypeAttr(node, "DstT");
auto* op = new CastOperator;
op->src_data_type = ConvertDataType(tf_src_dtype);
op->dst_data_type = ConvertDataType(tf_dst_dtype);
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertFloorOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Floor");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto data_type = GetDataTypeAttr(node, "T");
CHECK(data_type == DT_FLOAT);
auto* op = new FloorOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertCeilOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Ceil");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto data_type = GetDataTypeAttr(node, "T");
CHECK(data_type == DT_FLOAT);
auto* op = new CeilOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertRoundOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Round");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto data_type = GetDataTypeAttr(node, "T");
CHECK(data_type == DT_FLOAT);
auto* op = new RoundOperator;
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertGatherOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "Gather" || node.op() == "GatherV2");
if (node.op() == "Gather")
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
if (node.op() == "GatherV2")
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
const auto indices_data_type = GetDataTypeAttr(node, "Tindices");
CHECK(indices_data_type == DT_INT32 || indices_data_type == DT_INT64);
auto* op = new GatherOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
if (node.input_size() >= 3) {
const auto axis_data_type = GetDataTypeAttr(node, "Taxis");
CHECK(axis_data_type == DT_INT32 || axis_data_type == DT_INT64);
op->inputs.push_back(node.input(2));
} else {
op->axis = {0};
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertGatherNdOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "GatherNd");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto indices_data_type = GetDataTypeAttr(node, "Tindices");
CHECK(indices_data_type == DT_INT32 || indices_data_type == DT_INT64);
auto* op = new GatherNdOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
template <typename Op>
tensorflow::Status ConvertArgMinMaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
const auto axis_data_type =
HasAttr(node, "Tidx") ? GetDataTypeAttr(node, "Tidx") : DT_INT32;
const auto output_type = HasAttr(node, "output_type")
? GetDataTypeAttr(node, "output_type")
: DT_INT64;
CHECK(axis_data_type == DT_INT64 || axis_data_type == DT_INT32);
CHECK(output_type == DT_INT64 || output_type == DT_INT32);
auto* op = new Op;
op->output_data_type = ConvertDataType(output_type);
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertArgMaxOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ArgMax");
return ConvertArgMinMaxOperator<ArgMaxOperator>(node, tf_import_flags,
model_flags, model);
}
tensorflow::Status ConvertArgMinOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ArgMin");
return ConvertArgMinMaxOperator<ArgMinOperator>(node, tf_import_flags,
model_flags, model);
}
tensorflow::Status ConvertResizeBilinearOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ResizeBilinear");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new ResizeBilinearOperator;
op->align_corners = false;
op->half_pixel_centers = false;
if (HasAttr(node, "align_corners")) {
op->align_corners = GetBoolAttr(node, "align_corners");
}
if (HasAttr(node, "half_pixel_centers")) {
op->half_pixel_centers = GetBoolAttr(node, "half_pixel_centers");
}
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertResizeNearestNeighborOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ResizeNearestNeighbor");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new ResizeNearestNeighborOperator;
op->align_corners = false;
op->half_pixel_centers = false;
if (HasAttr(node, "align_corners")) {
op->align_corners = GetBoolAttr(node, "align_corners");
}
if (HasAttr(node, "half_pixel_centers")) {
op->half_pixel_centers = GetBoolAttr(node, "half_pixel_centers");
}
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
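// Lowers BatchNormWithGlobalNormalization to a global-normalization
// BatchNormalizationOperator, precomputing the multiplier as rsqrt(variance),
// optionally multiplied by gamma, via auxiliary ops.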
tensorflow::Status ConvertBatchNormWithGlobalNormalizationOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "BatchNormWithGlobalNormalization");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 5));
std::string multiplier = node.name() + "_mul";
if (GetBoolAttr(node, "scale_after_normalization")) {
std::string rsqrt = node.name() + "_rsqrt";
auto* rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs.push_back(node.input(2));
rsqrt_op->outputs.push_back(rsqrt);
model->operators.emplace_back(rsqrt_op);
auto* mul_op = new MulOperator;
mul_op->inputs.push_back(rsqrt);
mul_op->inputs.push_back(node.input(4));
mul_op->outputs.push_back(multiplier);
model->operators.emplace_back(mul_op);
} else {
auto* rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs.push_back(node.input(2));
rsqrt_op->outputs.push_back(multiplier);
model->operators.emplace_back(rsqrt_op);
}
auto* op = new BatchNormalizationOperator;
op->global_normalization = true;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(multiplier);
op->inputs.push_back(node.input(3));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
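// Expands FusedBatchNorm(V3) into add(variance, epsilon) -> rsqrt ->
// mul(gamma), feeding a global-normalization BatchNormalizationOperator over
// (input, mean, multiplier, beta).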
tensorflow::Status ConvertFusedBatchNormOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK((node.op() == "FusedBatchNorm") || (node.op() == "FusedBatchNormV3"));
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 5));
const std::string& gamma_input = node.input(1);
const std::string& beta_input = node.input(2);
const std::string& moving_mean_input = node.input(3);
const std::string& moving_variance_input = node.input(4);
const std::string epsilon_array_name =
CreateConstArray<ArrayDataType::kFloat>(model,
node.name() + "_epsilon_array",
{GetFloatAttr(node, "epsilon")});
const std::string epsilon_add_op_name = node.name() + "_epsilon";
auto* epsilon_add_op = new AddOperator;
epsilon_add_op->inputs.push_back(moving_variance_input);
epsilon_add_op->inputs.push_back(epsilon_array_name);
epsilon_add_op->outputs.push_back(epsilon_add_op_name);
model->operators.emplace_back(epsilon_add_op);
const std::string rsqrt_op_name = node.name() + "_rsqrt";
auto* rsqrt_op = new TensorFlowRsqrtOperator;
rsqrt_op->inputs.push_back(epsilon_add_op_name);
rsqrt_op->outputs.push_back(rsqrt_op_name);
model->operators.emplace_back(rsqrt_op);
const std::string multiplier = node.name() + "_mul";
auto* mul_op = new MulOperator;
mul_op->inputs.push_back(rsqrt_op_name);
mul_op->inputs.push_back(gamma_input);
mul_op->outputs.push_back(multiplier);
model->operators.emplace_back(mul_op);
auto* op = new BatchNormalizationOperator;
op->global_normalization = true;
op->inputs.push_back(node.input(0));
op->inputs.push_back(moving_mean_input);
op->inputs.push_back(multiplier);
op->inputs.push_back(beta_input);
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertSpaceToBatchNDOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SpaceToBatchND");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
CHECK_EQ(GetDataTypeAttr(node, "Tblock_shape"), DT_INT32);
CHECK_EQ(GetDataTypeAttr(node, "Tpaddings"), DT_INT32);
auto* op = new SpaceToBatchNDOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertBatchToSpaceNDOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "BatchToSpaceND");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
CHECK_EQ(GetDataTypeAttr(node, "Tblock_shape"), DT_INT32);
CHECK_EQ(GetDataTypeAttr(node, "Tcrops"), DT_INT32);
auto* op = new BatchToSpaceNDOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
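// Shared importer for reduction ops. Inputs are (tensor, reduction_indices);
// both spellings of the keep-dims attribute are honored.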
template <typename T>
tensorflow::Status ConvertReduceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new T;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
if (HasAttr(node, "keepdims")) {
op->keep_dims = GetBoolAttr(node, "keepdims");
} else if (HasAttr(node, "keep_dims")) {
op->keep_dims = GetBoolAttr(node, "keep_dims");
}
return absl::OkStatus();
}
tensorflow::Status ConvertSvdfOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Svdf");
const int input_size = GetInputsCount(node, tf_import_flags);
QCHECK(input_size == 4 || input_size == 5)
<< "Svdf node expects 3 or 4 inputs other than control dependencies: "
<< node.DebugString();
bool has_bias = (input_size == 5);
auto* op = new SvdfOperator;
int index = 0;
op->inputs.push_back(node.input(index++));
op->inputs.push_back(node.input(index++));
op->inputs.push_back(node.input(index++));
if (has_bias) {
op->inputs.push_back(node.input(index++));
}
op->inputs.push_back(node.input(index));
op->outputs.push_back(node.name());
if (node.attr().at("ActivationFunction").s() == "Relu") {
op->fused_activation_function = FusedActivationFunctionType::kRelu;
} else {
op->fused_activation_function = FusedActivationFunctionType::kNone;
}
op->rank = node.attr().at("Rank").i();
model->operators.emplace_back(op);
return absl::OkStatus();
}
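// Conv2DBackpropInput weights arrive in TensorFlow's [height, width, output,
// input] layout; the converter inserts a Transpose with perm {2, 0, 1, 3} to
// reach the [output, height, width, input] layout TransposeConv expects,
// reusing an already-present transpose of the same weights when one exists.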
tensorflow::Status ConvertTransposeConvOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Conv2DBackpropInput");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
auto* op = new TransposeConvOperator;
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
const auto& strides = GetListAttr(node, "strides");
CHECK_EQ(strides.i_size(), 4)
<< "Can only import TransposeConv ops with 4D strides. TensorFlow op \""
<< node.name() << "\" has " << strides.i_size() << "D strides.";
CHECK((strides.i(0) == 1) && (strides.i(3) == 1))
<< "Can only import TransposeConv ops with striding along the height "
"(1st) or width (2nd) axis. TensorFlow op \""
<< node.name() << "\" had strides:[ " << strides.i(0) << ", "
<< strides.i(1) << ", " << strides.i(2) << ", " << strides.i(3) << "].";
op->stride_height = strides.i(1);
op->stride_width = strides.i(2);
if (HasAttr(node, "dilations")) {
const auto& dilations = GetListAttr(node, "dilations");
CHECK_EQ(dilations.i_size(), 4)
<< "Dilation unsupported in TransposeConv. TensorFlow op \""
<< node.name() << "\" had dilations";
CHECK((dilations.i(0) == 1) && (dilations.i(1) == 1) &&
(dilations.i(2) == 1) && (dilations.i(3) == 1))
<< "Dilation unsupported in TransposeConv. TensorFlow op \""
<< node.name() << "\" had dilations:[ " << dilations.i(0) << ", "
<< dilations.i(1) << ", " << dilations.i(2) << ", " << dilations.i(3)
<< "].";
}
const std::string& weights_name = node.input(TransposeConvOperator::WEIGHTS);
const std::string& transposed_weights_name = weights_name + "_transposed";
const Operator* existing_transpose =
GetOpWithOutput(*model, transposed_weights_name);
if (existing_transpose) {
CHECK(existing_transpose->type == OperatorType::kTranspose);
} else {
TransposeOperator* transpose = new TransposeOperator;
std::string perm_array = CreateConstArray<ArrayDataType::kInt32>(
model, node.name() + "_transpose_perm", {2, 0, 1, 3});
transpose->inputs = {weights_name, perm_array};
transpose->outputs = {transposed_weights_name};
model->operators.emplace_back(transpose);
}
op->inputs[1] = transposed_weights_name;
auto const& padding = GetStringAttr(node, "padding");
if (padding == "SAME") {
op->padding.type = PaddingType::kSame;
} else if (padding == "VALID") {
op->padding.type = PaddingType::kValid;
} else {
LOG(FATAL) << "Only SAME and VALID padding supported on "
"Conv2DBackpropInput nodes.";
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertRangeOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Range");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 3));
auto* op = new RangeOperator;
if (HasAttr(node, "Tidx")) {
const auto dtype = toco::GetDataTypeAttr(node, "Tidx");
CHECK(dtype == DT_UINT8 || dtype == DT_INT32 || dtype == DT_INT64 ||
dtype == DT_FLOAT);
op->dtype = ConvertDataType(dtype);
}
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
op->inputs.push_back(node.input(2));
op->outputs.push_back(node.name());
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertPackOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Pack");
auto op = std::make_unique<PackOperator>();
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_GE(num_inputs, 1)
<< node.op()
<< " node expects at least 1 input other than control dependencies: "
<< node.DebugString();
CHECK_EQ(num_inputs, GetIntAttr(node, "N"));
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->values_count = HasAttr(node, "N") ? GetIntAttr(node, "N") : num_inputs;
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : 0;
op->dtype = ConvertDataType(toco::GetDataTypeAttr(node, "T"));
op->outputs.push_back(node.name());
model->operators.emplace_back(std::move(op));
return absl::OkStatus();
}
tensorflow::Status ConvertUnpackOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Unpack");
auto op = std::make_unique<UnpackOperator>();
const int num_inputs = GetInputsCount(node, tf_import_flags);
QCHECK_EQ(num_inputs, 1);
op->inputs.push_back(node.input(0));
op->num = GetIntAttr(node, "num");
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : 0;
op->dtype = ConvertDataType(toco::GetDataTypeAttr(node, "T"));
op->outputs.push_back(node.name());
for (int i = 1; i < op->num; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i));
}
model->operators.emplace_back(std::move(op));
return absl::OkStatus();
}
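// NextIteration nodes produce no operator at all: each is recorded as a
// discardable RNN state in the model flags, preserving the back edge it
// represents for later reconstruction.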
tensorflow::Status ConvertOperatorSpecialCasedAsRNNBackEdge(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "NextIteration");
CHECK_EQ(node.input_size(), 1);
auto* rnn_state = model->flags.add_rnn_states();
rnn_state->set_discardable(true);
rnn_state->set_state_array(node.name());
rnn_state->set_back_edge_source_array(node.input(0));
rnn_state->set_size(1);
return absl::OkStatus();
}
tensorflow::Status ConvertShapeOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "Shape");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
const auto out_type =
HasAttr(node, "out_type") ? GetDataTypeAttr(node, "out_type") : DT_INT32;
CHECK(out_type == DT_INT64 || out_type == DT_INT32);
auto op = std::make_unique<TensorFlowShapeOperator>();
op->output_data_type = ConvertDataType(out_type);
op->inputs.push_back(node.input(0));
op->outputs.push_back(node.name());
model->operators.push_back(std::move(op));
return absl::OkStatus();
}
tensorflow::Status ConvertReverseSequenceOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "ReverseSequence");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto op = std::make_unique<ReverseSequenceOperator>();
if (HasAttr(node, "seq_dim")) {
op->seq_dim = GetIntAttr(node, "seq_dim");
}
op->batch_dim =
HasAttr(node, "batch_dim") ? GetIntAttr(node, "batch_dim") : 0;
const int num_inputs = GetInputsCount(node, tf_import_flags);
for (int i = 0; i < num_inputs; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
model->operators.push_back(std::move(op));
return absl::OkStatus();
}
void StripCaretFromArrayNames(Model* model) {
for (auto& op : model->operators) {
for (auto& input : op->inputs) {
input = std::string(absl::StripPrefix(input, "^"));
}
for (auto& output : op->outputs) {
output = std::string(absl::StripPrefix(output, "^"));
}
}
for (auto& array : model->GetArrayMap()) {
if (absl::StartsWith(array.first, "^")) {
LOG(FATAL) << "What?";
}
}
}
void StripZeroOutputIndexFromInputs(NodeDef* node) {
for (auto& input : *node->mutable_input()) {
input = std::string(absl::StripSuffix(input, ":0"));
}
}
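// Graphs can consume "name:i" outputs that no operator explicitly declares.
// AddExtraOutputs collects every consumed array name (operator inputs, model
// output arrays, RNN back-edge sources) and pads the producing operator's
// output list up to the referenced index so those implicit outputs exist.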
void AddExtraOutputs(Model* model) {
std::vector<std::string> consumed_arrays;
for (const auto& consumer_op : model->operators) {
for (const std::string& input : consumer_op->inputs) {
consumed_arrays.push_back(input);
}
}
for (const std::string& output_array : model->flags.output_arrays()) {
consumed_arrays.push_back(output_array);
}
for (const auto& rnn_state : model->flags.rnn_states()) {
consumed_arrays.push_back(rnn_state.back_edge_source_array());
}
for (const std::string& consumed_array : consumed_arrays) {
if (GetOpWithOutput(*model, consumed_array)) {
continue;
}
const std::vector<std::string>& split = absl::StrSplit(consumed_array, ':');
if (split.size() != 2) {
continue;
}
int output_index = 0;
if (!absl::SimpleAtoi(split[1], &output_index)) {
continue;
}
auto* producer_op = GetOpWithOutput(*model, split[0]);
if (!producer_op) {
continue;
}
while (producer_op->outputs.size() <= static_cast<size_t>(output_index)) {
using toco::port::StringF;
producer_op->outputs.push_back(
StringF("%s:%d", split[0], producer_op->outputs.size()));
}
}
}
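// Inlining operates on a copy of the GraphDef: the copy has every
// kNoInlineAttr cleared, is imported into a tensorflow::Graph with shape
// validation disabled, and ExpandInlineFunctions is applied until it reaches
// a fixed point. The caller's GraphDef is overwritten only if something was
// actually inlined.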
bool InlineAllFunctions(GraphDef* graphdef) {
if (graphdef->library().function().empty()) {
VLOG(kLogLevelModelUnchanged) << "No functions to inline.";
return false;
}
GraphDef graphdef_copy(*graphdef);
for (auto& function :
(*graphdef_copy.mutable_library()->mutable_function())) {
auto* attributes = function.mutable_attr();
if (attributes->count(tensorflow::kNoInlineAttr) != 0) {
(*attributes)[tensorflow::kNoInlineAttr].set_b(false);
}
}
tensorflow::SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 1});
std::vector<std::unique_ptr<tensorflow::Device>> devices;
TF_CHECK_OK(tensorflow::DeviceFactory::AddDevices(
options, "/job:localhost/replica:0/task:0", &devices));
tensorflow::FunctionLibraryDefinition fld(tensorflow::OpRegistry::Global(),
graphdef_copy.library());
tensorflow::StaticDeviceMgr device_mgr(std::move(devices));
tensorflow::ProcessFunctionLibraryRuntime pflr(
&device_mgr, tensorflow::Env::Default(), &options.config,
TF_GRAPH_DEF_VERSION, &fld,
options.config.graph_options().optimizer_options(), nullptr);
tensorflow::FunctionLibraryRuntime* flr;
flr = pflr.GetFLR("/job:localhost/replica:0/task:0/cpu:0");
tensorflow::Graph graph(fld);
tensorflow::ImportGraphDefOptions gc_opts;
gc_opts.validate_shape = false;
const auto& tf_convert_status = tensorflow::ImportGraphDef(
gc_opts, graphdef_copy, &graph, nullptr, nullptr);
if (!tf_convert_status.ok()) {
LOG(ERROR) << "tensorflow::ImportGraphDef failed with status: "
<< tf_convert_status.ToString();
return false;
}
bool graph_modified = false;
while (tensorflow::ExpandInlineFunctions(flr, &graph)) {
graph_modified = true;
}
if (graph_modified) {
LOG(INFO) << "Found and inlined TensorFlow functions.";
graph.ToGraphDef(graphdef);
}
return graph_modified;
}
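// TopK's "k" can arrive either as a node attribute or as a second input
// tensor; the attribute form is materialized as a one-element const array so
// both forms produce the same two-output TFLite op.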
tensorflow::Status ConvertTopKV2Operator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK((node.op() == "TopK") || (node.op() == "TopKV2"));
auto op = std::make_unique<TopKV2Operator>();
op->inputs.push_back(node.input(0));
if (HasAttr(node, "k")) {
std::string k_array = CreateConstArray<ArrayDataType::kInt32>(
model, node.name() + "k", {static_cast<int32>(GetIntAttr(node, "k"))});
op->inputs.push_back(k_array);
} else {
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
op->inputs.push_back(node.input(1));
}
op->outputs.push_back(node.name());
op->outputs.push_back(node.name() + ":1");
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
tensorflow::Status ConvertDynamicPartitionOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
auto op = std::make_unique<DynamicPartitionOperator>();
CHECK(HasAttr(node, "num_partitions"));
op->num_partitions = GetIntAttr(node, "num_partitions");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
op->inputs.push_back(node.input(0));
op->inputs.push_back(node.input(1));
CHECK_GT(op->num_partitions, 1);
op->outputs.push_back(node.name());
for (int i = 1; i < op->num_partitions; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i));
}
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
tensorflow::Status ConvertDynamicStitchOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK(node.op() == "DynamicStitch" || node.op() == "ParallelDynamicStitch");
auto op = std::make_unique<DynamicStitchOperator>();
CHECK(HasAttr(node, "N"));
op->num_partitions = GetIntAttr(node, "N");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, op->num_partitions * 2));
for (int i = 0; i < op->num_partitions * 2; ++i) {
op->inputs.push_back(node.input(i));
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
tensorflow::Status ConvertSparseToDenseOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "SparseToDense");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 4));
auto* op = new SparseToDenseOperator;
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name());
op->validate_indices = HasAttr(node, "validate_indices")
? GetBoolAttr(node, "validate_indices")
: true;
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertOneHotOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "OneHot");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 4));
const auto dtype = GetDataTypeAttr(node, "T");
CHECK(dtype == DT_INT32 || dtype == DT_INT64 || dtype == DT_FLOAT ||
dtype == DT_BOOL);
auto op = std::make_unique<OneHotOperator>();
op->axis = HasAttr(node, "axis") ? GetIntAttr(node, "axis") : -1;
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name());
model->operators.emplace_back(op.release());
return absl::OkStatus();
}
tensorflow::Status ConvertCTCBeamSearchDecoderOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "CTCBeamSearchDecoder");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 2));
auto* op = new CTCBeamSearchDecoderOperator;
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->beam_width =
HasAttr(node, "beam_width") ? GetIntAttr(node, "beam_width") : 1;
op->top_paths =
HasAttr(node, "top_paths") ? GetIntAttr(node, "top_paths") : 1;
op->merge_repeated = HasAttr(node, "merge_repeated")
? GetBoolAttr(node, "merge_repeated")
: true;
op->outputs.push_back(node.name());
for (int i = 0; i < op->top_paths; ++i) {
op->outputs.push_back(node.name() + ":" + std::to_string(i + 1));
}
model->operators.emplace_back(op);
return absl::OkStatus();
}
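// "_tflite_input_indices" maps the node's possibly sparse inputs onto the 20
// canonical LSTM input slots. Slots with no corresponding input are filled
// with freshly created optional arrays, covering both the compacted-input
// and the full-input encodings of the attribute.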
tensorflow::Status ConvertUnidirectionalSequenceLstm(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
DCHECK_EQ(node.op(), "UnidirectionalSequenceLstm");
const auto& indices = GetListAttr(node, "_tflite_input_indices");
auto* op = new UnidirectionalSequenceLstmOperator();
const int kInputsSize = 20;
op->inputs.resize(kInputsSize);
if (indices.i_size() != node.input().size()) {
int count = 0;
for (int idx = 0; idx < kInputsSize; ++idx) {
if (count < indices.i_size() && indices.i(count) == idx) {
op->inputs[idx] = node.input(idx);
count++;
} else {
std::string optional_name = node.name() + "_" + std::to_string(idx);
model->CreateOptionalArray(optional_name);
op->inputs[idx] = optional_name;
}
}
} else {
std::vector<bool> done(kInputsSize);
int idx = 0;
for (const std::string& input : node.input()) {
int real_index = indices.i(idx);
op->inputs[real_index] = (input);
done[real_index] = true;
idx++;
}
for (size_t idx = 0; idx < done.size(); idx++) {
if (!done[idx]) {
std::string optional_name = node.name() + "_" + std::to_string(idx);
model->CreateOptionalArray(optional_name);
op->inputs[idx] = optional_name;
}
}
}
op->outputs.push_back(node.name() + ":2");
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertLeakyReluOperator(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
CHECK_EQ(node.op(), "LeakyRelu");
TF_QCHECK_OK(CheckInputsCount(node, tf_import_flags, 1));
CHECK_EQ(GetDataTypeAttr(node, "T"), DT_FLOAT);
const auto& input_name = node.input(0);
auto* op = new LeakyReluOperator;
op->inputs.push_back(input_name);
op->outputs.push_back(node.name());
op->alpha = GetFloatAttr(node, "alpha");
model->operators.emplace_back(op);
return absl::OkStatus();
}
tensorflow::Status ConvertUnidirectionalSequenceRnn(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model) {
DCHECK_EQ(node.op(), "UnidirectionalSequenceRnn");
const auto& indices = GetListAttr(node, "_tflite_input_indices");
if (indices.i_size() != node.input().size()) {
return tensorflow::errors::InvalidArgument("Input size does not match.");
}
auto* op = new UnidirectionalSequenceRnnOperator();
for (const std::string& input : node.input()) {
op->inputs.push_back(input);
}
op->outputs.push_back(node.name() + ":1");
model->operators.emplace_back(op);
return absl::OkStatus();
}
}
namespace internal {
using ConverterType = tensorflow::Status (*)(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model);
using ConverterMapType = std::unordered_map<std::string, ConverterType>;
ConverterMapType GetTensorFlowNodeConverterMapForFlex() {
return std::unordered_map<std::string, ConverterType>({
{"LegacyFedInput", ConvertPlaceholderOperator},
{"Placeholder", ConvertPlaceholderOperator},
{"Const", ConditionallyConvertConstOperator},
});
}
ConverterMapType GetTensorFlowNodeConverterMap() {
return std::unordered_map<std::string, ConverterType>({
{"Abs", ConvertSimpleOperator<AbsOperator, kAnyNumInputs, 1>},
{"Add", ConvertSimpleOperator<AddOperator, 2, 1>},
{"AddV2", ConvertSimpleOperator<AddOperator, 2, 1>},
{"AddN", ConvertSimpleOperator<AddNOperator, kAnyNumInputs, 1>},
{"All", ConvertSimpleOperator<TensorFlowAllOperator, kAnyNumInputs, 1>},
{"Any", ConvertReduceOperator<TensorFlowAnyOperator>},
{"ArgMax", ConvertArgMaxOperator},
{"ArgMin", ConvertArgMinOperator},
{"Assert",
ConvertSimpleOperator<TensorFlowAssertOperator, kAnyNumInputs, 1>},
{"AvgPool", ConvertAvgPoolOperator},
{"BatchMatMul", ConvertBatchMatMulOperator},
{"BatchMatMulV2", ConvertBatchMatMulOperator},
{"BatchNormWithGlobalNormalization",
ConvertBatchNormWithGlobalNormalizationOperator},
{"BatchToSpaceND", ConvertBatchToSpaceNDOperator},
{"BiasAdd", ConvertBiasAddOperator},
{"Cast", ConvertCastOperator},
{"Ceil", ConvertCeilOperator},
{"CheckNumerics", ConvertIdentityOperator},
{"Concat", ConvertConcatOperator},
{"ConcatV2", ConvertConcatOperator},
{"Const", ConvertConstOperator},
{"Conv2D", ConvertConvOperator},
{"Conv2DBackpropInput", ConvertTransposeConvOperator},
{"Cos", ConvertSimpleOperator<CosOperator, 1, 1>},
{"CTCBeamSearchDecoder", ConvertCTCBeamSearchDecoderOperator},
{"DepthToSpace", ConvertDepthToSpaceOperator},
{"DepthwiseConv2dNative", ConvertDepthwiseConvOperator},
{"Div", ConvertSimpleOperator<DivOperator, 2, 1>},
{"DynamicPartition", ConvertDynamicPartitionOperator},
{"DynamicStitch", ConvertDynamicStitchOperator},
{"Elu", ConvertSimpleOperator<EluOperator, 1, 1>},
{"EnsureShape", ConvertIdentityOperator},
{"Equal", ConvertSimpleOperator<TensorFlowEqualOperator, 2, 1>},
{"Exp", ConvertSimpleOperator<ExpOperator, 1, 1>},
{"ExpandDims", ConvertSimpleOperator<ExpandDimsOperator, 2, 1>},
{"FakeQuantWithMinMaxArgs", ConvertFakeQuantWithMinMaxArgs},
{"FakeQuantWithMinMaxVars", ConvertFakeQuantWithMinMaxVars},
{"Fill", ConvertSimpleOperator<FillOperator, 2, 1>},
{"Floor", ConvertFloorOperator},
{"FloorDiv", ConvertSimpleOperator<FloorDivOperator, 2, 1>},
{"FloorMod", ConvertSimpleOperator<FloorModOperator, 2, 1>},
{"FusedBatchNorm", ConvertFusedBatchNormOperator},
{"FusedBatchNormV3", ConvertFusedBatchNormOperator},
{"Gather", ConvertGatherOperator},
{"GatherV2", ConvertGatherOperator},
{"GatherNd", ConvertGatherNdOperator},
{"Greater", ConvertSimpleOperator<TensorFlowGreaterOperator, 2, 1>},
{"GreaterEqual",
ConvertSimpleOperator<TensorFlowGreaterEqualOperator, 2, 1>},
{"Identity", ConvertIdentityOperator},
{"IdentityN", ConvertIdentityNOperator},
{"LRN", ConvertLRNOperator},
{"LeakyRelu", ConvertLeakyReluOperator},
{"LegacyFedInput", ConvertPlaceholderOperator},
{"Less", ConvertSimpleOperator<TensorFlowLessOperator, 2, 1>},
{"LessEqual", ConvertSimpleOperator<TensorFlowLessEqualOperator, 2, 1>},
{"Log", ConvertSimpleOperator<LogOperator, 1, 1>},
{"LogicalAnd", ConvertSimpleOperator<LogicalAndOperator, 2, 1>},
{"LogicalOr", ConvertSimpleOperator<LogicalOrOperator, 2, 1>},
{"LogicalNot", ConvertSimpleOperator<LogicalNotOperator, 1, 1>},
{"LogSoftmax", ConvertSimpleOperator<LogSoftmaxOperator, 1, 1>},
{"MatMul", ConvertMatMulOperator},
{"MatrixDiag", ConvertSimpleOperator<MatrixDiagOperator, 1, 1>},
{"MatrixDiagV2", ConvertSimpleOperator<MatrixDiagV2Operator, 5, 1>},
{"MatrixDiagV3", ConvertSimpleOperator<MatrixDiagV3Operator, 5, 1>},
{"MatrixSetDiag", ConvertSimpleOperator<MatrixSetDiagOperator, 2, 1>},
{"MatrixSetDiagV2", ConvertSimpleOperator<MatrixSetDiagV2Operator, 3, 1>},
{"MatrixSetDiagV3", ConvertSimpleOperator<MatrixSetDiagV3Operator, 3, 1>},
{"Max", ConvertReduceOperator<TensorFlowMaxOperator>},
{"MaxPool", ConvertMaxPoolOperator},
{"Maximum", ConvertSimpleOperator<TensorFlowMaximumOperator, 2, 1>},
{"Mean", ConvertReduceOperator<MeanOperator>},
{"Merge",
ConvertSimpleOperator<TensorFlowMergeOperator, kAnyNumInputs, 1>},
{"Min", ConvertReduceOperator<TensorFlowMinOperator>},
{"Minimum", ConvertSimpleOperator<TensorFlowMinimumOperator, 2, 1>},
{"Mul", ConvertSimpleOperator<MulOperator, 2, 1>},
{"Neg", ConvertSimpleOperator<NegOperator, 1, 1>},
{"NextIteration", ConvertOperatorSpecialCasedAsRNNBackEdge},
{"NoOp", ConvertNoOpOperator},
{"NotEqual", ConvertSimpleOperator<TensorFlowNotEqualOperator, 2, 1>},
{"OneHot", ConvertOneHotOperator},
{"Pack", ConvertPackOperator},
{"Pad", ConvertSimpleOperator<PadOperator, 2, 1>},
{"PadV2", ConvertSimpleOperator<PadV2Operator, 3, 1>},
{"ParallelDynamicStitch", ConvertDynamicStitchOperator},
{"Placeholder", ConvertPlaceholderOperator},
{"PlaceholderWithDefault", ConvertIdentityOperator},
{"Pow", ConvertSimpleOperator<PowOperator, 2, 1>},
{"Prod", ConvertReduceOperator<TensorFlowProdOperator>},
{"RandomUniform", ConvertRandomUniform},
{"Range", ConvertRangeOperator},
{"Rank", ConvertSimpleOperator<TensorFlowRankOperator, 1, 1>},
{"RealDiv", ConvertSimpleOperator<DivOperator, 2, 1>},
{"Relu", ConvertSimpleOperator<ReluOperator, 1, 1>},
{"Relu6", ConvertSimpleOperator<Relu6Operator, 1, 1>},
{"Reshape", ConvertSimpleOperator<TensorFlowReshapeOperator, 2, 1>},
{"ResizeBilinear", ConvertResizeBilinearOperator},
{"ResizeNearestNeighbor", ConvertResizeNearestNeighborOperator},
{"ReverseSequence", ConvertReverseSequenceOperator},
{"ReverseV2", ConvertSimpleOperator<ReverseV2Operator, 2, 1>},
{"Round", ConvertRoundOperator},
{"Rsqrt", ConvertSimpleOperator<TensorFlowRsqrtOperator, 1, 1>},
{"ScatterNd", ConvertSimpleOperator<ScatterNdOperator, 3, 1>},
{"SegmentSum", ConvertSimpleOperator<SegmentSumOperator, 2, 1>},
{"Select", ConvertSimpleOperator<SelectOperator, 3, 1>},
{"SelectV2", ConvertSimpleOperator<SelectOperator, 3, 1>},
{"Shape", ConvertShapeOperator},
{"Sigmoid", ConvertSimpleOperator<LogisticOperator, 1, 1>},
{"Sin", ConvertSimpleOperator<SinOperator, 1, 1>},
{"Slice", ConvertSimpleOperator<SliceOperator, 3, 1>},
{"Softmax", ConvertSoftmaxOperator},
{"SpaceToBatchND", ConvertSpaceToBatchNDOperator},
{"SpaceToDepth", ConvertSpaceToDepthOperator},
{"SparseToDense", ConvertSparseToDenseOperator},
{"Split", ConvertSplitOperator},
{"SplitV", ConvertSplitVOperator},
{"Sqrt", ConvertSimpleOperator<TensorFlowSqrtOperator, 1, 1>},
{"Square", ConvertSimpleOperator<TensorFlowSquareOperator, 1, 1>},
{"SquaredDifference",
ConvertSimpleOperator<SquaredDifferenceOperator, 2, 1>},
{"Snapshot", ConvertIdentityOperator},
{"Squeeze", ConvertSqueezeOperator},
{"StopGradient", ConvertIdentityOperator},
{"StridedSlice", ConvertStridedSliceOperator},
{"Sub", ConvertSimpleOperator<SubOperator, 2, 1>},
{"Sum", ConvertReduceOperator<TensorFlowSumOperator>},
{"Svdf", ConvertSvdfOperator},
{"Switch", ConvertSwitchOperator},
{"Tanh", ConvertSimpleOperator<TanhOperator, 1, 1>},
{"Tile", ConvertSimpleOperator<TensorFlowTileOperator, 2, 1>},
{"TopK", ConvertTopKV2Operator},
{"TopKV2", ConvertTopKV2Operator},
{"Transpose", ConvertSimpleOperator<TransposeOperator, 2, 1>},
{"Unpack", ConvertUnpackOperator},
{"ZerosLike", ConvertSimpleOperator<TensorFlowZerosLikeOperator, 1, 1>},
{"UnidirectionalSequenceLstm", ConvertUnidirectionalSequenceLstm},
{"UnidirectionalSequenceRnn", ConvertUnidirectionalSequenceRnn},
{"MirrorPad", ConvertMirrorPadOperator},
{"Unique", ConvertSimpleOperator<UniqueOperator, 1, 2>},
{"Where", ConvertSimpleOperator<WhereOperator, 1, 1>},
});
}
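// Node dispatch: ops with a registered converter use it; anything else falls
// through to ConvertUnsupportedOperator, which keeps the node as an opaque
// TensorFlowUnsupportedOperator instead of failing the import.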
tensorflow::Status ImportTensorFlowNode(
const tensorflow::NodeDef& node,
const TensorFlowImportFlags& tf_import_flags, const ModelFlags& model_flags,
Model* model, const ConverterMapType& converter_map) {
auto converter = converter_map.find(node.op());
if (converter == converter_map.end()) {
return ConvertUnsupportedOperator(node, tf_import_flags, model_flags,
model);
} else {
return converter->second(node, tf_import_flags, model_flags, model);
}
}
}
std::unique_ptr<Model> ImportTensorFlowGraphDef(
const ModelFlags& model_flags, const TensorFlowImportFlags& tf_import_flags,
const GraphDef& tf_graph) {
LogDumpGraphDef(kLogLevelModelChanged, "AT IMPORT", tf_graph);
GraphDef inlined_graph(tf_graph);
if (InlineAllFunctions(&inlined_graph)) {
LogDumpGraphDef(kLogLevelModelChanged, "AFTER INLINING", inlined_graph);
}
for (const auto& specified_input_array : model_flags.input_arrays()) {
CHECK(!absl::EndsWith(specified_input_array.name(), ":0"))
<< "Unsupported explicit zero output index: "
<< specified_input_array.name();
}
for (const std::string& specified_output_array :
model_flags.output_arrays()) {
CHECK(!absl::EndsWith(specified_output_array, ":0"))
<< "Unsupported explicit zero output index: " << specified_output_array;
}
Model* model = new Model;
internal::ConverterMapType converter_map;
if (!tf_import_flags.import_all_ops_as_unsupported) {
converter_map = internal::GetTensorFlowNodeConverterMap();
} else {
converter_map = internal::GetTensorFlowNodeConverterMapForFlex();
}
for (auto node : inlined_graph.node()) {
StripZeroOutputIndexFromInputs(&node);
auto status = internal::ImportTensorFlowNode(
node, tf_import_flags, model_flags, model, converter_map);
CHECK(status.ok()) << status.message();
}
ResolveModelFlags(model_flags, model);
StripCaretFromArrayNames(model);
AddExtraOutputs(model);
FixNoMissingArray(model);
FixNoOrphanedArray(model);
FixOperatorOrdering(model);
CheckInvariants(*model);
for (const auto& rnn_state : model->flags.rnn_states()) {
model->GetArray(rnn_state.state_array()).buffer = nullptr;
}
return std::unique_ptr<Model>(model);
}
std::unique_ptr<Model> ImportTensorFlowGraphDef(
const ModelFlags& model_flags, const TensorFlowImportFlags& tf_import_flags,
const std::string& input_file_contents) {
std::unique_ptr<GraphDef> tf_graph(new GraphDef);
CHECK(ParseFromStringEitherTextOrBinary(input_file_contents, tf_graph.get()));
std::unique_ptr<GraphDef> pruned_graph =
MaybeReplaceCompositeSubgraph(*tf_graph);
if (pruned_graph) {
tf_graph = std::move(pruned_graph);
}
return ImportTensorFlowGraphDef(model_flags, tf_import_flags, *tf_graph);
}
} | #include "tensorflow/lite/toco/import_tensorflow.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
using tensorflow::AttrValue;
using tensorflow::DT_BOOL;
using tensorflow::DT_COMPLEX64;
using tensorflow::DT_FLOAT;
using tensorflow::DT_INT32;
using tensorflow::DT_INT64;
using tensorflow::DT_INVALID;
using tensorflow::DT_QUINT8;
using tensorflow::DT_STRING;
using tensorflow::DT_UINT16;
using tensorflow::DT_UINT32;
using tensorflow::NodeDef;
using tensorflow::Status;
using ::testing::ElementsAre;
namespace internal {
using ConverterType = tensorflow::Status (*)(
const NodeDef& node, const TensorFlowImportFlags& tf_import_flags,
const ModelFlags& model_flags, Model* model);
using ConverterMapType = std::unordered_map<std::string, ConverterType>;
ConverterMapType GetTensorFlowNodeConverterMap();
ConverterMapType GetTensorFlowNodeConverterMapForFlex();
Status ImportTensorFlowNode(const NodeDef&, const TensorFlowImportFlags&,
const ModelFlags& model_flags, Model*,
const ConverterMapType&);
}
namespace {
Status ImportNode(const NodeDef& node, Model* model) {
const auto converter = internal::GetTensorFlowNodeConverterMap();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), model, converter);
}
Status ImportFlexNode(const NodeDef& node, Model* model) {
const auto converter = internal::ConverterMapType();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), model, converter);
}
Status ImportNode(const NodeDef& node) {
Model model;
return ImportNode(node, &model);
}
NodeDef BuildNode(
const std::string& op,
const std::vector<std::initializer_list<int>>& output_shapes) {
NodeDef node;
node.set_op(op);
node.set_name("Node1");
node.add_input();
node.set_input(0, "Node0");
AttrValue::ListValue* shapes =
(*node.mutable_attr())["_output_shapes"].mutable_list();
for (const auto& output_shape : output_shapes) {
tensorflow::TensorShapeProto* shape = shapes->add_shape();
for (int64_t output_shape_dim : output_shape) {
auto shape_dim = shape->add_dim();
shape_dim->set_size(output_shape_dim);
}
}
return node;
}
namespace {
void BuildConstNode(std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements,
NodeDef* node) {
node->set_op("Const");
node->set_name("Node1");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["dtype"] = dtype_attr;
tensorflow::TensorProto t;
t.set_dtype(dtype);
auto* s = t.mutable_tensor_shape();
for (auto d : shape) {
s->add_dim()->set_size(d);
}
switch (dtype) {
case DT_FLOAT:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_float_val(i / 10000.0 + 1);
}
break;
case DT_INT32:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<int>::max() + 1);
}
break;
case DT_UINT32:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint32_t>::max() + 1);
}
break;
case DT_QUINT8:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint8_t>::max() + 1);
}
break;
case DT_INT64:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int64_val(i + 1);
}
break;
case DT_UINT16:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_int_val(i % std::numeric_limits<uint16_t>::max() + 1);
}
break;
case DT_STRING:
break;
case DT_BOOL:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_bool_val((i % 2) == 0);
}
break;
case DT_COMPLEX64:
for (int64_t i = 0; i < num_elements; ++i) {
t.add_scomplex_val(i / 10000.0 + 1);
t.add_scomplex_val(-i / 10000.0 - 1);
}
break;
default:
break;
}
AttrValue value_attr;
SetAttrValue(t, &value_attr);
(*node->mutable_attr())["value"] = value_attr;
}
}
TEST(FlexImportTest, ConditionalConst) {
Model model;
auto build_and_import_node =
[&model](const std::string& name, std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements) {
NodeDef node;
BuildConstNode(shape, dtype, num_elements, &node);
node.set_name(name);
const auto converter = internal::GetTensorFlowNodeConverterMapForFlex();
return internal::ImportTensorFlowNode(node, TensorFlowImportFlags(),
ModelFlags(), &model, converter);
};
EXPECT_TRUE(build_and_import_node("Known", {1, 2, 3}, DT_INT32, 6).ok());
EXPECT_TRUE(build_and_import_node("BadType", {1, 2, 3}, DT_INVALID, 6).ok());
EXPECT_TRUE(build_and_import_node("Unknown", {1, -2, 3}, DT_INT32, 6).ok());
EXPECT_EQ(model.operators.size(), 2);
EXPECT_TRUE(model.HasArray("Known"));
EXPECT_FALSE(model.HasArray("Unknown"));
EXPECT_FALSE(model.HasArray("BadType"));
}
TEST(FlexImportTest, SoftmaxWithBeta) {
NodeDef node;
node.set_op("Softmax");
node.set_name("softmax");
node.add_input();
node.set_input(0, "logits");
AttrValue dtype_attr;
SetAttrValue(0.5, &dtype_attr);
(*node.mutable_attr())["_softmax_beta"] = dtype_attr;
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax);
const SoftmaxOperator* op =
static_cast<const SoftmaxOperator*>(model.operators[0].get());
EXPECT_EQ(op->beta, 0.5);
}
TEST(FlexImportTest, SoftmaxWithoutBeta) {
NodeDef node;
node.set_op("Softmax");
node.set_name("softmax");
node.add_input();
node.set_input(0, "logits");
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kSoftmax);
const SoftmaxOperator* op =
static_cast<const SoftmaxOperator*>(model.operators[0].get());
EXPECT_EQ(op->beta, 1.0);
}
class ShapeImportTest : public ::testing::TestWithParam<tensorflow::DataType> {
};
TEST_P(ShapeImportTest, ShapeElementIsNegative) {
NodeDef node;
BuildConstNode({1, -2, 10}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(
status.message(),
"Tensor shape should not include negative values\n\t (while processing "
"node 'Node1')");
}
TEST_P(ShapeImportTest, ShapeElementIsZero) {
NodeDef node;
BuildConstNode({1, 0, 10}, GetParam(), 0, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
const auto& array = model.GetArray("Node1");
EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre());
}
TEST_P(ShapeImportTest, ShapeIsOneDimZero) {
NodeDef node;
BuildConstNode({0}, GetParam(), 0, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
const auto& array = model.GetArray("Node1");
EXPECT_THAT(array.shape().dims(), ::testing::ElementsAre());
}
TEST_P(ShapeImportTest, ShapeElementTooLarge) {
NodeDef node;
BuildConstNode({3000000000}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(status.message(),
"Shape element overflows\n\t (while processing node 'Node1')");
}
TEST_P(ShapeImportTest, ShapeTooLarge) {
NodeDef node;
BuildConstNode({1000000, 2000000, 2000000, 2000000}, GetParam(), 0, &node);
auto status = ImportNode(node);
EXPECT_EQ(status.message(),
"Tensor shape is too large\n\t (while processing node 'Node1')");
}
std::vector<tensorflow::DataType> TestTypes() {
return {DT_FLOAT, DT_INT32, DT_INT64, DT_BOOL, DT_QUINT8, DT_COMPLEX64};
}
INSTANTIATE_TEST_SUITE_P(ShapeImportTest, ShapeImportTest,
::testing::ValuesIn(TestTypes()));
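// The content tests below pin down toco's handling of truncated const
// tensors: when the value list is shorter than the shape implies, the last
// remaining element is replicated to fill the buffer, and an empty list
// yields all zeros.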
class ContentImportTest : public ::testing::Test {
public:
template <ArrayDataType T>
std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) {
Model model;
auto status = ImportNode(node, &model);
CHECK(status.ok()) << status.message();
const auto& array = model.GetArray("Node1");
return array.GetBuffer<T>().data;
}
void RemoveTrailingElements(NodeDef* node, int num) {
tensorflow::TensorProto* p =
node->mutable_attr()->at("value").mutable_tensor();
for (int i = 0; i < num; ++i) {
if (p->int_val_size() > 0) p->mutable_int_val()->RemoveLast();
if (p->int64_val_size() > 0) p->mutable_int64_val()->RemoveLast();
if (p->float_val_size() > 0) p->mutable_float_val()->RemoveLast();
if (p->bool_val_size() > 0) p->mutable_bool_val()->RemoveLast();
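// A DT_COMPLEX64 element is stored as two scomplex_val entries (real then
// imaginary), so one logical element removal pops scomplex_val twice.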
if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast();
if (p->scomplex_val_size() > 0) p->mutable_scomplex_val()->RemoveLast();
}
}
};
TEST_F(ContentImportTest, Int32) {
constexpr ArrayDataType kType = ArrayDataType::kInt32;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_INT32, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Int64) {
constexpr ArrayDataType kType = ArrayDataType::kInt64;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_INT64, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Quint8) {
constexpr ArrayDataType kType = ArrayDataType::kUint8;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_QUINT8, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 5));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Bool) {
constexpr ArrayDataType kType = ArrayDataType::kBool;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_BOOL, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 1));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 1, 1, 1, 1, 1));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(0, 0, 0, 0, 0, 0));
}
TEST_F(ContentImportTest, Float) {
constexpr ArrayDataType kType = ArrayDataType::kFloat;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_FLOAT, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0004));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000));
}
TEST_F(ContentImportTest, Complex64) {
constexpr ArrayDataType kType = ArrayDataType::kComplex64;
NodeDef node;
BuildConstNode({1, 2, 3}, DT_COMPLEX64, 6, &node);
using cplx = std::complex<float>;
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001),
cplx(1.0002, -1.0002), cplx(1.0003, -1.0003),
cplx(1.0004, -1.0004), cplx(1.0005, -1.0005)));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0001, -1.0001),
cplx(1.0002, -1.0002), cplx(1.0003, -1.0003),
cplx(1.0004, -1.0004), cplx(1.0004, -1.0004)));
RemoveTrailingElements(&node, 4);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(1.0000, -1.0000), cplx(1.0000, -1.0000),
cplx(1.0000, -1.0000), cplx(1.0000, -1.0000),
cplx(1.0000, -1.0000), cplx(1.0000, -1.0000)));
RemoveTrailingElements(&node, 1);
EXPECT_THAT(
ImportAndGetData<kType>(node),
ElementsAre(std::complex<float>(0.0000, 0.0000), cplx(0.0000, 0.0000),
cplx(0.0000, 0.0000), cplx(0.0000, 0.0000),
cplx(0.0000, 0.0000), cplx(0.0000, 0.0000)));
}
std::vector<std::pair<tensorflow::DataType, ArrayDataType>> UnaryTestTypes() {
return {{DT_FLOAT, ArrayDataType::kFloat},
{DT_INT32, ArrayDataType::kInt32},
{DT_INT64, ArrayDataType::kInt64}};
}
class TensorContentTest : public ::testing::Test {
public:
template <ArrayDataType T>
std::vector<DataType<T>> ImportAndGetData(const NodeDef& node) {
Model model;
auto status = ImportNode(node, &model);
CHECK(status.ok()) << status.message();
const auto& nodearray = model.GetArray("Node1");
return nodearray.GetBuffer<T>().data;
}
template <class T>
void NodeWithTensorContent(std::initializer_list<int64_t> shape,
tensorflow::DataType dtype, int64_t num_elements,
NodeDef* node) {
node->set_op("Const");
node->set_name("Node1");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["dtype"] = dtype_attr;
auto allocated_content = std::make_unique<T[]>(num_elements);
tensorflow::TensorProto t;
t.set_dtype(dtype);
auto* s = t.mutable_tensor_shape();
for (const auto& d : shape) {
s->add_dim()->set_size(d);
}
switch (dtype) {
case DT_FLOAT:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i / 10000.0 + 1;
}
break;
case DT_INT32:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i % std::numeric_limits<int>::max() + 1;
}
break;
case DT_QUINT8:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i % std::numeric_limits<uint8_t>::max() + 1;
}
break;
case DT_INT64:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = i + 1;
}
break;
case DT_STRING:
break;
case DT_BOOL:
for (int64_t i = 0; i < num_elements; ++i) {
allocated_content[i] = ((i % 2) == 0);
}
break;
default:
break;
}
t.set_tensor_content(
std::string(reinterpret_cast<const char*>(allocated_content.get()),
num_elements * sizeof(T)));
AttrValue value_attr;
SetAttrValue(t, &value_attr);
(*node->mutable_attr())["value"] = value_attr;
allocated_content.reset();
}
};
TEST_F(TensorContentTest, Int64) {
constexpr ArrayDataType kType = ArrayDataType::kInt64;
NodeDef node;
NodeWithTensorContent<int64_t>({1, 2, 3}, DT_INT64, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Int32) {
constexpr ArrayDataType kType = ArrayDataType::kInt32;
NodeDef node;
NodeWithTensorContent<int>({1, 2, 3}, DT_INT32, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Float) {
constexpr ArrayDataType kType = ArrayDataType::kFloat;
NodeDef node;
NodeWithTensorContent<float>({1, 2, 3}, DT_FLOAT, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node),
ElementsAre(1.0000, 1.0001, 1.0002, 1.0003, 1.0004, 1.0005));
}
TEST_F(TensorContentTest, Quint8) {
constexpr ArrayDataType kType = ArrayDataType::kUint8;
NodeDef node;
NodeWithTensorContent<uint8_t>({1, 2, 3}, DT_QUINT8, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 2, 3, 4, 5, 6));
}
TEST_F(TensorContentTest, Bool) {
constexpr ArrayDataType kType = ArrayDataType::kBool;
NodeDef node;
NodeWithTensorContent<bool>({1, 2, 3}, DT_BOOL, 6, &node);
EXPECT_THAT(ImportAndGetData<kType>(node), ElementsAre(1, 0, 1, 0, 1, 0));
}
class TypeImportTest : public ::testing::TestWithParam<
std::pair<tensorflow::DataType, ArrayDataType>> {
protected:
TypeImportTest() {}
void BuildUnaryNode(const std::string& op_name, tensorflow::DataType dtype,
NodeDef* node) {
node->set_op(op_name);
node->set_name("Node1");
node->add_input();
node->set_input(0, "Node0");
AttrValue dtype_attr;
SetAttrValue(dtype, &dtype_attr);
(*node->mutable_attr())["T"] = dtype_attr;
}
};
TEST_P(TypeImportTest, BasicTypeInference) {
NodeDef node;
BuildUnaryNode("Atan", GetParam().first, &node);
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_THAT(op->output_data_types, ::testing::ElementsAre(GetParam().second));
}
INSTANTIATE_TEST_SUITE_P(BasicTypeInference, TypeImportTest,
::testing::ValuesIn(UnaryTestTypes()));
TEST(ImportTest, TypeInferenceWithFixedOutputType) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("IsFinite", {{1, 2}, {2, 3}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_THAT(op->output_data_types,
::testing::ElementsAre(ArrayDataType::kBool));
}
TEST(ImportTest, FailedTypeInference) {
NodeDef node;
node.set_op("Atan");
node.set_name("Node1");
node.add_input();
node.set_input(0, "Node0");
Model model;
EXPECT_TRUE(ImportNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_TRUE(op->output_data_types.empty());
}
TEST(ImportTest, UnsupportedOpWithOutputShapes) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{1, 2}, {2, 3}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_EQ(op->output_shapes.size(), 2);
ASSERT_THAT(op->output_shapes[0].dims(), ::testing::ElementsAre(1, 2));
ASSERT_THAT(op->output_shapes[1].dims(), ::testing::ElementsAre(2, 3));
}
TEST(ImportTest, UnsupportedOpWithWildcardOutputShapes) {
Model model;
EXPECT_TRUE(ImportNode(BuildNode("Atan", {{-1, 2}}), &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_TRUE(op->output_shapes.empty());
}
TEST(ImportTest, UnsupportedOpWithMultipleOutputs) {
NodeDef node = BuildNode("ParseExample", {});
{
AttrValue value_attr;
SetAttrValue(2, &value_attr);
(*node.mutable_attr())["Nsparse"] = value_attr;
}
{
AttrValue value_attr;
std::vector<tensorflow::DataType> types;
types.push_back(tensorflow::DT_FLOAT);
types.push_back(tensorflow::DT_STRING);
SetAttrValue(types, &value_attr);
(*node.mutable_attr())["sparse_types"] = value_attr;
}
{
AttrValue value_attr;
std::vector<tensorflow::DataType> types;
types.push_back(tensorflow::DT_STRING);
types.push_back(tensorflow::DT_FLOAT);
types.push_back(tensorflow::DT_INT64);
SetAttrValue(types, &value_attr);
(*node.mutable_attr())["Tdense"] = value_attr;
}
Model model;
EXPECT_TRUE(ImportFlexNode(node, &model).ok());
ASSERT_THAT(model.operators.size(), ::testing::Ge(1));
ASSERT_EQ(model.operators[0]->type, OperatorType::kUnsupported);
const TensorFlowUnsupportedOperator* op =
static_cast<const TensorFlowUnsupportedOperator*>(
model.operators[0].get());
ASSERT_EQ(op->outputs.size(), 9);
ASSERT_EQ(op->output_data_types.size(), 9);
ASSERT_EQ(op->outputs[0], "Node1");
ASSERT_EQ(op->outputs[1], "Node1:1");
ASSERT_EQ(op->output_data_types[0], ArrayDataType::kInt64);
ASSERT_EQ(op->output_data_types[1], ArrayDataType::kInt64);
ASSERT_EQ(op->outputs[2], "Node1:2");
ASSERT_EQ(op->outputs[3], "Node1:3");
ASSERT_EQ(op->output_data_types[2], ArrayDataType::kFloat);
ASSERT_EQ(op->output_data_types[3], ArrayDataType::kString);
ASSERT_EQ(op->outputs[4], "Node1:4");
ASSERT_EQ(op->outputs[5], "Node1:5");
ASSERT_EQ(op->output_data_types[4], ArrayDataType::kInt64);
ASSERT_EQ(op->output_data_types[5], ArrayDataType::kInt64);
ASSERT_EQ(op->outputs[6], "Node1:6");
ASSERT_EQ(op->outputs[7], "Node1:7");
ASSERT_EQ(op->outputs[8], "Node1:8");
ASSERT_EQ(op->output_data_types[6], ArrayDataType::kString);
ASSERT_EQ(op->output_data_types[7], ArrayDataType::kFloat);
ASSERT_EQ(op->output_data_types[8], ArrayDataType::kInt64);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/import_tensorflow.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/import_tensorflow_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5c2ea959-8528-49ce-afb1-9865a7548db7 | cpp | google/quiche | quic_interval_set | quiche/quic/core/quic_interval_set.h | quiche/quic/core/quic_interval_set_test.cc | #ifndef QUICHE_QUIC_CORE_QUIC_INTERVAL_SET_H_
#define QUICHE_QUIC_CORE_QUIC_INTERVAL_SET_H_
#include <stddef.h>
#include <algorithm>
#include <initializer_list>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/core/quic_interval.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/platform/api/quiche_containers.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quic {
template <typename T>
class QUICHE_NO_EXPORT QuicIntervalSet {
public:
using value_type = QuicInterval<T>;
private:
struct QUICHE_NO_EXPORT IntervalLess {
using is_transparent = void;
bool operator()(const value_type& a, const value_type& b) const;
bool operator()(const value_type& a, const T& point) const;
bool operator()(const value_type& a, T&& point) const;
bool operator()(const T& point, const value_type& a) const;
bool operator()(T&& point, const value_type& a) const;
};
using Set = quiche::QuicheSmallOrderedSet<value_type, IntervalLess>;
public:
using const_iterator = typename Set::const_iterator;
using const_reverse_iterator = typename Set::const_reverse_iterator;
QuicIntervalSet() = default;
explicit QuicIntervalSet(const value_type& interval) { Add(interval); }
QuicIntervalSet(const T& min, const T& max) { Add(min, max); }
QuicIntervalSet(std::initializer_list<value_type> il) { assign(il); }
void Clear() { intervals_.clear(); }
size_t Size() const { return intervals_.size(); }
value_type SpanningInterval() const;
void Add(const value_type& interval);
void Add(const T& min, const T& max) { Add(value_type(min, max)); }
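// Append fast path: when the new interval begins inside or at the end of the
// current last interval, that interval's max is grown in place instead of
// going through the general Add() merge. The const_cast mutation cannot
// reorder the set: only the max of the final element grows, so it stays the
// largest element under any endpoint-based ordering.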
void AddOptimizedForAppend(const value_type& interval) {
if (Empty() || !GetQuicFlag(quic_interval_set_enable_add_optimization)) {
Add(interval);
return;
}
const_reverse_iterator last_interval = intervals_.rbegin();
if (interval.min() < last_interval->min() ||
interval.min() > last_interval->max()) {
Add(interval);
return;
}
if (interval.max() <= last_interval->max()) {
return;
}
const_cast<value_type*>(&(*last_interval))->SetMax(interval.max());
}
void AddOptimizedForAppend(const T& min, const T& max) {
AddOptimizedForAppend(value_type(min, max));
}
void PopFront() {
QUICHE_DCHECK(!Empty());
intervals_.erase(intervals_.begin());
}
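// TrimLessThan erases every interval that ends at or below |value| and, if
// the first surviving interval straddles |value|, raises its min in place.
// Raising min through const_cast is order-safe here: the clipped interval's
// new min (|value|) is still below the min of every later interval, because
// later intervals start at or after this interval's max.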
bool TrimLessThan(const T& value) {
size_t num_intervals_trimmed = 0;
while (!intervals_.empty()) {
const_iterator first_interval = intervals_.begin();
if (first_interval->min() >= value) {
break;
}
++num_intervals_trimmed;
if (first_interval->max() <= value) {
intervals_.erase(first_interval);
continue;
}
const_cast<value_type*>(&(*first_interval))->SetMin(value);
break;
}
return num_intervals_trimmed != 0;
}
bool Empty() const { return intervals_.empty(); }
bool Contains(const T& value) const;
bool Contains(const value_type& interval) const;
bool Contains(const QuicIntervalSet<T>& other) const;
bool Contains(const T& min, const T& max) const {
return Contains(value_type(min, max));
}
bool Intersects(const QuicIntervalSet& other) const;
const_iterator Find(const T& value) const;
const_iterator Find(const value_type& interval) const;
const_iterator Find(const T& min, const T& max) const {
return Find(value_type(min, max));
}
const_iterator LowerBound(const T& value) const;
const_iterator UpperBound(const T& value) const;
bool IsDisjoint(const value_type& interval) const;
void Union(const QuicIntervalSet& other);
void Intersection(const QuicIntervalSet& other);
void Difference(const value_type& interval);
void Difference(const T& min, const T& max);
void Difference(const QuicIntervalSet& other);
void Complement(const T& min, const T& max);
const_iterator begin() const { return intervals_.begin(); }
const_iterator end() const { return intervals_.end(); }
const_reverse_iterator rbegin() const { return intervals_.rbegin(); }
const_reverse_iterator rend() const { return intervals_.rend(); }
template <typename Iter>
void assign(Iter first, Iter last) {
Clear();
for (; first != last; ++first) Add(*first);
}
void assign(std::initializer_list<value_type> il) {
assign(il.begin(), il.end());
}
std::string ToString() const;
QuicIntervalSet& operator=(std::initializer_list<value_type> il) {
assign(il.begin(), il.end());
return *this;
}
friend bool operator==(const QuicIntervalSet& a, const QuicIntervalSet& b) {
return a.Size() == b.Size() &&
std::equal(a.begin(), a.end(), b.begin(), NonemptyIntervalEq());
}
friend bool operator!=(const QuicIntervalSet& a, const QuicIntervalSet& b) {
return !(a == b);
}
private:
struct QUICHE_NO_EXPORT NonemptyIntervalEq {
bool operator()(const value_type& a, const value_type& b) const {
return a.min() == b.min() && a.max() == b.max();
}
};
bool Valid() const;
const_iterator FindIntersectionCandidate(const QuicIntervalSet& other) const;
const_iterator FindIntersectionCandidate(const value_type& interval) const;
template <typename X, typename Func>
static bool FindNextIntersectingPairImpl(X* x, const QuicIntervalSet& y,
const_iterator* mine,
const_iterator* theirs,
Func on_hole);
bool FindNextIntersectingPair(const QuicIntervalSet& other,
const_iterator* mine,
const_iterator* theirs) const {
return FindNextIntersectingPairImpl(
this, other, mine, theirs,
[](const QuicIntervalSet*, const_iterator, const_iterator end) {
return end;
});
}
bool FindNextIntersectingPairAndEraseHoles(const QuicIntervalSet& other,
const_iterator* mine,
const_iterator* theirs) {
return FindNextIntersectingPairImpl(
this, other, mine, theirs,
[](QuicIntervalSet* x, const_iterator from, const_iterator to) {
return x->intervals_.erase(from, to);
});
}
Set intervals_;
};
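// Illustrative usage (a sketch, not part of the upstream header). QuicInterval
// is half-open, so each interval covers [min, max):
//
//   QuicIntervalSet<int> s;
//   s.Add(10, 20);
//   s.Add(30, 40);
//   s.Contains(15);        // true
//   s.Contains(20);        // false: 20 is the excluded upper endpoint
//   s.Difference(12, 35);  // s is now { [10, 12), [35, 40) }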
template <typename T>
auto operator<<(std::ostream& out, const QuicIntervalSet<T>& seq)
-> decltype(out << *seq.begin()) {
out << "{";
for (const auto& interval : seq) {
out << " " << interval;
}
out << " }";
return out;
}
template <typename T>
typename QuicIntervalSet<T>::value_type QuicIntervalSet<T>::SpanningInterval()
const {
value_type result;
if (!intervals_.empty()) {
result.SetMin(intervals_.begin()->min());
result.SetMax(intervals_.rbegin()->max());
}
return result;
}
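// Add() keeps the set canonical (disjoint, sorted) by merging: lower_bound
// finds the first interval whose min is not below the new interval's min,
// the iterator steps back once in case the predecessor overlaps or abuts,
// and every non-Separated interval from there on is folded into a single
// spanning union that replaces the run it absorbed.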
template <typename T>
void QuicIntervalSet<T>::Add(const value_type& interval) {
if (interval.Empty()) return;
const_iterator it = intervals_.lower_bound(interval.min());
value_type the_union = interval;
if (it != intervals_.begin()) {
--it;
if (it->Separated(the_union)) {
++it;
}
}
const_iterator start = it;
while (it != intervals_.end() && !it->Separated(the_union)) {
the_union.SpanningUnion(*it);
++it;
}
intervals_.erase(start, it);
intervals_.insert(the_union);
}
template <typename T>
bool QuicIntervalSet<T>::Contains(const T& value) const {
const_iterator it = intervals_.upper_bound(value);
if (it == intervals_.begin()) return false;
--it;
return it->Contains(value);
}
template <typename T>
bool QuicIntervalSet<T>::Contains(const value_type& interval) const {
const_iterator it = intervals_.upper_bound(interval.min());
if (it == intervals_.begin()) return false;
--it;
return it->Contains(interval);
}
template <typename T>
bool QuicIntervalSet<T>::Contains(const QuicIntervalSet<T>& other) const {
if (!SpanningInterval().Contains(other.SpanningInterval())) {
return false;
}
for (const_iterator i = other.begin(); i != other.end(); ++i) {
if (!Contains(*i)) {
return false;
}
}
return true;
}
template <typename T>
typename QuicIntervalSet<T>::const_iterator QuicIntervalSet<T>::Find(
const T& value) const {
const_iterator it = intervals_.upper_bound(value);
if (it == intervals_.begin()) return intervals_.end();
--it;
if (it->Contains(value))
return it;
else
return intervals_.end();
}
template <typename T>
typename QuicIntervalSet<T>::const_iterator QuicIntervalSet<T>::Find(
const value_type& probe) const {
const_iterator it = intervals_.upper_bound(probe.min());
if (it == intervals_.begin()) return intervals_.end();
--it;
if (it->Contains(probe))
return it;
else
return intervals_.end();
}
template <typename T>
typename QuicIntervalSet<T>::const_iterator QuicIntervalSet<T>::LowerBound(
const T& value) const {
const_iterator it = intervals_.lower_bound(value);
if (it == intervals_.begin()) {
return it;
}
--it;
if (it->Contains(value)) {
return it;
} else {
return ++it;
}
}
template <typename T>
typename QuicIntervalSet<T>::const_iterator QuicIntervalSet<T>::UpperBound(
const T& value) const {
return intervals_.upper_bound(value);
}
template <typename T>
bool QuicIntervalSet<T>::IsDisjoint(const value_type& interval) const {
if (interval.Empty()) return true;
const_iterator it = intervals_.upper_bound(interval.min());
if (it != intervals_.end() && interval.max() > it->min()) return false;
if (it == intervals_.begin()) return true;
--it;
return it->max() <= interval.min();
}
template <typename T>
void QuicIntervalSet<T>::Union(const QuicIntervalSet& other) {
for (const value_type& interval : other.intervals_) {
Add(interval);
}
}
template <typename T>
typename QuicIntervalSet<T>::const_iterator
QuicIntervalSet<T>::FindIntersectionCandidate(
const QuicIntervalSet& other) const {
return FindIntersectionCandidate(*other.intervals_.begin());
}
template <typename T>
typename QuicIntervalSet<T>::const_iterator
QuicIntervalSet<T>::FindIntersectionCandidate(
const value_type& interval) const {
const_iterator mine = intervals_.upper_bound(interval.min());
if (mine != intervals_.begin()) {
--mine;
}
return mine;
}
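// FindNextIntersectingPairImpl() advances *mine and *theirs until they
// intersect (or one set is exhausted). The on_hole callback lets
// FindNextIntersectingPairAndEraseHoles() erase the skipped-over "hole"
// intervals from *x in place, while the read-only variant passes a no-op
// that simply returns the end of the skipped range.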
template <typename T>
template <typename X, typename Func>
bool QuicIntervalSet<T>::FindNextIntersectingPairImpl(X* x,
const QuicIntervalSet& y,
const_iterator* mine,
const_iterator* theirs,
Func on_hole) {
QUICHE_CHECK(x != nullptr);
if ((*mine == x->intervals_.end()) || (*theirs == y.intervals_.end())) {
return false;
}
while (!(**mine).Intersects(**theirs)) {
const_iterator erase_first = *mine;
while (*mine != x->intervals_.end() && (**mine).max() <= (**theirs).min()) {
++(*mine);
}
*mine = on_hole(x, erase_first, *mine);
if (*mine == x->intervals_.end()) {
return false;
}
while (*theirs != y.intervals_.end() &&
(**theirs).max() <= (**mine).min()) {
++(*theirs);
}
if (*theirs == y.intervals_.end()) {
on_hole(x, *mine, x->intervals_.end());
return false;
}
}
return true;
}
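// Intersection() first discards the prefix of *this that cannot intersect
// other's span, then repeatedly finds the next intersecting pair (erasing
// holes as it goes) and replaces each matched interval with its clipped
// pieces against successive intervals of `other`.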
template <typename T>
void QuicIntervalSet<T>::Intersection(const QuicIntervalSet& other) {
if (!SpanningInterval().Intersects(other.SpanningInterval())) {
intervals_.clear();
return;
}
const_iterator mine = FindIntersectionCandidate(other);
mine = intervals_.erase(intervals_.begin(), mine);
const_iterator theirs = other.FindIntersectionCandidate(*this);
while (FindNextIntersectingPairAndEraseHoles(other, &mine, &theirs)) {
value_type i(*mine);
intervals_.erase(mine);
mine = intervals_.end();
value_type intersection;
while (theirs != other.intervals_.end() &&
i.Intersects(*theirs, &intersection)) {
std::pair<const_iterator, bool> ins = intervals_.insert(intersection);
QUICHE_DCHECK(ins.second);
mine = ins.first;
++theirs;
}
QUICHE_DCHECK(mine != intervals_.end());
--theirs;
++mine;
}
QUICHE_DCHECK(Valid());
}
template <typename T>
bool QuicIntervalSet<T>::Intersects(const QuicIntervalSet& other) const {
auto mine = intervals_.begin();
auto theirs = other.intervals_.begin();
while (mine != intervals_.end() && theirs != other.intervals_.end()) {
if (mine->Intersects(*theirs))
return true;
else if (*mine < *theirs)
++mine;
else
++theirs;
}
return false;
}
template <typename T>
void QuicIntervalSet<T>::Difference(const value_type& interval) {
if (!SpanningInterval().Intersects(interval)) {
return;
}
Difference(QuicIntervalSet<T>(interval));
}
template <typename T>
void QuicIntervalSet<T>::Difference(const T& min, const T& max) {
Difference(value_type(min, max));
}
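// Difference(other) is a single linear sweep over both sorted sets:
// `myinterval` tracks the not-yet-subtracted remainder of the current
// interval, pieces strictly left of *theirs are emitted into `result`, and
// overlapped prefixes are trimmed away.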
template <typename T>
void QuicIntervalSet<T>::Difference(const QuicIntervalSet& other) {
if (Empty()) return;
Set result;
const_iterator mine = intervals_.begin();
value_type myinterval = *mine;
const_iterator theirs = other.intervals_.begin();
while (mine != intervals_.end()) {
QUICHE_DCHECK(!myinterval.Empty());
QUICHE_DCHECK(myinterval.max() == mine->max());
if (theirs == other.intervals_.end() || myinterval.max() <= theirs->min()) {
result.insert(result.end(), myinterval);
myinterval.Clear();
} else if (theirs->max() <= myinterval.min()) {
++theirs;
} else if (myinterval.min() < theirs->min()) {
result.insert(result.end(), value_type(myinterval.min(), theirs->min()));
myinterval.SetMin(theirs->max());
} else {
myinterval.SetMin(theirs->max());
}
if (myinterval.Empty()) {
++mine;
if (mine != intervals_.end()) {
myinterval = *mine;
}
}
}
std::swap(result, intervals_);
QUICHE_DCHECK(Valid());
}
template <typename T>
void QuicIntervalSet<T>::Complement(const T& min, const T& max) {
QuicIntervalSet<T> span(min, max);
span.Difference(*this);
intervals_.swap(span.intervals_);
}
template <typename T>
std::string QuicIntervalSet<T>::ToString() const {
std::ostringstream os;
os << *this;
return os.str();
}
template <typename T>
bool QuicIntervalSet<T>::Valid() const {
const_iterator prev = end();
for (const_iterator it = begin(); it != end(); ++it) {
if (it->min() >= it->max()) return false;
if (prev != end() && prev->max() >= it->min()) return false;
prev = it;
}
return true;
}
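// IntervalLess orders intervals by min() and also provides heterogeneous
// overloads against bare T points; assuming the comparator is declared
// transparent in the class body above, this is what lets
// intervals_.lower_bound()/upper_bound() take a T directly.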
template <typename T>
bool QuicIntervalSet<T>::IntervalLess::operator()(const value_type& a,
const value_type& b) const {
return a.min() < b.min();
}
template <typename T>
bool QuicIntervalSet<T>::IntervalLess::operator()(const value_type& a,
const T& point) const {
return a.min() < point;
}
template <typename T>
bool QuicIntervalSet<T>::IntervalLess::operator()(const value_type& a,
T&& point) const {
return a.min() < point;
}
template <typename T>
bool QuicIntervalSet<T>::IntervalLess::operator()(const T& point,
const value_type& a) const {
return point < a.min();
}
template <typename T>
bool QuicIntervalSet<T>::IntervalLess::operator()(T&& point,
const value_type& a) const {
return point < a.min();
}
}
#endif | #include "quiche/quic/core/quic_interval_set.h"
#include <stdarg.h>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <limits>
#include <sstream>
#include <string>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
using ::testing::ElementsAreArray;
class QuicIntervalSetTest : public QuicTest {
protected:
virtual void SetUp() {
is.Add(100, 200);
is.Add(300, 400);
is.Add(500, 600);
is.Add(700, 800);
is.Add(900, 1000);
is.Add(1100, 1200);
is.Add(1300, 1400);
is.Add(1500, 1600);
is.Add(1700, 1800);
is.Add(1900, 2000);
is.Add(2100, 2200);
other.Add(50, 70);
other.Add(2250, 2270);
other.Add(650, 670);
other.Add(350, 360);
other.Add(370, 380);
other.Add(470, 530);
other.Add(770, 830);
other.Add(870, 900);
other.Add(1200, 1230);
other.Add(1270, 1830);
}
virtual void TearDown() {
is.Clear();
EXPECT_TRUE(is.Empty());
other.Clear();
EXPECT_TRUE(other.Empty());
}
QuicIntervalSet<int> is;
QuicIntervalSet<int> other;
};
TEST_F(QuicIntervalSetTest, IsDisjoint) {
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(0, 99)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(0, 100)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(200, 200)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(200, 299)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(400, 407)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(405, 499)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(2300, 2300)));
EXPECT_TRUE(
is.IsDisjoint(QuicInterval<int>(2300, std::numeric_limits<int>::max())));
EXPECT_FALSE(is.IsDisjoint(QuicInterval<int>(100, 105)));
EXPECT_FALSE(is.IsDisjoint(QuicInterval<int>(199, 300)));
EXPECT_FALSE(is.IsDisjoint(QuicInterval<int>(250, 450)));
EXPECT_FALSE(is.IsDisjoint(QuicInterval<int>(299, 400)));
EXPECT_FALSE(is.IsDisjoint(QuicInterval<int>(250, 2000)));
EXPECT_FALSE(
is.IsDisjoint(QuicInterval<int>(2199, std::numeric_limits<int>::max())));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(90, 90)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(100, 100)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(100, 90)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(150, 150)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(200, 200)));
EXPECT_TRUE(is.IsDisjoint(QuicInterval<int>(400, 300)));
}
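// Check(is, n, min1, max1, ..., minN, maxN) verifies that `is` holds exactly
// n intervals with the given half-open bounds, in order; VA_Check does the
// varargs legwork.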
static bool VA_Check(const QuicIntervalSet<int>& is, int count, va_list ap) {
std::vector<QuicInterval<int>> intervals(is.begin(), is.end());
if (count != static_cast<int>(intervals.size())) {
QUIC_LOG(ERROR) << "Expected " << count << " intervals, got "
<< intervals.size() << ": " << is;
return false;
}
if (count != static_cast<int>(is.Size())) {
QUIC_LOG(ERROR) << "Expected " << count << " intervals, got Size "
<< is.Size() << ": " << is;
return false;
}
bool result = true;
for (int i = 0; i < count; i++) {
int min = va_arg(ap, int);
int max = va_arg(ap, int);
if (min != intervals[i].min() || max != intervals[i].max()) {
QUIC_LOG(ERROR) << "Expected: [" << min << ", " << max << ") got "
<< intervals[i] << " in " << is;
result = false;
}
}
return result;
}
static bool Check(const QuicIntervalSet<int>& is, int count, ...) {
va_list ap;
va_start(ap, count);
const bool result = VA_Check(is, count, ap);
va_end(ap);
return result;
}
static void TestContainsAndFind(const QuicIntervalSet<int>& is, int value) {
EXPECT_TRUE(is.Contains(value)) << "Set does not contain " << value;
auto it = is.Find(value);
EXPECT_NE(it, is.end()) << "No iterator to interval containing " << value;
EXPECT_TRUE(it->Contains(value)) << "Iterator does not contain " << value;
}
static void TestContainsAndFind(const QuicIntervalSet<int>& is, int min,
int max) {
EXPECT_TRUE(is.Contains(min, max))
<< "Set does not contain interval with min " << min << "and max " << max;
auto it = is.Find(min, max);
EXPECT_NE(it, is.end()) << "No iterator to interval with min " << min
<< "and max " << max;
EXPECT_TRUE(it->Contains(QuicInterval<int>(min, max)))
<< "Iterator does not contain interval with min " << min << "and max "
<< max;
}
static void TestNotContainsAndFind(const QuicIntervalSet<int>& is, int value) {
EXPECT_FALSE(is.Contains(value)) << "Set contains " << value;
auto it = is.Find(value);
  EXPECT_EQ(it, is.end()) << "There is an iterator to an interval containing "
                          << value;
}
static void TestNotContainsAndFind(const QuicIntervalSet<int>& is, int min,
int max) {
EXPECT_FALSE(is.Contains(min, max))
<< "Set contains interval with min " << min << "and max " << max;
auto it = is.Find(min, max);
  EXPECT_EQ(it, is.end()) << "There is an iterator to an interval with min "
                          << min << " and max " << max;
}
TEST_F(QuicIntervalSetTest, AddInterval) {
QuicIntervalSet<int> s;
s.Add(QuicInterval<int>(0, 10));
EXPECT_TRUE(Check(s, 1, 0, 10));
}
TEST_F(QuicIntervalSetTest, DecrementIterator) {
auto it = is.end();
EXPECT_NE(it, is.begin());
--it;
EXPECT_EQ(*it, QuicInterval<int>(2100, 2200));
++it;
EXPECT_EQ(it, is.end());
}
TEST_F(QuicIntervalSetTest, AddOptimizedForAppend) {
QuicIntervalSet<int> empty_one, empty_two;
empty_one.AddOptimizedForAppend(QuicInterval<int>(0, 99));
EXPECT_TRUE(Check(empty_one, 1, 0, 99));
empty_two.AddOptimizedForAppend(1, 50);
EXPECT_TRUE(Check(empty_two, 1, 1, 50));
QuicIntervalSet<int> iset;
iset.AddOptimizedForAppend(100, 150);
iset.AddOptimizedForAppend(200, 250);
EXPECT_TRUE(Check(iset, 2, 100, 150, 200, 250));
iset.AddOptimizedForAppend(199, 200);
EXPECT_TRUE(Check(iset, 2, 100, 150, 199, 250));
iset.AddOptimizedForAppend(251, 260);
EXPECT_TRUE(Check(iset, 3, 100, 150, 199, 250, 251, 260));
iset.AddOptimizedForAppend(252, 260);
EXPECT_TRUE(Check(iset, 3, 100, 150, 199, 250, 251, 260));
iset.AddOptimizedForAppend(252, 300);
EXPECT_TRUE(Check(iset, 3, 100, 150, 199, 250, 251, 300));
iset.AddOptimizedForAppend(300, 350);
EXPECT_TRUE(Check(iset, 3, 100, 150, 199, 250, 251, 350));
}
TEST_F(QuicIntervalSetTest, PopFront) {
QuicIntervalSet<int> iset{{100, 200}, {400, 500}, {700, 800}};
EXPECT_TRUE(Check(iset, 3, 100, 200, 400, 500, 700, 800));
iset.PopFront();
EXPECT_TRUE(Check(iset, 2, 400, 500, 700, 800));
iset.PopFront();
EXPECT_TRUE(Check(iset, 1, 700, 800));
iset.PopFront();
EXPECT_TRUE(iset.Empty());
}
TEST_F(QuicIntervalSetTest, TrimLessThan) {
QuicIntervalSet<int> iset{{100, 200}, {400, 500}, {700, 800}};
EXPECT_TRUE(Check(iset, 3, 100, 200, 400, 500, 700, 800));
EXPECT_FALSE(iset.TrimLessThan(99));
EXPECT_FALSE(iset.TrimLessThan(100));
EXPECT_TRUE(Check(iset, 3, 100, 200, 400, 500, 700, 800));
EXPECT_TRUE(iset.TrimLessThan(101));
EXPECT_TRUE(Check(iset, 3, 101, 200, 400, 500, 700, 800));
EXPECT_TRUE(iset.TrimLessThan(199));
EXPECT_TRUE(Check(iset, 3, 199, 200, 400, 500, 700, 800));
EXPECT_TRUE(iset.TrimLessThan(450));
EXPECT_TRUE(Check(iset, 2, 450, 500, 700, 800));
EXPECT_TRUE(iset.TrimLessThan(500));
EXPECT_TRUE(Check(iset, 1, 700, 800));
EXPECT_TRUE(iset.TrimLessThan(801));
EXPECT_TRUE(iset.Empty());
EXPECT_FALSE(iset.TrimLessThan(900));
EXPECT_TRUE(iset.Empty());
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetBasic) {
QuicIntervalSet<int> iset;
EXPECT_TRUE(iset.Empty());
EXPECT_EQ(0u, iset.Size());
iset.Add(100, 200);
EXPECT_FALSE(iset.Empty());
EXPECT_EQ(1u, iset.Size());
iset.Add(100, 150);
iset.Add(150, 200);
iset.Add(130, 170);
iset.Add(90, 150);
iset.Add(170, 220);
iset.Add(300, 400);
iset.Add(250, 450);
EXPECT_FALSE(iset.Empty());
EXPECT_EQ(2u, iset.Size());
EXPECT_TRUE(Check(iset, 2, 90, 220, 250, 450));
iset.Clear();
iset.Add(100, 200);
iset.Add(200, 300);
EXPECT_FALSE(iset.Empty());
EXPECT_EQ(1u, iset.Size());
EXPECT_TRUE(Check(iset, 1, 100, 300));
iset.Clear();
QuicIntervalSet<int> iset_add;
iset.Add(100, 200);
iset.Add(100, 150);
iset.Add(150, 200);
iset.Add(130, 170);
iset_add.Add(90, 150);
iset_add.Add(170, 220);
iset_add.Add(300, 400);
iset_add.Add(250, 450);
iset.Union(iset_add);
EXPECT_FALSE(iset.Empty());
EXPECT_EQ(2u, iset.Size());
EXPECT_TRUE(Check(iset, 2, 90, 220, 250, 450));
{
std::vector<QuicInterval<int>> expected(iset.begin(), iset.end());
std::vector<QuicInterval<int>> actual1;
std::copy(iset.begin(), iset.end(), back_inserter(actual1));
ASSERT_EQ(expected.size(), actual1.size());
std::vector<QuicInterval<int>> actual2;
std::copy(iset.begin(), iset.end(), back_inserter(actual2));
ASSERT_EQ(expected.size(), actual2.size());
for (size_t i = 0; i < expected.size(); i++) {
EXPECT_EQ(expected[i].min(), actual1[i].min());
EXPECT_EQ(expected[i].max(), actual1[i].max());
EXPECT_EQ(expected[i].min(), actual2[i].min());
EXPECT_EQ(expected[i].max(), actual2[i].max());
}
EXPECT_THAT(std::vector<QuicInterval<int>>(iset.rbegin(), iset.rend()),
ElementsAreArray(expected.rbegin(), expected.rend()));
}
TestNotContainsAndFind(iset, 89);
TestContainsAndFind(iset, 90);
TestContainsAndFind(iset, 120);
TestContainsAndFind(iset, 219);
TestNotContainsAndFind(iset, 220);
TestNotContainsAndFind(iset, 235);
TestNotContainsAndFind(iset, 249);
TestContainsAndFind(iset, 250);
TestContainsAndFind(iset, 300);
TestContainsAndFind(iset, 449);
TestNotContainsAndFind(iset, 450);
TestNotContainsAndFind(iset, 451);
TestNotContainsAndFind(iset, 50, 60);
TestNotContainsAndFind(iset, 50, 90);
TestNotContainsAndFind(iset, 50, 200);
TestNotContainsAndFind(iset, 90, 90);
TestContainsAndFind(iset, 90, 200);
TestContainsAndFind(iset, 100, 200);
TestContainsAndFind(iset, 100, 220);
TestNotContainsAndFind(iset, 100, 221);
TestNotContainsAndFind(iset, 220, 220);
TestNotContainsAndFind(iset, 240, 300);
TestContainsAndFind(iset, 250, 300);
TestContainsAndFind(iset, 260, 300);
TestContainsAndFind(iset, 300, 450);
TestNotContainsAndFind(iset, 300, 451);
QuicIntervalSet<int> iset_contains;
iset_contains.Add(50, 90);
EXPECT_FALSE(iset.Contains(iset_contains));
iset_contains.Clear();
iset_contains.Add(90, 200);
EXPECT_TRUE(iset.Contains(iset_contains));
iset_contains.Add(100, 200);
EXPECT_TRUE(iset.Contains(iset_contains));
iset_contains.Add(100, 220);
EXPECT_TRUE(iset.Contains(iset_contains));
iset_contains.Add(250, 300);
EXPECT_TRUE(iset.Contains(iset_contains));
iset_contains.Add(300, 450);
EXPECT_TRUE(iset.Contains(iset_contains));
iset_contains.Add(300, 451);
EXPECT_FALSE(iset.Contains(iset_contains));
EXPECT_FALSE(iset.Contains(QuicInterval<int>()));
EXPECT_FALSE(iset.Contains(QuicIntervalSet<int>()));
QuicIntervalSet<int> i2({{220, 230}});
EXPECT_FALSE(iset.Contains(i2));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetContainsEmpty) {
const QuicIntervalSet<int> empty;
const QuicIntervalSet<int> other_empty;
const QuicIntervalSet<int> non_empty({{10, 20}, {40, 50}});
EXPECT_FALSE(empty.Contains(empty));
EXPECT_FALSE(empty.Contains(other_empty));
EXPECT_FALSE(empty.Contains(non_empty));
EXPECT_FALSE(non_empty.Contains(empty));
}
TEST_F(QuicIntervalSetTest, Equality) {
QuicIntervalSet<int> is_copy = is;
EXPECT_EQ(is, is);
EXPECT_EQ(is, is_copy);
EXPECT_NE(is, other);
EXPECT_NE(is, QuicIntervalSet<int>());
EXPECT_EQ(QuicIntervalSet<int>(), QuicIntervalSet<int>());
}
TEST_F(QuicIntervalSetTest, LowerAndUpperBound) {
QuicIntervalSet<int> intervals;
intervals.Add(10, 20);
intervals.Add(30, 40);
EXPECT_EQ(intervals.LowerBound(5)->min(), 10);
EXPECT_EQ(intervals.LowerBound(10)->min(), 10);
EXPECT_EQ(intervals.LowerBound(15)->min(), 10);
EXPECT_EQ(intervals.LowerBound(20)->min(), 30);
EXPECT_EQ(intervals.LowerBound(25)->min(), 30);
EXPECT_EQ(intervals.LowerBound(30)->min(), 30);
EXPECT_EQ(intervals.LowerBound(35)->min(), 30);
EXPECT_EQ(intervals.LowerBound(40), intervals.end());
EXPECT_EQ(intervals.LowerBound(50), intervals.end());
EXPECT_EQ(intervals.UpperBound(5)->min(), 10);
EXPECT_EQ(intervals.UpperBound(10)->min(), 30);
EXPECT_EQ(intervals.UpperBound(15)->min(), 30);
EXPECT_EQ(intervals.UpperBound(20)->min(), 30);
EXPECT_EQ(intervals.UpperBound(25)->min(), 30);
EXPECT_EQ(intervals.UpperBound(30), intervals.end());
EXPECT_EQ(intervals.UpperBound(35), intervals.end());
EXPECT_EQ(intervals.UpperBound(40), intervals.end());
EXPECT_EQ(intervals.UpperBound(50), intervals.end());
}
TEST_F(QuicIntervalSetTest, SpanningInterval) {
{
QuicIntervalSet<int> iset;
const QuicInterval<int>& ival = iset.SpanningInterval();
EXPECT_TRUE(ival.Empty());
}
{
QuicIntervalSet<int> iset;
iset.Add(100, 200);
const QuicInterval<int>& ival = iset.SpanningInterval();
EXPECT_EQ(100, ival.min());
EXPECT_EQ(200, ival.max());
}
{
const QuicInterval<int>& ival = is.SpanningInterval();
EXPECT_EQ(100, ival.min());
EXPECT_EQ(2200, ival.max());
}
{
const QuicInterval<int>& ival = other.SpanningInterval();
EXPECT_EQ(50, ival.min());
EXPECT_EQ(2270, ival.max());
}
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetUnion) {
is.Union(other);
EXPECT_TRUE(Check(is, 12, 50, 70, 100, 200, 300, 400, 470, 600, 650, 670, 700,
830, 870, 1000, 1100, 1230, 1270, 1830, 1900, 2000, 2100,
2200, 2250, 2270));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersection) {
EXPECT_TRUE(is.Intersects(other));
EXPECT_TRUE(other.Intersects(is));
is.Intersection(other);
EXPECT_TRUE(Check(is, 7, 350, 360, 370, 380, 500, 530, 770, 800, 1300, 1400,
1500, 1600, 1700, 1800));
EXPECT_TRUE(is.Intersects(other));
EXPECT_TRUE(other.Intersects(is));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionBothEmpty) {
QuicIntervalSet<std::string> mine, theirs;
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionEmptyMine) {
QuicIntervalSet<std::string> mine;
QuicIntervalSet<std::string> theirs("a", "b");
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionEmptyTheirs) {
QuicIntervalSet<std::string> mine("a", "b");
QuicIntervalSet<std::string> theirs;
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionTheirsBeforeMine) {
QuicIntervalSet<std::string> mine("y", "z");
QuicIntervalSet<std::string> theirs;
theirs.Add("a", "b");
theirs.Add("c", "d");
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionMineBeforeTheirs) {
QuicIntervalSet<std::string> mine;
mine.Add("a", "b");
mine.Add("c", "d");
QuicIntervalSet<std::string> theirs("y", "z");
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest,
QuicIntervalSetIntersectionTheirsBeforeMineInt64Singletons) {
QuicIntervalSet<int64_t> mine({{10, 15}});
QuicIntervalSet<int64_t> theirs({{-20, -5}});
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest,
QuicIntervalSetIntersectionMineBeforeTheirsIntSingletons) {
QuicIntervalSet<int> mine({{10, 15}});
QuicIntervalSet<int> theirs({{90, 95}});
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionTheirsBetweenMine) {
QuicIntervalSet<int64_t> mine({{0, 5}, {40, 50}});
QuicIntervalSet<int64_t> theirs({{10, 15}});
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionMineBetweenTheirs) {
QuicIntervalSet<int> mine({{20, 25}});
QuicIntervalSet<int> theirs({{10, 15}, {30, 32}});
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionAlternatingIntervals) {
QuicIntervalSet<int> mine, theirs;
mine.Add(10, 20);
mine.Add(40, 50);
mine.Add(60, 70);
theirs.Add(25, 39);
theirs.Add(55, 59);
theirs.Add(75, 79);
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(mine.Empty());
EXPECT_FALSE(mine.Intersects(theirs));
EXPECT_FALSE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest,
QuicIntervalSetIntersectionAdjacentAlternatingNonIntersectingIntervals) {
const QuicIntervalSet<int> x1({{0, 10}});
const QuicIntervalSet<int> y1({{-50, 0}, {10, 95}});
QuicIntervalSet<int> result1 = x1;
result1.Intersection(y1);
EXPECT_TRUE(result1.Empty()) << result1;
const QuicIntervalSet<int16_t> x2({{0, 10}, {20, 30}, {40, 90}});
const QuicIntervalSet<int16_t> y2(
{{-50, -40}, {-2, 0}, {10, 20}, {32, 40}, {90, 95}});
QuicIntervalSet<int16_t> result2 = x2;
result2.Intersection(y2);
EXPECT_TRUE(result2.Empty()) << result2;
const QuicIntervalSet<int64_t> x3({{-1, 5}, {5, 10}});
const QuicIntervalSet<int64_t> y3({{-10, -1}, {10, 95}});
QuicIntervalSet<int64_t> result3 = x3;
result3.Intersection(y3);
EXPECT_TRUE(result3.Empty()) << result3;
}
TEST_F(QuicIntervalSetTest,
QuicIntervalSetIntersectionAlternatingIntersectingIntervals) {
const QuicIntervalSet<int> x1({{0, 10}});
const QuicIntervalSet<int> y1({{-50, 1}, {9, 95}});
const QuicIntervalSet<int> expected_result1({{0, 1}, {9, 10}});
QuicIntervalSet<int> result1 = x1;
result1.Intersection(y1);
EXPECT_EQ(result1, expected_result1);
const QuicIntervalSet<int16_t> x2({{0, 10}, {20, 30}, {40, 90}});
const QuicIntervalSet<int16_t> y2(
{{-50, -40}, {-2, 2}, {9, 21}, {32, 41}, {85, 95}});
const QuicIntervalSet<int16_t> expected_result2(
{{0, 2}, {9, 10}, {20, 21}, {40, 41}, {85, 90}});
QuicIntervalSet<int16_t> result2 = x2;
result2.Intersection(y2);
EXPECT_EQ(result2, expected_result2);
const QuicIntervalSet<int64_t> x3({{-1, 5}, {5, 10}});
const QuicIntervalSet<int64_t> y3({{-10, 3}, {4, 95}});
const QuicIntervalSet<int64_t> expected_result3({{-1, 3}, {4, 10}});
QuicIntervalSet<int64_t> result3 = x3;
result3.Intersection(y3);
EXPECT_EQ(result3, expected_result3);
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionIdentical) {
QuicIntervalSet<int> copy(is);
EXPECT_TRUE(copy.Intersects(is));
EXPECT_TRUE(is.Intersects(copy));
is.Intersection(copy);
EXPECT_EQ(copy, is);
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionSuperset) {
QuicIntervalSet<int> mine(-1, 10000);
EXPECT_TRUE(mine.Intersects(is));
EXPECT_TRUE(is.Intersects(mine));
mine.Intersection(is);
EXPECT_EQ(is, mine);
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionSubset) {
QuicIntervalSet<int> copy(is);
QuicIntervalSet<int> theirs(-1, 10000);
EXPECT_TRUE(copy.Intersects(theirs));
EXPECT_TRUE(theirs.Intersects(copy));
is.Intersection(theirs);
EXPECT_EQ(copy, is);
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetIntersectionLargeSet) {
QuicIntervalSet<int> mine, theirs;
for (int i = 0; i < 1000; i += 10) {
mine.Add(i, i + 9);
}
theirs.Add(500, 520);
theirs.Add(535, 545);
theirs.Add(801, 809);
EXPECT_TRUE(mine.Intersects(theirs));
EXPECT_TRUE(theirs.Intersects(mine));
mine.Intersection(theirs);
EXPECT_TRUE(Check(mine, 5, 500, 509, 510, 519, 535, 539, 540, 545, 801, 809));
EXPECT_TRUE(mine.Intersects(theirs));
EXPECT_TRUE(theirs.Intersects(mine));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifference) {
is.Difference(other);
EXPECT_TRUE(Check(is, 10, 100, 200, 300, 350, 360, 370, 380, 400, 530, 600,
700, 770, 900, 1000, 1100, 1200, 1900, 2000, 2100, 2200));
QuicIntervalSet<int> copy = is;
is.Difference(copy);
EXPECT_TRUE(is.Empty());
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceSingleBounds) {
std::vector<QuicInterval<int>> ivals(other.begin(), other.end());
for (const QuicInterval<int>& ival : ivals) {
is.Difference(ival.min(), ival.max());
}
EXPECT_TRUE(Check(is, 10, 100, 200, 300, 350, 360, 370, 380, 400, 530, 600,
700, 770, 900, 1000, 1100, 1200, 1900, 2000, 2100, 2200));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceSingleInterval) {
std::vector<QuicInterval<int>> ivals(other.begin(), other.end());
for (const QuicInterval<int>& ival : ivals) {
is.Difference(ival);
}
EXPECT_TRUE(Check(is, 10, 100, 200, 300, 350, 360, 370, 380, 400, 530, 600,
700, 770, 900, 1000, 1100, 1200, 1900, 2000, 2100, 2200));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceAlternatingIntervals) {
QuicIntervalSet<int> mine, theirs;
mine.Add(10, 20);
mine.Add(40, 50);
mine.Add(60, 70);
theirs.Add(25, 39);
theirs.Add(55, 59);
theirs.Add(75, 79);
mine.Difference(theirs);
EXPECT_TRUE(Check(mine, 3, 10, 20, 40, 50, 60, 70));
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceEmptyMine) {
QuicIntervalSet<std::string> mine, theirs;
theirs.Add("a", "b");
mine.Difference(theirs);
EXPECT_TRUE(mine.Empty());
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceEmptyTheirs) {
QuicIntervalSet<std::string> mine, theirs;
mine.Add("a", "b");
mine.Difference(theirs);
EXPECT_EQ(1u, mine.Size());
EXPECT_EQ("a", mine.begin()->min());
EXPECT_EQ("b", mine.begin()->max());
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceTheirsBeforeMine) {
QuicIntervalSet<std::string> mine, theirs;
mine.Add("y", "z");
theirs.Add("a", "b");
mine.Difference(theirs);
EXPECT_EQ(1u, mine.Size());
EXPECT_EQ("y", mine.begin()->min());
EXPECT_EQ("z", mine.begin()->max());
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceMineBeforeTheirs) {
QuicIntervalSet<std::string> mine, theirs;
mine.Add("a", "b");
theirs.Add("y", "z");
mine.Difference(theirs);
EXPECT_EQ(1u, mine.Size());
EXPECT_EQ("a", mine.begin()->min());
EXPECT_EQ("b", mine.begin()->max());
}
TEST_F(QuicIntervalSetTest, QuicIntervalSetDifferenceIdentical) {
QuicIntervalSet<std::string> mine;
mine.Add("a", "b");
mine.Add("c", "d");
QuicIntervalSet<std::string> theirs(mine);
mine.Difference(theirs);
EXPECT_TRUE(mine.Empty());
}
TEST_F(QuicIntervalSetTest, EmptyComplement) {
QuicIntervalSet<int> iset;
iset.Complement(100, 200);
EXPECT_TRUE(Check(iset, 1, 100, 200));
}
TEST(QuicIntervalSetMultipleCompactionTest, OuterCovering) {
QuicIntervalSet<int> iset;
iset.Add(100, 150);
iset.Add(200, 250);
iset.Add(300, 350);
iset.Add(400, 450);
EXPECT_TRUE(Check(iset, 4, 100, 150, 200, 250, 300, 350, 400, 450));
iset.Add(0, 500);
EXPECT_TRUE(Check(iset, 1, 0, 500));
}
TEST(QuicIntervalSetMultipleCompactionTest, InnerCovering) {
QuicIntervalSet<int> iset;
iset.Add(100, 150);
iset.Add(200, 250);
iset.Add(300, 350);
iset.Add(400, 450);
EXPECT_TRUE(Check(iset, 4, 100, 150, 200, 250, 300, 350, 400, 450));
iset.Add(125, 425);
EXPECT_TRUE(Check(iset, 1, 100, 450));
}
TEST(QuicIntervalSetMultipleCompactionTest, LeftCovering) {
QuicIntervalSet<int> iset;
iset.Add(100, 150);
iset.Add(200, 250);
iset.Add(300, 350);
iset.Add(400, 450);
EXPECT_TRUE(Check(iset, 4, 100, 150, 200, 250, 300, 350, 400, 450));
iset.Add(125, 500);
EXPECT_TRUE(Check(iset, 1, 100, 500));
}
TEST(QuicIntervalSetMultipleCompactionTest, RightCovering) {
QuicIntervalSet<int> iset;
iset.Add(100, 150);
iset.Add(200, 250);
iset.Add(300, 350);
iset.Add(400, 450);
EXPECT_TRUE(Check(iset, 4, 100, 150, 200, 250, 300, 350, 400, 450));
iset.Add(0, 425);
EXPECT_TRUE(Check(iset, 1, 0, 450));
}
static bool CheckOneComplement(int add_min, int add_max, int comp_min,
int comp_max, int count, ...) {
QuicIntervalSet<int> iset;
iset.Add(add_min, add_max);
iset.Complement(comp_min, comp_max);
bool result = true;
va_list ap;
va_start(ap, count);
if (!VA_Check(iset, count, ap)) {
result = false;
}
va_end(ap);
return result;
}
TEST_F(QuicIntervalSetTest, SingleIntervalComplement) {
EXPECT_TRUE(CheckOneComplement(0, 10, 50, 150, 1, 50, 150));
EXPECT_TRUE(CheckOneComplement(50, 150, 0, 100, 1, 0, 50));
EXPECT_TRUE(CheckOneComplement(50, 150, 50, 150, 0));
EXPECT_TRUE(CheckOneComplement(50, 500, 100, 300, 0));
EXPECT_TRUE(CheckOneComplement(50, 500, 0, 800, 2, 0, 50, 500, 800));
EXPECT_TRUE(CheckOneComplement(50, 150, 100, 300, 1, 150, 300));
EXPECT_TRUE(CheckOneComplement(50, 150, 200, 300, 1, 200, 300));
}
static bool CheckComplement(const QuicIntervalSet<int>& iset, int comp_min,
int comp_max, int count, ...) {
QuicIntervalSet<int> iset_copy = iset;
iset_copy.Complement(comp_min, comp_max);
bool result = true;
va_list ap;
va_start(ap, count);
if (!VA_Check(iset_copy, count, ap)) {
result = false;
}
va_end(ap);
return result;
}
TEST_F(QuicIntervalSetTest, MultiIntervalComplement) {
QuicIntervalSet<int> iset;
iset.Add(100, 200);
iset.Add(300, 400);
iset.Add(500, 600);
EXPECT_TRUE(CheckComplement(iset, 0, 50, 1, 0, 50));
EXPECT_TRUE(CheckComplement(iset, 0, 200, 1, 0, 100));
EXPECT_TRUE(CheckComplement(iset, 0, 220, 2, 0, 100, 200, 220));
EXPECT_TRUE(CheckComplement(iset, 100, 600, 2, 200, 300, 400, 500));
EXPECT_TRUE(CheckComplement(iset, 300, 400, 0));
EXPECT_TRUE(CheckComplement(iset, 250, 400, 1, 250, 300));
EXPECT_TRUE(CheckComplement(iset, 300, 450, 1, 400, 450));
EXPECT_TRUE(CheckComplement(iset, 250, 450, 2, 250, 300, 400, 450));
EXPECT_TRUE(
CheckComplement(iset, 0, 700, 4, 0, 100, 200, 300, 400, 500, 600, 700));
EXPECT_TRUE(CheckComplement(iset, 400, 700, 2, 400, 500, 600, 700));
EXPECT_TRUE(CheckComplement(iset, 350, 700, 2, 400, 500, 600, 700));
EXPECT_TRUE(CheckComplement(iset, 700, 800, 1, 700, 800));
}
TEST_F(QuicIntervalSetTest, ToString) {
QuicIntervalSet<int> iset;
iset.Add(300, 400);
iset.Add(100, 200);
iset.Add(500, 600);
EXPECT_TRUE(!iset.ToString().empty());
QUIC_VLOG(2) << iset;
EXPECT_EQ("{ [100, 200) [300, 400) [500, 600) }", iset.ToString());
EXPECT_EQ("{ [1, 2) }", QuicIntervalSet<int>(1, 2).ToString());
EXPECT_EQ("{ }", QuicIntervalSet<int>().ToString());
}
TEST_F(QuicIntervalSetTest, ConstructionDiscardsEmptyInterval) {
EXPECT_TRUE(QuicIntervalSet<int>(QuicInterval<int>(2, 2)).Empty());
EXPECT_TRUE(QuicIntervalSet<int>(2, 2).Empty());
EXPECT_FALSE(QuicIntervalSet<int>(QuicInterval<int>(2, 3)).Empty());
EXPECT_FALSE(QuicIntervalSet<int>(2, 3).Empty());
}
TEST_F(QuicIntervalSetTest, Swap) {
QuicIntervalSet<int> a, b;
a.Add(300, 400);
b.Add(100, 200);
b.Add(500, 600);
std::swap(a, b);
EXPECT_TRUE(Check(a, 2, 100, 200, 500, 600));
EXPECT_TRUE(Check(b, 1, 300, 400));
std::swap(a, b);
EXPECT_TRUE(Check(a, 1, 300, 400));
EXPECT_TRUE(Check(b, 2, 100, 200, 500, 600));
}
TEST_F(QuicIntervalSetTest, OutputReturnsOstreamRef) {
std::stringstream ss;
const QuicIntervalSet<int> v(QuicInterval<int>(1, 2));
auto return_type_is_a_ref = [](std::ostream&) {};
return_type_is_a_ref(ss << v);
}
struct NotOstreamable {
bool operator<(const NotOstreamable&) const { return false; }
bool operator>(const NotOstreamable&) const { return false; }
bool operator!=(const NotOstreamable&) const { return false; }
bool operator>=(const NotOstreamable&) const { return true; }
bool operator<=(const NotOstreamable&) const { return true; }
bool operator==(const NotOstreamable&) const { return true; }
};
TEST_F(QuicIntervalSetTest, IntervalOfTypeWithNoOstreamSupport) {
const NotOstreamable v;
const QuicIntervalSet<NotOstreamable> d(QuicInterval<NotOstreamable>(v, v));
EXPECT_EQ(d, d);
}
class QuicIntervalSetInitTest : public QuicTest {
protected:
const std::vector<QuicInterval<int>> intervals_{{0, 1}, {2, 4}};
};
TEST_F(QuicIntervalSetInitTest, DirectInit) {
std::initializer_list<QuicInterval<int>> il = {{0, 1}, {2, 3}, {3, 4}};
QuicIntervalSet<int> s(il);
EXPECT_THAT(s, ElementsAreArray(intervals_));
}
TEST_F(QuicIntervalSetInitTest, CopyInit) {
std::initializer_list<QuicInterval<int>> il = {{0, 1}, {2, 3}, {3, 4}};
QuicIntervalSet<int> s = il;
EXPECT_THAT(s, ElementsAreArray(intervals_));
}
TEST_F(QuicIntervalSetInitTest, AssignIterPair) {
QuicIntervalSet<int> s(0, 1000);
s.assign(intervals_.begin(), intervals_.end());
EXPECT_THAT(s, ElementsAreArray(intervals_));
}
TEST_F(QuicIntervalSetInitTest, AssignInitList) {
QuicIntervalSet<int> s(0, 1000);
s.assign({{0, 1}, {2, 3}, {3, 4}});
EXPECT_THAT(s, ElementsAreArray(intervals_));
}
TEST_F(QuicIntervalSetInitTest, AssignmentInitList) {
std::initializer_list<QuicInterval<int>> il = {{0, 1}, {2, 3}, {3, 4}};
QuicIntervalSet<int> s;
s = il;
EXPECT_THAT(s, ElementsAreArray(intervals_));
}
TEST_F(QuicIntervalSetInitTest, BracedInitThenBracedAssign) {
QuicIntervalSet<int> s{{0, 1}, {2, 3}, {3, 4}};
s = {{0, 1}, {2, 4}};
EXPECT_THAT(s, ElementsAreArray(intervals_));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_interval_set.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_interval_set_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
57f098d9-da3c-4ce4-a0e2-5b4d75de77da | cpp | google/quiche | quic_hkdf | quiche/quic/core/crypto/quic_hkdf.cc | quiche/quic/core/crypto/quic_hkdf_test.cc | #include "quiche/quic/core/crypto/quic_hkdf.h"
#include <memory>
#include "absl/strings/string_view.h"
#include "openssl/digest.h"
#include "openssl/hkdf.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
const size_t kSHA256HashLength = 32;
const size_t kMaxKeyMaterialSize = kSHA256HashLength * 256;
QuicHKDF::QuicHKDF(absl::string_view secret, absl::string_view salt,
absl::string_view info, size_t key_bytes_to_generate,
size_t iv_bytes_to_generate,
size_t subkey_secret_bytes_to_generate)
: QuicHKDF(secret, salt, info, key_bytes_to_generate, key_bytes_to_generate,
iv_bytes_to_generate, iv_bytes_to_generate,
subkey_secret_bytes_to_generate) {}
QuicHKDF::QuicHKDF(absl::string_view secret, absl::string_view salt,
absl::string_view info, size_t client_key_bytes_to_generate,
size_t server_key_bytes_to_generate,
size_t client_iv_bytes_to_generate,
size_t server_iv_bytes_to_generate,
size_t subkey_secret_bytes_to_generate) {
const size_t material_length =
2 * client_key_bytes_to_generate + client_iv_bytes_to_generate +
2 * server_key_bytes_to_generate + server_iv_bytes_to_generate +
subkey_secret_bytes_to_generate;
QUICHE_DCHECK_LT(material_length, kMaxKeyMaterialSize);
output_.resize(material_length);
if (output_.empty()) {
return;
}
::HKDF(&output_[0], output_.size(), ::EVP_sha256(),
reinterpret_cast<const uint8_t*>(secret.data()), secret.size(),
reinterpret_cast<const uint8_t*>(salt.data()), salt.size(),
reinterpret_cast<const uint8_t*>(info.data()), info.size());
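  // Slice the single HKDF output block positionally: client key, server key,
  // client IV, server IV, subkey secret, then client and server header
  // protection keys, in that order.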
size_t j = 0;
if (client_key_bytes_to_generate) {
client_write_key_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
client_key_bytes_to_generate);
j += client_key_bytes_to_generate;
}
if (server_key_bytes_to_generate) {
server_write_key_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
server_key_bytes_to_generate);
j += server_key_bytes_to_generate;
}
if (client_iv_bytes_to_generate) {
client_write_iv_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
client_iv_bytes_to_generate);
j += client_iv_bytes_to_generate;
}
if (server_iv_bytes_to_generate) {
server_write_iv_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
server_iv_bytes_to_generate);
j += server_iv_bytes_to_generate;
}
if (subkey_secret_bytes_to_generate) {
subkey_secret_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
subkey_secret_bytes_to_generate);
j += subkey_secret_bytes_to_generate;
}
if (client_key_bytes_to_generate) {
client_hp_key_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
client_key_bytes_to_generate);
j += client_key_bytes_to_generate;
}
if (server_key_bytes_to_generate) {
server_hp_key_ = absl::string_view(reinterpret_cast<char*>(&output_[j]),
server_key_bytes_to_generate);
j += server_key_bytes_to_generate;
}
}
QuicHKDF::~QuicHKDF() {}
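// Illustrative call sketch (editor's addition; the byte counts are
// hypothetical):
//   QuicHKDF hkdf(secret, salt, info,
//                 /*key_bytes_to_generate=*/16,
//                 /*iv_bytes_to_generate=*/4,
//                 /*subkey_secret_bytes_to_generate=*/32);
//   absl::string_view key = hkdf.client_write_key();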
} | #include "quiche/quic/core/crypto/quic_hkdf.h"
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
struct HKDFInput {
const char* key_hex;
const char* salt_hex;
const char* info_hex;
const char* output_hex;
};
static const HKDFInput kHKDFInputs[] = {
{
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
"000102030405060708090a0b0c",
"f0f1f2f3f4f5f6f7f8f9",
"3cb25f25faacd57a90434f64d0362f2a2d2d0a90cf1a5a4c5db02d56ecc4c5bf340072"
"08d5"
"b887185865",
},
{
"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122"
"2324"
"25262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f4041424344454647"
"4849"
"4a4b4c4d4e4f",
"606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182"
"8384"
"85868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7"
"a8a9"
"aaabacadaeaf",
"b0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2"
"d3d4"
"d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7"
"f8f9"
"fafbfcfdfeff",
"b11e398dc80327a1c8e7f78c596a49344f012eda2d4efad8a050cc4c19afa97c59045a"
"99ca"
"c7827271cb41c65e590e09da3275600c2f09b8367793a9aca3db71cc30c58179ec3e87"
"c14c"
"01d5c1f3434f1d87",
},
{
"0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b",
"",
"",
"8da4e775a563c18f715f802a063c5a31b8a11f5c5ee1879ec3454e5f3c738d2d9d2013"
"95fa"
"a4b61a96c8",
},
};
class QuicHKDFTest : public QuicTest {};
TEST_F(QuicHKDFTest, HKDF) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(kHKDFInputs); i++) {
const HKDFInput& test(kHKDFInputs[i]);
SCOPED_TRACE(i);
std::string key;
std::string salt;
std::string info;
std::string expected;
ASSERT_TRUE(absl::HexStringToBytes(test.key_hex, &key));
ASSERT_TRUE(absl::HexStringToBytes(test.salt_hex, &salt));
ASSERT_TRUE(absl::HexStringToBytes(test.info_hex, &info));
ASSERT_TRUE(absl::HexStringToBytes(test.output_hex, &expected));
QuicHKDF hkdf(key, salt, info, expected.size(), 0, 0);
ASSERT_EQ(expected.size(), hkdf.client_write_key().size());
EXPECT_EQ(0, memcmp(expected.data(), hkdf.client_write_key().data(),
expected.size()));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_hkdf.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/quic_hkdf_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
71e6de22-7854-442a-a961-5d2995db949f | cpp | abseil/abseil-cpp | periodic_sampler | absl/profiling/internal/periodic_sampler.cc | absl/profiling/internal/periodic_sampler_test.cc | #include "absl/profiling/internal/periodic_sampler.h"
#include <atomic>
#include "absl/profiling/internal/exponential_biased.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
int64_t PeriodicSamplerBase::GetExponentialBiased(int period) noexcept {
return rng_.GetStride(period);
}
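// SubtleConfirmSample() handles the degenerate periods inline (0 = always
// off, 1 = always on) and otherwise draws a new exponentially biased stride.
// stride_ == 1 marks the first call after construction (the hot path has
// already incremented the zero-initialized stride); if the first drawn
// stride is longer than one step, that first call is not sampled.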
bool PeriodicSamplerBase::SubtleConfirmSample() noexcept {
int current_period = period();
if (ABSL_PREDICT_FALSE(current_period < 2)) {
stride_ = 0;
return current_period == 1;
}
if (ABSL_PREDICT_FALSE(stride_ == 1)) {
stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
if (static_cast<int64_t>(stride_) < -1) {
++stride_;
return false;
}
}
stride_ = static_cast<uint64_t>(-GetExponentialBiased(current_period));
return true;
}
}
ABSL_NAMESPACE_END
} | #include "absl/profiling/internal/periodic_sampler.h"
#include <thread>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace profiling_internal {
namespace {
using testing::Eq;
using testing::Return;
using testing::StrictMock;
class MockPeriodicSampler : public PeriodicSamplerBase {
public:
virtual ~MockPeriodicSampler() = default;
MOCK_METHOD(int, period, (), (const, noexcept));
MOCK_METHOD(int64_t, GetExponentialBiased, (int), (noexcept));
};
TEST(PeriodicSamplerBaseTest, Sample) {
StrictMock<MockPeriodicSampler> sampler;
EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(16));
EXPECT_CALL(sampler, GetExponentialBiased(16))
.WillOnce(Return(2))
.WillOnce(Return(3))
.WillOnce(Return(4));
EXPECT_FALSE(sampler.Sample());
EXPECT_TRUE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_TRUE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
}
TEST(PeriodicSamplerBaseTest, ImmediatelySample) {
StrictMock<MockPeriodicSampler> sampler;
EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16));
EXPECT_CALL(sampler, GetExponentialBiased(16))
.WillOnce(Return(1))
.WillOnce(Return(2))
.WillOnce(Return(3));
EXPECT_TRUE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_TRUE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
}
TEST(PeriodicSamplerBaseTest, Disabled) {
StrictMock<MockPeriodicSampler> sampler;
EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(0));
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
}
TEST(PeriodicSamplerBaseTest, AlwaysOn) {
StrictMock<MockPeriodicSampler> sampler;
EXPECT_CALL(sampler, period()).Times(3).WillRepeatedly(Return(1));
EXPECT_TRUE(sampler.Sample());
EXPECT_TRUE(sampler.Sample());
EXPECT_TRUE(sampler.Sample());
}
TEST(PeriodicSamplerBaseTest, Disable) {
StrictMock<MockPeriodicSampler> sampler;
EXPECT_CALL(sampler, period()).WillOnce(Return(16));
EXPECT_CALL(sampler, GetExponentialBiased(16)).WillOnce(Return(3));
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(0));
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
}
TEST(PeriodicSamplerBaseTest, Enable) {
StrictMock<MockPeriodicSampler> sampler;
EXPECT_CALL(sampler, period()).WillOnce(Return(0));
EXPECT_FALSE(sampler.Sample());
EXPECT_CALL(sampler, period()).Times(2).WillRepeatedly(Return(16));
EXPECT_CALL(sampler, GetExponentialBiased(16))
.Times(2)
.WillRepeatedly(Return(3));
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_TRUE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
EXPECT_FALSE(sampler.Sample());
}
TEST(PeriodicSamplerTest, ConstructConstInit) {
struct Tag {};
ABSL_CONST_INIT static PeriodicSampler<Tag> sampler;
(void)sampler;
}
TEST(PeriodicSamplerTest, DefaultPeriod0) {
struct Tag {};
PeriodicSampler<Tag> sampler;
EXPECT_THAT(sampler.period(), Eq(0));
}
TEST(PeriodicSamplerTest, DefaultPeriod) {
struct Tag {};
PeriodicSampler<Tag, 100> sampler;
EXPECT_THAT(sampler.period(), Eq(100));
}
TEST(PeriodicSamplerTest, SetGlobalPeriod) {
struct Tag1 {};
struct Tag2 {};
PeriodicSampler<Tag1, 25> sampler1;
PeriodicSampler<Tag2, 50> sampler2;
EXPECT_THAT(sampler1.period(), Eq(25));
EXPECT_THAT(sampler2.period(), Eq(50));
std::thread thread([] {
PeriodicSampler<Tag1, 25> sampler1;
PeriodicSampler<Tag2, 50> sampler2;
EXPECT_THAT(sampler1.period(), Eq(25));
EXPECT_THAT(sampler2.period(), Eq(50));
sampler1.SetGlobalPeriod(10);
sampler2.SetGlobalPeriod(20);
});
thread.join();
EXPECT_THAT(sampler1.period(), Eq(10));
EXPECT_THAT(sampler2.period(), Eq(20));
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/profiling/internal/periodic_sampler.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/profiling/internal/periodic_sampler_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
0699d932-314d-46cb-9ab3-3f3a267a4ea0 | cpp | tensorflow/tensorflow | quantized_mul_op | tensorflow/core/kernels/quantized_mul_op.cc | tensorflow/core/kernels/quantized_mul_op_test.cc | #define EIGEN_USE_THREADS
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#include <arm_neon.h>
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/meta_support.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace {
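// The helpers below multiply zero-point-adjusted values: each quantized
// input is shifted by its quantized representation of 0.0f, so the int32
// product (x - offset_x) * (y - offset_y) is exact; the float range of the
// output is derived from the input ranges afterwards.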
template <class T, class Toutput>
void ScalarMultiply(OpKernelContext* context, const T* full_input,
int32_t full_input_offset, int64_t num_elements,
T scalar_input, int32_t scalar_input_offset,
Toutput* output) {
const int32_t scalar_minus_offset =
static_cast<int32>(scalar_input) - scalar_input_offset;
for (int i = 0; i < num_elements; ++i) {
output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) *
scalar_minus_offset;
}
}
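// NEON specialization: process 16 lanes per iteration by widening u8 -> s16
// while subtracting the offset (vsubl_u8), then multiplying to s32 with
// vmull_s16; the scalar tail loop handles the remainder.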
#ifdef USE_NEON
template <>
void ScalarMultiply<quint8, qint32>(OpKernelContext* context,
const quint8* full_input,
int32 full_input_offset, int64 num_elements,
quint8 scalar_input,
int32 scalar_input_offset, qint32* output) {
const int16 scalar_minus_offset =
static_cast<int16>(scalar_input) - scalar_input_offset;
const int16x4_t scalar_minus_offset_16x4 = vmov_n_s16(scalar_minus_offset);
const uint8x8_t full_input_offset_8x8 = vmov_n_u8(full_input_offset);
int i;
for (i = 0; i < (num_elements - 15); i += 16) {
const uint8* full_input_ptr = &(full_input->value) + i;
const uint8x16_t full_input_8x16 = vld1q_u8(full_input_ptr);
const uint8x8_t full_input_high_8x8 = vget_high_u8(full_input_8x16);
const uint8x8_t full_input_low_8x8 = vget_low_u8(full_input_8x16);
const int16x8_t full_input_minus_offset_high_16x8 = vreinterpretq_s16_u16(
vsubl_u8(full_input_high_8x8, full_input_offset_8x8));
const int16x8_t full_input_minus_offset_low_16x8 = vreinterpretq_s16_u16(
vsubl_u8(full_input_low_8x8, full_input_offset_8x8));
const int16x4_t x_high_high_16x4 =
vget_high_s16(full_input_minus_offset_high_16x8);
const int16x4_t x_high_low_16x4 =
vget_low_s16(full_input_minus_offset_high_16x8);
const int16x4_t x_low_high_16x4 =
vget_high_s16(full_input_minus_offset_low_16x8);
const int16x4_t x_low_low_16x4 =
vget_low_s16(full_input_minus_offset_low_16x8);
const int32x4_t z_high_high_32x4 =
vmull_s16(x_high_high_16x4, scalar_minus_offset_16x4);
const int32x4_t z_high_low_32x4 =
vmull_s16(x_high_low_16x4, scalar_minus_offset_16x4);
const int32x4_t z_low_high_32x4 =
vmull_s16(x_low_high_16x4, scalar_minus_offset_16x4);
const int32x4_t z_low_low_32x4 =
vmull_s16(x_low_low_16x4, scalar_minus_offset_16x4);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, z_low_low_32x4);
vst1q_s32(output_ptr + 4, z_low_high_32x4);
vst1q_s32(output_ptr + 8, z_high_low_32x4);
vst1q_s32(output_ptr + 12, z_high_high_32x4);
}
for (; i < num_elements; ++i) {
output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) *
scalar_minus_offset;
}
}
#endif
template <class T, class Toutput>
void VectorMultiply(OpKernelContext* context, const T* x_data, int32_t offset_x,
const T* y_data, int32_t offset_y, int64_t num_elements,
Toutput* output) {
for (int i = 0; i < num_elements; ++i) {
output[i] = (static_cast<int32>(x_data[i]) - offset_x) *
(static_cast<int32>(y_data[i]) - offset_y);
}
}
#ifdef USE_NEON
template <>
void VectorMultiply<quint8, qint32>(OpKernelContext* context,
const quint8* x_data, int32 offset_x,
const quint8* y_data, int32 offset_y,
int64 num_elements, qint32* output) {
const uint8x8_t offset_x_8x8 = vmov_n_u8(offset_x);
const uint8x8_t offset_y_8x8 = vmov_n_u8(offset_y);
int i;
for (i = 0; i < (num_elements - 15); i += 16) {
const uint8* x_data_ptr = &(x_data->value) + i;
const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr);
const uint8* y_data_ptr = &(y_data->value) + i;
const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr);
const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16);
const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16);
const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16);
const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16);
const int16x8_t x_minus_offset_high_16x8 =
vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8));
const int16x8_t x_minus_offset_low_16x8 =
vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8));
const int16x8_t y_minus_offset_high_16x8 =
vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8));
const int16x8_t y_minus_offset_low_16x8 =
vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8));
const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8);
const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8);
const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8);
const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8);
const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8);
const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8);
const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8);
const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8);
const int32x4_t z_high_high_32x4 =
vmull_s16(x_high_high_16x4, y_high_high_16x4);
const int32x4_t z_high_low_32x4 =
vmull_s16(x_high_low_16x4, y_high_low_16x4);
const int32x4_t z_low_high_32x4 =
vmull_s16(x_low_high_16x4, y_low_high_16x4);
const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, z_low_low_32x4);
vst1q_s32(output_ptr + 4, z_low_high_32x4);
vst1q_s32(output_ptr + 8, z_high_low_32x4);
vst1q_s32(output_ptr + 12, z_high_high_32x4);
}
for (; i < num_elements; ++i) {
output[i] = (static_cast<int32>(x_data[i]) - offset_x) *
(static_cast<int32>(y_data[i]) - offset_y);
}
}
#endif
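// Broadcasts a vector across a tensor whose element count is a multiple of
// the vector's: tensor element i is multiplied by vector element
// (i % vector_num_elements).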
template <class T, class Toutput>
void VectorTensorMultiply(const T* vector_data, int32_t vector_offset,
int64_t vector_num_elements, const T* tensor_data,
int32_t tensor_offset, int64_t tensor_num_elements,
Toutput* output) {
for (int i = 0; i < tensor_num_elements; ++i) {
const int64_t vector_i = i % vector_num_elements;
output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) *
(static_cast<int32>(tensor_data[i]) - tensor_offset);
}
}
#ifdef USE_NEON
template <>
void VectorTensorMultiply<quint8, qint32>(
const quint8* vector_data, int32 vector_offset, int64 vector_num_elements,
const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements,
qint32* output) {
const uint8x8_t offset_x_8x8 = vmov_n_u8(vector_offset);
const uint8x8_t offset_y_8x8 = vmov_n_u8(tensor_offset);
CHECK_EQ(0, tensor_num_elements % vector_num_elements);
for (int base_i = 0; base_i < tensor_num_elements;
base_i += vector_num_elements) {
int i = base_i;
const int end_i = base_i + vector_num_elements;
int vector_i;
for (vector_i = 0; vector_i < (vector_num_elements - 15);
vector_i += 16, i += 16) {
const uint8* x_data_ptr = &(vector_data->value) + vector_i;
const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr);
const uint8* y_data_ptr = &(tensor_data->value) + i;
const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr);
const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16);
const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16);
const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16);
const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16);
const int16x8_t x_minus_offset_high_16x8 =
vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8));
const int16x8_t x_minus_offset_low_16x8 =
vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8));
const int16x8_t y_minus_offset_high_16x8 =
vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8));
const int16x8_t y_minus_offset_low_16x8 =
vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8));
const int16x4_t x_high_high_16x4 =
vget_high_s16(x_minus_offset_high_16x8);
const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8);
const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8);
const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8);
const int16x4_t y_high_high_16x4 =
vget_high_s16(y_minus_offset_high_16x8);
const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8);
const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8);
const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8);
const int32x4_t z_high_high_32x4 =
vmull_s16(x_high_high_16x4, y_high_high_16x4);
const int32x4_t z_high_low_32x4 =
vmull_s16(x_high_low_16x4, y_high_low_16x4);
const int32x4_t z_low_high_32x4 =
vmull_s16(x_low_high_16x4, y_low_high_16x4);
const int32x4_t z_low_low_32x4 =
vmull_s16(x_low_low_16x4, y_low_low_16x4);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, z_low_low_32x4);
vst1q_s32(output_ptr + 4, z_low_high_32x4);
vst1q_s32(output_ptr + 8, z_high_low_32x4);
vst1q_s32(output_ptr + 12, z_high_high_32x4);
}
for (; i < end_i; ++i, ++vector_i) {
output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) *
(static_cast<int32>(tensor_data[i]) - tensor_offset);
}
}
}
#endif
}
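// QuantizedMulOp dispatches on operand shape: scalar * tensor, elementwise
// vector * vector, or a vector broadcast against a 2-D tensor; any other
// broadcast pattern is reported as Unimplemented.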
template <class T, class Toutput>
class QuantizedMulOp : public OpKernel {
public:
explicit QuantizedMulOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& x = context->input(0);
const Tensor& y = context->input(1);
auto& min_x_tensor = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()),
errors::InvalidArgument("min_x must be a scalar"));
const float min_x = min_x_tensor.flat<float>()(0);
auto& max_x_tensor = context->input(3);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()),
errors::InvalidArgument("max_x must be a scalar"));
const float max_x = max_x_tensor.flat<float>()(0);
auto& min_y_tensor = context->input(4);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()),
errors::InvalidArgument("min_y must be a scalar"));
const float min_y = min_y_tensor.flat<float>()(0);
auto& max_y_tensor = context->input(5);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()),
errors::InvalidArgument("max_y must be a scalar"));
const float max_y = max_y_tensor.flat<float>()(0);
BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape()));
if (!bcast.IsValid()) {
context->SetStatus(errors::InvalidArgument(
"Incompatible shapes: ", x.shape().DebugString(), " vs. ",
y.shape().DebugString()));
return;
}
Tensor* z;
OP_REQUIRES_OK(context, context->allocate_output(
0, BCast::ToShape(bcast.output_shape()), &z));
OP_REQUIRES(context, (max_x > min_x),
errors::InvalidArgument("max_x must be larger than min_x."));
OP_REQUIRES(context, (max_y > min_y),
errors::InvalidArgument("max_y must be larger than min_y."));
const int32_t offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x);
const int32_t offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y);
const T* x_data = x.flat<T>().data();
const T* y_data = y.flat<T>().data();
Toutput* z_data = z->flat<Toutput>().data();
const int ndims = bcast.x_reshape().size();
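    // Dispatch on the broadcast rank: rank <= 1 covers scalar-times-tensor
    // and elementwise vector products, rank 2 covers a vector broadcast
    // against a higher-rank tensor, and anything else is rejected as
    // unimplemented.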
if (ndims <= 1) {
if (x.NumElements() == 1) {
ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(),
x_data[0], offset_x, z_data);
} else if (y.NumElements() == 1) {
ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(),
y_data[0], offset_y, z_data);
} else {
VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y,
x.NumElements(), z_data);
}
} else if (ndims == 2) {
const T* vector_data;
int64_t vector_num_elements;
int32_t vector_offset;
const T* tensor_data;
int64_t tensor_num_elements;
int32_t tensor_offset;
if (x.NumElements() < y.NumElements()) {
vector_data = x_data;
vector_num_elements = x.NumElements();
vector_offset = offset_x;
tensor_data = y_data;
tensor_num_elements = y.NumElements();
tensor_offset = offset_y;
} else {
vector_data = y_data;
vector_num_elements = y.NumElements();
vector_offset = offset_y;
tensor_data = x_data;
tensor_num_elements = x.NumElements();
tensor_offset = offset_x;
}
if (vector_num_elements == 0) {
context->SetStatus(
errors::InvalidArgument("vector must have at least 1 element"));
return;
}
VectorTensorMultiply<T, Toutput>(
vector_data, vector_offset, vector_num_elements, tensor_data,
tensor_offset, tensor_num_elements, z_data);
} else {
LOG(INFO) << "ndims=" << ndims;
LOG(INFO) << "bcast.x_reshape()="
<< TensorShape(bcast.x_reshape()).DebugString();
LOG(INFO) << "bcast.y_reshape()="
<< TensorShape(bcast.y_reshape()).DebugString();
LOG(INFO) << "bcast.x_bcast()="
<< TensorShape(bcast.x_bcast()).DebugString();
LOG(INFO) << "bcast.y_bcast()="
<< TensorShape(bcast.y_bcast()).DebugString();
context->SetStatus(errors::Unimplemented(
"Broadcast between ", context->input(0).shape().DebugString(),
" and ", context->input(1).shape().DebugString(),
" is not supported yet."));
return;
}
float min_z_value;
float max_z_value;
QuantizationRangeForMultiplication<T, T, Toutput>(
min_x, max_x, min_y, max_y, &min_z_value, &max_z_value);
Tensor* z_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min));
z_min->flat<float>()(0) = min_z_value;
Tensor* z_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max));
z_max->flat<float>()(0) = max_z_value;
}
};
REGISTER_KERNEL_BUILDER(Name("QuantizedMul")
.Device(DEVICE_CPU)
.TypeConstraint<quint8>("T1")
.TypeConstraint<quint8>("T2")
.TypeConstraint<qint32>("Toutput"),
QuantizedMulOp<quint8, qint32>);
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace ops {
namespace {
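// Quantizes the float inputs, runs QuantizedMul, dequantizes the result, and
// checks it against the expected float values within `tolerance`.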
void TestMul(const std::vector<int64_t>& x_shape,
const std::vector<float>& x_values, float x_min_value,
float x_max_value, const std::vector<int64_t>& y_shape,
const std::vector<float>& y_values, float y_min_value,
float y_max_value, const std::vector<int64_t>& expected_shape,
const std::vector<float>& expected_values, double tolerance) {
Scope root = Scope::NewRootScope();
Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
test::FillValues<float>(&x_float_tensor, x_values);
Tensor x_quantized_tensor(DT_QUINT8, x_float_tensor.shape());
FloatTensorToQuantizedInPlace<quint8>(x_float_tensor, x_min_value,
x_max_value, &x_quantized_tensor);
Output x =
Const(root.WithOpName("x"), Input::Initializer(x_quantized_tensor));
Output x_min = Const(root.WithOpName("x_min"), x_min_value);
Output x_max = Const(root.WithOpName("x_max"), x_max_value);
Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
test::FillValues<float>(&y_float_tensor, y_values);
Tensor y_quantized_tensor(DT_QUINT8, y_float_tensor.shape());
FloatTensorToQuantizedInPlace<quint8>(y_float_tensor, y_min_value,
y_max_value, &y_quantized_tensor);
Output y =
Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor));
Output y_min = Const(root.WithOpName("y_min"), y_min_value);
Output y_max = Const(root.WithOpName("y_max"), y_max_value);
QuantizedMul mul =
QuantizedMul(root.WithOpName("mul"), x, y, x_min, x_max, y_min, y_max);
TF_EXPECT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
{mul.z, mul.min_z, mul.max_z}, &outputs));
const Tensor& z_quantized = outputs[0];
const float z_min = outputs[1].flat<float>()(0);
const float z_max = outputs[2].flat<float>()(0);
Tensor z_float = QuantizedTensorToFloat<qint32>(z_quantized, z_min, z_max);
Tensor expected_z_float(DT_FLOAT, TensorShape(expected_shape));
test::FillValues<float>(&expected_z_float, expected_values);
test::ExpectTensorNear<float>(expected_z_float, z_float, tolerance);
}
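// Generates deterministic inputs for the given shapes, computes the expected
// result with the float Mul op, and verifies QuantizedMul against it.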
void TestMulShape(const std::vector<int64_t>& x_shape,
const std::vector<int64_t>& y_shape) {
const size_t x_num_elements = TensorShape(x_shape).num_elements();
std::vector<float> x_values(x_num_elements);
for (int i = 0; i < x_num_elements; ++i) {
x_values[i] = i % 256;
}
const float x_min_value = 0.0f;
const float x_max_value = 256.0f;
const size_t y_num_elements = TensorShape(y_shape).num_elements();
std::vector<float> y_values(y_num_elements);
for (int i = 0; i < y_num_elements; ++i) {
y_values[i] = ((i + 23) % 123) - 50;
}
const float y_min_value = -150.0f;
const float y_max_value = 150.0f;
Scope root = Scope::NewRootScope();
Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
test::FillValues<float>(&x_float_tensor, x_values);
Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
test::FillValues<float>(&y_float_tensor, y_values);
Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
Mul mul = Mul(root.WithOpName("mul"), x, y);
TF_EXPECT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {mul.z}, &outputs));
const Tensor& expected_values_tensor = outputs[0];
const float* expected_values_data =
expected_values_tensor.flat<float>().data();
std::vector<float> expected_values(
expected_values_data,
expected_values_data + expected_values_tensor.NumElements());
std::vector<int64_t> expected_shape;
for (const int64_t dim : expected_values_tensor.shape().dim_sizes()) {
expected_shape.push_back(dim);
}
TestMul(x_shape, x_values, x_min_value, x_max_value, y_shape, y_values,
y_min_value, y_max_value, expected_shape, expected_values, 256.0);
}
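// Micro-benchmark: runs QuantizedMul `iterations` times on inputs of the
// given shapes and logs throughput (total_duration is in microseconds, so
// ops per microsecond equals millions of ops per second).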
void TimeMul(const std::vector<int64_t>& x_shape,
const std::vector<int64_t>& y_shape, int64_t iterations) {
TestMulShape(x_shape, y_shape);
Scope root = Scope::NewRootScope();
Tensor x_quantized_tensor(DT_QUINT8, TensorShape(x_shape));
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_QUINT8);
Output x_min = Const(root.WithOpName("x_min"), 0.0f);
Output x_max = Const(root.WithOpName("x_max"), 1.0f);
Tensor y_quantized_tensor(DT_QUINT8, TensorShape(y_shape));
Output y =
Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor));
Output y_min = Const(root.WithOpName("y_min"), 0.0f);
Output y_max = Const(root.WithOpName("y_max"), 1.0f);
QuantizedMul mul = QuantizedMul(root.WithOpName("mul"), placeholder, y, x_min,
x_max, y_min, y_max);
TF_EXPECT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
int64_t total_duration = 0;
for (int i = 0; i < iterations; ++i) {
const int64_t start_time = Env::Default()->NowMicros();
TF_EXPECT_OK(session.Run({{placeholder, x_quantized_tensor}},
{mul.z, mul.min_z, mul.max_z}, &outputs));
const int64_t end_time = Env::Default()->NowMicros();
total_duration += end_time - start_time;
}
const int64_t one_run_duration = total_duration / iterations;
const int64_t num_ops = outputs[0].NumElements();
const double million_ops_per_second =
(iterations * num_ops) / static_cast<double>(total_duration);
LOG(INFO) << "TimeMul: " << TensorShape(x_shape).DebugString() << " * "
<< TensorShape(y_shape).DebugString()
<< ": iterations=" << iterations
<< ", MOps/s=" << million_ops_per_second
<< ", one_run_duration=" << one_run_duration
<< ", total_duration=" << total_duration;
}
void TestManualScalar() {
TestMul(
{10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {1}, {10.0f}, -100.0f, 100.0f, {10},
{10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f},
3.0f);
TestMul(
{1}, {10.0f}, -100.0f, 100.0f, {10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {10},
{10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f, 70.0f, 80.0f, 90.0f, 100.0f},
3.0f);
}
void TestScalar() {
TestMulShape({100}, {1});
TestMulShape({1}, {100});
}
void TestManualVector() {
TestMul({10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f},
0.0f, 10.0f, {10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {10},
{1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f, 64.0f, 81.0f, 100.0f},
3.0f);
}
void TestVector() { TestMulShape({100}, {100}); }
void TestManualVectorTimesTensor() {
TestMul(
{10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {2, 10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
0.0f, 20.0f, {2, 10}, {1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f,
64.0f, 81.0f, 100.0f, 11.0f, 24.0f, 39.0f, 56.0f,
75.0f, 96.0f, 119.0f, 144.0f, 171.0f, 200.0f},
3.0f);
TestMul({2, 10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
0.0f, 20.0f, {10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {2, 10}, {1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f,
64.0f, 81.0f, 100.0f, 11.0f, 24.0f, 39.0f, 56.0f,
75.0f, 96.0f, 119.0f, 144.0f, 171.0f, 200.0f},
3.0f);
TestMul(
{5, 2}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f},
0.0f, 10.0f, {2, 5, 2},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
0.0f, 20.0f, {2, 5, 2},
{1.0f, 4.0f, 9.0f, 16.0f, 25.0f, 36.0f, 49.0f,
64.0f, 81.0f, 100.0f, 11.0f, 24.0f, 39.0f, 56.0f,
75.0f, 96.0f, 119.0f, 144.0f, 171.0f, 200.0f},
3.0f);
}
void TestVectorTimesTensor() {
TestMulShape({100}, {2, 100});
TestMulShape({2, 100}, {100});
TestMulShape({5, 2}, {2, 5, 2});
}
void BenchmarkTensorScalar() {
TimeMul({200}, {1}, 10000);
TimeMul({10000}, {1}, 1000);
TimeMul({1000000}, {1}, 100);
TimeMul({10000000}, {1}, 100);
}
void BenchmarkVector() {
TimeMul({200}, {200}, 10000);
TimeMul({10000}, {10000}, 1000);
TimeMul({1000000}, {1000000}, 100);
TimeMul({10000000}, {10000000}, 100);
}
void BenchmarkVectorTimesTensor() {
TimeMul({10, 20}, {20}, 10000);
TimeMul({10, 1000}, {1000}, 1000);
TimeMul({1000, 1000}, {1000}, 100);
TimeMul({10000, 1000}, {1000}, 100);
TimeMul({100, 100}, {100}, 1000);
TimeMul({10000, 100}, {100}, 100);
TimeMul({100000, 100}, {100}, 100);
}
}
}
}
#define RUN_TEST(t) \
TEST(QuantizedMulOpTest, t) { tensorflow::ops::t(); }
RUN_TEST(TestManualScalar);
RUN_TEST(TestManualVector);
RUN_TEST(TestManualVectorTimesTensor);
RUN_TEST(TestScalar);
RUN_TEST(TestVector);
RUN_TEST(TestVectorTimesTensor);
#if defined(__ANDROID__)
RUN_TEST(BenchmarkTensorScalar);
RUN_TEST(BenchmarkVector);
RUN_TEST(BenchmarkVectorTimesTensor);
#endif
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_mul_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_mul_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df627059-2ef8-47c7-880a-5779a2e8d0a0 | cpp | tensorflow/tensorflow | hlo_unstacker | third_party/xla/xla/service/hlo_unstacker.cc | third_party/xla/xla/service/hlo_unstacker_test.cc | #include "xla/service/hlo_unstacker.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_util.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
enum class PatternType {
DSFusionNoBitcastPattern,
DSFusionPattern,
NestedDSFusionPattern,
Other,
};
static std::string PatternTypeToString(PatternType pattern_type) {
switch (pattern_type) {
case PatternType::DSFusionNoBitcastPattern:
return "DSFusionNoBitcastPattern";
case PatternType::DSFusionPattern:
return "DSFusionPattern";
case PatternType::NestedDSFusionPattern:
return "NestedDSFusionPattern";
case PatternType::Other:
return "Other";
}
}
struct PatternInfo {
PatternType type;
std::vector<const HloInstruction*> unstacked_instrs;
const HloInstruction* instr;
Shape unstacked_shape;
HloComputation* unstacking_computation;
std::string ToString() const {
if (unstacking_computation == nullptr) {
return absl::StrCat("type: \n\t", PatternTypeToString(type), "\n",
"instr: \n\t", instr->name(), "\n", "shape: \n\t",
unstacked_shape.ToString(true));
} else {
return absl::StrCat("type: \n\t", PatternTypeToString(type), "\n",
"instr: \n\t", instr->name(), "\n", "shape: \n\t",
unstacked_shape.ToString(true), "\n", "comp: \n",
unstacking_computation->name());
}
}
};
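// Analysis state shared across the pass: the while bodies that
// WhileLoopUnroller reports as unrollable (with their loop configs), a map
// from each body back to its while instruction, the registered
// (pattern-detector, rewrite-handler) pairs, and a predicate that decides
// whether a hoisted slice may remain unfused.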
struct UnstackerMetadata {
static absl::StatusOr<UnstackerMetadata> Create(
HloModule* module, std::function<bool(HloInstruction*)> unfuse_slice) {
UnstackerMetadata metadata;
TF_ASSIGN_OR_RETURN(
bool prepared,
WhileLoopUnroller::PrepareModuleForUnrolling(module, {}));
if (prepared) {
VLOG(3) << "Prepared module: " << module->name() << " for unstacking.";
}
std::vector<std::pair<HloInstruction*, WhileLoopConfig>> loops =
WhileLoopUnroller::GetUnrollableLoops(module, {},
std::nullopt);
for (const auto& [instr, while_loop_config] : loops) {
metadata.unrollable_loop_bodies[instr->while_body()] = while_loop_config;
metadata.bodies[instr->while_body()] = instr;
}
metadata.unfuse_slice = unfuse_slice;
return metadata;
}
absl::flat_hash_map<HloComputation*, WhileLoopConfig> unrollable_loop_bodies;
absl::flat_hash_map<const HloComputation*, HloInstruction*> bodies;
std::vector<
std::pair<std::function<std::optional<PatternInfo>(
const UnstackerMetadata&, const HloInstruction*, int64_t)>,
std::function<absl::Status(HloInstruction*, const Shape&)>>>
custom_handlers;
std::function<bool(HloInstruction*)> unfuse_slice;
};
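// Accumulates everything needed to unstack one while-loop operand: the
// unstacked tuple shape, the computation that extracts a single layer, and
// deferred operand/body/loop mutations that are applied only once the whole
// propagation is known to succeed.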
class UnstackerTransformer {
public:
explicit UnstackerTransformer(const UnstackerMetadata& metadata)
: metadata_(metadata) {}
std::vector<const HloInstruction*> HandleInstruction(
const HloInstruction* instr, int64_t changed_idx) {
if (instr->opcode() != HloOpcode::kFusion) {
return {};
}
VLOG(3) << "HandleInstruction(" << instr->shape().ToString()
<< instr->name() << ", " << changed_idx << ")";
for (const auto& [custom_pattern, custom_handler] :
metadata_.custom_handlers) {
std::optional<PatternInfo> stacked_user =
custom_pattern(metadata_, instr, changed_idx);
if (!stacked_user.has_value()) {
continue;
}
PatternInfo& pattern_info = stacked_user.value();
pattern_type_ = pattern_info.type;
VLOG(3) << "PatternInfo:" << "\n" << pattern_info.ToString();
if (pattern_info.unstacking_computation != nullptr &&
unstacking_computation_ != nullptr) {
if (!absl::EqualsIgnoreCase(
pattern_info.unstacking_computation->ToString(
HloPrintOptions::Fingerprint()),
unstacking_computation_->ToString(
HloPrintOptions::Fingerprint()))) {
VLOG(3) << "Seen multiple unstacking computations, cannot handle: "
<< "\n previous computations: \n"
<< unstacking_computation_->ToString(
HloPrintOptions::Fingerprint())
<< "\n current computations: \n"
<< pattern_info.unstacking_computation->ToString(
HloPrintOptions::Fingerprint());
return {};
}
}
if (pattern_info.unstacking_computation != nullptr) {
unstacking_computation_ = pattern_info.unstacking_computation;
}
unstacked_shape_ = std::make_unique<Shape>(pattern_info.unstacked_shape);
unstacked_instrs_.push_back(instr);
std::function<absl::Status()> unstack_wrapper =
[&custom_handler = custom_handler,
pattern_info]() mutable -> absl::Status {
HloInstruction* mutable_dynamic_slicing_fusion =
const_cast<HloInstruction*>(pattern_info.instr);
return custom_handler(mutable_dynamic_slicing_fusion,
pattern_info.unstacked_shape.tuple_shapes(0));
};
body_changes_.push_back(unstack_wrapper);
return pattern_info.unstacked_instrs;
}
return {};
}
const UnstackerMetadata& GetMetadata() const { return metadata_; }
std::vector<const HloInstruction*>& GetUnstackedInstructions() {
return unstacked_instrs_;
}
const Shape* GetUnstackedShape() const { return unstacked_shape_.get(); }
HloComputation* GetUnstackingComputation() const {
return unstacking_computation_;
}
std::vector<std::function<void(const UnstackerTransformer&)>>&
GetLoopChanges() {
return loop_changes_;
}
std::vector<std::function<absl::Status()>>& GetBodyChanges() {
return body_changes_;
}
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>>&
GetOperandChanges() {
return operand_changes_;
}
void AddOperandChange(HloInstruction* instr, int64_t index) {
operand_changes_[instr].push_back(index);
}
void AddLoopChange(
std::function<void(const UnstackerTransformer&)> loop_change) {
loop_changes_.push_back(loop_change);
}
PatternType GetPatternType() const { return pattern_type_; }
private:
PatternType pattern_type_;
const UnstackerMetadata& metadata_;
std::unique_ptr<Shape> unstacked_shape_ = nullptr;
HloComputation* unstacking_computation_ = nullptr;
std::vector<std::function<void(const UnstackerTransformer&)>> loop_changes_;
std::vector<std::function<absl::Status()>> body_changes_;
absl::flat_hash_map<HloInstruction*, std::vector<int64_t>> operand_changes_;
std::vector<const HloInstruction*> unstacked_instrs_;
};
bool CanUnstackWhileOperand(const HloInstruction* while_instr,
UnstackerTransformer& unstacker, int64_t index);
bool UnstackWhileOperandAtIndex(
const UnstackerMetadata& metadata, HloInstruction* while_instr,
int64_t index, std::vector<const HloInstruction*>& unstacked_instructions);
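// Breadth-first walk over the users of `gte`, recording every instruction
// whose shape must change if the tuple element is unstacked. Returns false
// as soon as a user is reached that no registered pattern can handle.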
bool PropagateGteShapeChange(HloInstruction* gte,
UnstackerTransformer& unstacker) {
VLOG(5) << "PropagateGteShapeChange(" << gte->name() << ")";
std::vector<const HloInstruction*> handled_instrs;
absl::flat_hash_map<HloInstruction*, int64_t> visited;
std::deque<HloInstruction*> worklist;
worklist.push_back(gte);
visited.insert({gte, gte->tuple_index()});
while (!worklist.empty()) {
HloInstruction* changed_instr_to_propagate = worklist.front();
int64_t changed_operand_index =
FindOrDie(visited, changed_instr_to_propagate);
worklist.pop_front();
for (HloInstruction* user : changed_instr_to_propagate->users()) {
if (ContainsKey(visited, user)) {
continue;
}
if (user->opcode() == HloOpcode::kGetTupleElement) {
if (user->tuple_index() != changed_operand_index) {
continue;
}
visited.insert({user, changed_operand_index});
worklist.push_back(user);
} else if (user->opcode() == HloOpcode::kTuple) {
int64_t use_index = user->operand_index(changed_instr_to_propagate);
visited.insert({user, use_index});
worklist.push_back(user);
} else if (user->opcode() == HloOpcode::kWhile) {
bool changed_nested_while =
CanUnstackWhileOperand(user, unstacker, changed_operand_index);
if (!changed_nested_while) {
return false;
}
visited.insert({user, changed_operand_index});
worklist.push_back(user);
} else {
if (absl::c_find(handled_instrs, user) != handled_instrs.end()) {
continue;
}
if (user->IsCustomCall("DynamicGte") ||
user->IsCustomCall("DynamicTuple")) {
continue;
}
int64_t use_index = user->operand_index(changed_instr_to_propagate);
std::vector<const HloInstruction*> curr_handled_instrs =
unstacker.HandleInstruction(user, use_index);
if (curr_handled_instrs.empty()) {
VLOG(3) << "Custom unstacker not found for " << user->name();
return false;
}
for (const HloInstruction* instr : curr_handled_instrs) {
for (HloInstruction* handled_instr_user : instr->users()) {
if (user->shape() == gte->shape()) {
visited.insert({handled_instr_user, changed_operand_index});
worklist.push_back(handled_instr_user);
}
}
handled_instrs.push_back(instr);
}
}
}
}
for (const auto& [instr, index] : visited) {
unstacker.AddOperandChange(instr, index);
}
return true;
}
bool CanPropagateGteShapeChangesInComputation(
const HloComputation* comp, const HloInstruction* operand,
UnstackerTransformer& shape_transformer, int64_t idx) {
VLOG(3) << "Propagating shape change of index " << idx
<< " in : " << comp->name();
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kGetTupleElement &&
instr->tuple_index() == idx) {
if (instr->operand(0) != operand) {
continue;
}
bool can_propagate = PropagateGteShapeChange(instr, shape_transformer);
if (!can_propagate) {
VLOG(3) << "Failed to propagate shape change for " << instr->name();
return false;
}
}
}
VLOG(3) << "Finish propagating shape change of index " << idx
<< " in: " << comp->name();
return true;
}
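// Rewrites the while-init operand at `index` into a tuple of per-layer
// values. For the dynamic-slice patterns a plain slice is tried first; if
// the slice may not stay unfused, a clone of the unstacking computation is
// hoisted out of the loop as a fusion for each layer.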
void UnstackWhileInput(const UnstackerTransformer& unstacker,
HloInstruction* while_instr, int64_t index) {
VLOG(3) << "Unstacking while input: " << while_instr->name() << " at "
<< index;
const Shape* new_shape = unstacker.GetUnstackedShape();
HloComputation* unstacking_computation = unstacker.GetUnstackingComputation();
const Shape& slice_shape = new_shape->tuple_shapes(0);
HloInstruction* old_while_input =
while_instr->while_init()->mutable_operand(index);
if (old_while_input->shape().IsTuple()) {
VLOG(3) << "Input is already unstacked: " << old_while_input->name();
return;
}
std::vector<HloInstruction*> slices;
if (old_while_input->IsCustomCall("AllocateBuffer")) {
for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {
slices.push_back(while_instr->AddInstruction(
HloInstruction::CreateCustomCall(slice_shape, {}, "AllocateBuffer")));
}
} else {
for (int64_t i = 0; i < new_shape->tuple_shapes_size(); ++i) {
HloInstruction* root_instr = unstacking_computation->root_instruction();
HloInstruction* slice = nullptr;
if (unstacker.GetPatternType() == PatternType::DSFusionPattern ||
unstacker.GetPatternType() == PatternType::NestedDSFusionPattern ||
unstacker.GetPatternType() == PatternType::DSFusionNoBitcastPattern) {
HloInstruction* dynamic_slice = nullptr;
if (unstacker.GetPatternType() == PatternType::DSFusionPattern ||
unstacker.GetPatternType() == PatternType::NestedDSFusionPattern) {
dynamic_slice = root_instr->mutable_operand(0);
} else if (unstacker.GetPatternType() ==
PatternType::DSFusionNoBitcastPattern) {
dynamic_slice = root_instr;
}
std::vector<int64_t> new_start_indices;
new_start_indices.reserve(dynamic_slice->shape().rank());
std::vector<int64_t> new_limit_indices;
new_limit_indices.reserve(dynamic_slice->shape().rank());
std::vector<int64_t> new_strides;
new_strides.reserve(dynamic_slice->shape().rank());
new_start_indices.push_back(i);
new_limit_indices.push_back(i + 1);
new_strides.push_back(1);
for (int64_t j = 1; j < dynamic_slice->shape().rank(); ++j) {
new_start_indices.push_back(0);
new_limit_indices.push_back(
dynamic_slice->mutable_operand(0)->shape().dimensions(j));
new_strides.push_back(1);
}
slice = while_instr->AddInstruction(HloInstruction::CreateSlice(
dynamic_slice->shape(), old_while_input, new_start_indices,
new_limit_indices, new_strides));
}
if (slice == nullptr || !unstacker.GetMetadata().unfuse_slice(slice)) {
std::vector<HloInstruction*> operands = {
old_while_input,
while_instr->AddInstruction(MakeScalarConstantWithShape(
unstacking_computation->parameter_instruction(1)->shape(), i))};
slice = while_instr->AddInstruction(HloInstruction::CreateFusion(
slice_shape, HloInstruction::FusionKind::kLoop, operands,
while_instr->GetModule()->AddEmbeddedComputation(
unstacking_computation->Clone()),
"hoisted"));
}
slices.push_back(slice);
}
}
HloInstruction* new_operand_element =
while_instr->AddInstruction(HloInstruction::CreateTuple(slices));
HloInstruction* new_while_init =
TupleUtil::ReplaceTupleWith(new_operand_element,
while_instr->while_init(), {index}, false)
.value();
CHECK_OK(while_instr->ReplaceOperandWithDifferentShape(0, new_while_init));
}
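// Checks that the shape change at `index` can be propagated through the
// loop body, condition, and enclosing computation, and queues the rewrite
// of the loop's parameter/result shapes for later application.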
bool CanUnstackWhileOperand(const HloInstruction* while_instr,
UnstackerTransformer& unstacker, int64_t index) {
VLOG(5) << "ReplaceWhileOperandShape: " << while_instr->name() << " at "
<< index;
bool body_changes_collected = CanPropagateGteShapeChangesInComputation(
while_instr->while_body(),
while_instr->while_body()->parameter_instruction(0), unstacker, index);
if (!body_changes_collected) {
return false;
}
bool condition_changes_collected = CanPropagateGteShapeChangesInComputation(
while_instr->while_condition(),
while_instr->while_condition()->parameter_instruction(0), unstacker,
index);
if (!condition_changes_collected) {
return false;
}
bool parent_changes_collected = CanPropagateGteShapeChangesInComputation(
while_instr->parent(), while_instr, unstacker, index);
if (!parent_changes_collected) {
VLOG(3) << "Failed: parent_changes_collected";
return false;
}
HloInstruction* root_operand =
while_instr->while_body()->root_instruction()->mutable_operand(index);
if (root_operand == nullptr) {
return false;
}
HloInstruction* gte_operand = nullptr;
if (Match(root_operand, match::GetTupleElement(match::Op(&gte_operand)))) {
if (Match(gte_operand, match::While())) {
VLOG(3) << "Faced a gte originating from loop: "
<< root_operand->ToString();
bool loop_feeding_root_changes_collected = CanUnstackWhileOperand(
root_operand->operand(0), unstacker, root_operand->tuple_index());
if (!loop_feeding_root_changes_collected) {
VLOG(3) << "Failed: loop " << root_operand->operand(0)->name()
<< " output at " << index << " is not unstackable";
return false;
}
} else if (!Match(gte_operand, match::Parameter().WithParameterNum(0))) {
VLOG(3) << "Failed: root operand of while_body at " << index
<< " is not a parameter";
return false;
}
}
auto loop_change = [=](const UnstackerTransformer& unstacker,
HloInstruction* loop, int64_t idx) mutable {
Shape old_shape = ShapeUtil::MakeStaticShape(
loop->while_body()->parameter_instruction(0)->shape());
ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), idx,
&old_shape);
loop->while_body()->ReplaceParameter(
0, HloInstruction::CreateParameter(0, old_shape, "unstacked"));
loop->while_condition()->ReplaceParameter(
0, HloInstruction::CreateParameter(0, old_shape, "unstacked"));
CHECK_NE(unstacker.GetUnstackingComputation(), nullptr);
UnstackWhileInput(unstacker, loop, idx);
*loop->mutable_shape() = old_shape;
};
// Capture `loop_change` by value: this wrapper is stored in the transformer
// and invoked after the current scope has ended, so a reference capture
// would dangle.
auto loop_change_wrapper = [loop_change, while_instr, index](
const UnstackerTransformer& unstacker) mutable {
HloInstruction* mutable_loop = const_cast<HloInstruction*>(while_instr);
loop_change(unstacker, mutable_loop, index);
};
unstacker.AddLoopChange(loop_change_wrapper);
return true;
}
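// Attempts to unstack the operand of `while_instr` at `index`: runs the
// feasibility analysis above and, on success, applies the collected shape,
// body, and loop changes in order.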
bool UnstackWhileOperandAtIndex(
const UnstackerMetadata& metadata, HloInstruction* while_instr,
int64_t index, std::vector<const HloInstruction*>& unstacked_instructions) {
UnstackerTransformer unstacker = UnstackerTransformer(metadata);
bool can_unstack = CanUnstackWhileOperand(while_instr, unstacker, index);
if (!can_unstack) {
VLOG(3) << "Unstacking failed for " << while_instr->name() << " at "
<< index;
return false;
}
if (unstacker.GetUnstackedShape() == nullptr) {
VLOG(3) << "Failed: unstacked shape is null";
return false;
}
if (unstacker.GetUnstackingComputation() == nullptr) {
VLOG(3) << "Failed: unstacking computation is null";
return false;
}
for (auto& [instr, indices] : unstacker.GetOperandChanges()) {
switch (instr->opcode()) {
case HloOpcode::kGetTupleElement:
VLOG(3) << "Changing shape of: " << instr->name();
*instr->mutable_shape() = *unstacker.GetUnstackedShape();
break;
case HloOpcode::kTuple: {
for (int64_t index : indices) {
VLOG(3) << "Changing shape of: " << instr->name() << " at " << index;
*instr->mutable_shape()->mutable_tuple_shapes(index) =
*unstacker.GetUnstackedShape();
}
break;
}
case HloOpcode::kWhile:
for (int64_t index : indices) {
VLOG(3) << "Changing shape of: " << instr->name() << " at " << index;
ShapeUtil::UpdateTupleShape(*unstacker.GetUnstackedShape(), index,
instr->mutable_shape());
}
break;
default:
LOG(FATAL) << "Unsupported opcode: " << instr->name();
}
}
for (const auto& body_change : unstacker.GetBodyChanges()) {
CHECK_OK(body_change());
}
for (auto& loop_change : unstacker.GetLoopChanges()) {
loop_change(unstacker);
}
for (const HloInstruction* instr : unstacker.GetUnstackedInstructions()) {
unstacked_instructions.push_back(instr);
}
return true;
}
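// The unstacked representation of a stacked operand is a tuple holding
// `layers` copies of the per-layer slice shape.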
Shape MakeUnstackedShapeFromSlice(const Shape& slice_shape, int64_t layers) {
std::vector<Shape> shapes;
shapes.reserve(layers);
for (int64_t i = 0; i < layers; ++i) {
shapes.push_back(slice_shape);
}
return ShapeUtil::MakeTupleShape(shapes);
}
std::optional<WhileLoopConfig> IsFusionInsideUnrollableLoopWithNumParameter(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t num_fusion_params) {
if (instr->opcode() != HloOpcode::kFusion) {
return std::nullopt;
}
if (instr->fused_parameters().size() != num_fusion_params) {
VLOG(3) << "Fusion has different number of parameters";
return std::nullopt;
}
if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
VLOG(5) << "Fusion not inside unrollable while body, " << instr->name()
<< " inside " << instr->parent()->name();
return std::nullopt;
}
return metadata.unrollable_loop_bodies.at(instr->parent());
}
HloInstruction* GetMostMajorEffectivelyStaticDynamicSliceInFusion(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t num_fusion_params, int64_t stacked_operand_idx) {
std::optional<WhileLoopConfig> while_instr_config =
IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,
num_fusion_params);
if (!while_instr_config.has_value()) {
return nullptr;
}
for (HloInstruction* fused_instr :
instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
std::optional<int64_t> dynamic_index =
MatchEffectivelyStaticDynamicSliceInsideLoop(
fused_instr,
instr->fused_instructions_computation()->parameter_instruction(
stacked_operand_idx),
while_instr_config.value());
if (dynamic_index.has_value() && dynamic_index.value() == 0) {
return fused_instr;
}
}
return nullptr;
}
HloInstruction* GetMostMajorShapeCoveringDynamicIndexInFusion(
const UnstackerMetadata& metadata, const HloInstruction* instr,
HloOpcode opcode, int64_t num_fusion_params, int64_t stacked_operand_idx) {
std::optional<WhileLoopConfig> while_instr_config =
IsFusionInsideUnrollableLoopWithNumParameter(metadata, instr,
num_fusion_params);
if (!while_instr_config.has_value()) {
return nullptr;
}
for (HloInstruction* fused_instr :
instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (fused_instr->opcode() != opcode) {
continue;
}
std::optional<int64_t> dynamic_index =
MatchShapeCoveringDynamicIndexInstruction(
fused_instr,
instr->fused_instructions_computation()->parameter_instruction(
stacked_operand_idx),
opcode, while_instr_config.value());
if (dynamic_index.has_value() && dynamic_index.value() == 0) {
return fused_instr;
}
}
return nullptr;
}
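// Matches a two-parameter fusion whose root is
// bitcast(dynamic-slice(stacked, i, 0, ...)) with an effectively static
// slice index across loop iterations.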
std::optional<PatternInfo> GetDSFusionPattern(const UnstackerMetadata& metadata,
const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSFusion";
HloInstruction* shape_covering_instr =
GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
HloInstruction* bitcast_operand = nullptr;
if (Match(instr->fused_instructions_computation()->root_instruction(),
match::Bitcast(match::Op(&bitcast_operand)))) {
if (bitcast_operand == shape_covering_instr) {
PatternInfo pattern_info;
pattern_info.type = PatternType::DSFusionPattern;
pattern_info.instr = instr;
const Shape& slice_shape = shape_covering_instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation =
instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
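// Rewrites the matched fusion into a "DynamicGte" custom call that selects
// the per-layer tuple element, followed by a bitcast back to the fusion's
// original shape.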
absl::Status UnstackDSFusionPattern(
HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();
HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);
HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(
HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),
new_operand));
return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
bitcast);
}
std::optional<PatternInfo> GetDSFusionNoBitcastPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSFusionNoBitcast";
HloInstruction* shape_covering_instr =
GetMostMajorEffectivelyStaticDynamicSliceInFusion(metadata, instr, 2,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
if (instr->fused_instructions_computation()->root_instruction() !=
shape_covering_instr) {
return std::nullopt;
}
PatternInfo pattern_info;
pattern_info.type = PatternType::DSFusionNoBitcastPattern;
pattern_info.instr = instr;
const Shape& slice_shape = shape_covering_instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation = instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
absl::Status UnstackDSFusionNoBitcastPattern(
HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_slicing_fusion->parent();
HloInstruction* stacked = mutable_dynamic_slicing_fusion->mutable_operand(0);
HloInstruction* offset = mutable_dynamic_slicing_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
return mutable_dynamic_slicing_fusion->ReplaceAllUsesWithDifferentShape(
new_operand);
}
std::optional<PatternInfo> GetDUSFusionPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DUSFusion";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
if (Match(shape_covering_instr->operand(1),
match::Bitcast(match::Parameter()))) {
if (shape_covering_instr->parent()->root_instruction() ==
shape_covering_instr) {
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(
instr->operand(2)->shape(), instr->operand(0)->shape().dimensions(0));
pattern_info.unstacking_computation = nullptr;
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackDUSFusionPattern(
HloInstruction* mutable_dynamic_update_slicing_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();
HloInstruction* stacked =
mutable_dynamic_update_slicing_fusion->mutable_operand(0);
HloInstruction* offset =
mutable_dynamic_update_slicing_fusion->mutable_operand(1);
HloInstruction* update =
mutable_dynamic_update_slicing_fusion->mutable_operand(2);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
stacked->shape(), {stacked, update, offset}, "DynamicTuple"));
for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {
TF_RETURN_IF_ERROR(
mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(
user, new_operand));
}
return absl::OkStatus();
}
std::optional<PatternInfo> GetDUSFusionWithPadPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DUSFusionWithPad";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicUpdateSlice, 3,
stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
if (Match(
shape_covering_instr->operand(1),
match::Bitcast(match::Pad(match::Parameter(), match::Constant())))) {
if (shape_covering_instr->parent()->root_instruction() ==
shape_covering_instr) {
const HloInstruction* pad_instr =
shape_covering_instr->operand(1)->operand(0);
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
pattern_info.unstacked_shape = MakeUnstackedShapeFromSlice(
pad_instr->shape(),
shape_covering_instr->operand(0)->shape().dimensions(0));
pattern_info.unstacking_computation = nullptr;
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackDUSFusionWithPadPattern(
HloInstruction* mutable_dynamic_update_slicing_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_update_slicing_fusion->parent();
HloComputation* fused_computation =
mutable_dynamic_update_slicing_fusion->fused_instructions_computation();
HloInstruction* stacked =
mutable_dynamic_update_slicing_fusion->mutable_operand(
fused_computation->root_instruction()
->mutable_operand(0)
->parameter_number());
HloInstruction* offset =
mutable_dynamic_update_slicing_fusion->mutable_operand(
fused_computation->root_instruction()
->mutable_operand(2)
->parameter_number());
HloInstruction* pad_instr = fused_computation->root_instruction()
->mutable_operand(1)
->mutable_operand(0);
fused_computation->set_root_instruction(pad_instr, true);
*mutable_dynamic_update_slicing_fusion->mutable_shape() = pad_instr->shape();
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
stacked->shape(),
{stacked, mutable_dynamic_update_slicing_fusion, offset},
"DynamicTuple"));
for (HloInstruction* user : mutable_dynamic_update_slicing_fusion->users()) {
if (user != new_operand) {
TF_RETURN_IF_ERROR(
mutable_dynamic_update_slicing_fusion->ReplaceUseWithDifferentShape(
user, new_operand));
}
}
return absl::OkStatus();
}
std::optional<PatternInfo> GetDSFusionWithAddPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSFusionWithAdd";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
HloComputation* fused_computation = instr->fused_instructions_computation();
HloInstruction* fusion_root = fused_computation->root_instruction();
HloInstruction* add_operand;
if (Match(fusion_root,
match::Reduce(match::Add(match::Op(&add_operand),
match::Broadcast(match::Constant())),
match::Constant()))) {
if (add_operand == shape_covering_instr) {
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(instr->shape(), num_layers);
HloComputation::Builder builder("unstack_add");
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, fused_computation->parameter_instruction(0)->shape(), "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(
1, fused_computation->parameter_instruction(1)->shape(), "p1"));
HloInstruction* zero =
builder.AddInstruction(MakeScalarConstantWithShape(p1->shape(), 0));
std::vector<HloInstruction*> slice_starts;
slice_starts.reserve(shape_covering_instr->shape().rank());
slice_starts.push_back(p1);
for (int64_t i = 0; i < shape_covering_instr->shape().rank() - 1; i++) {
slice_starts.push_back(zero);
}
HloInstruction* slice =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
shape_covering_instr->shape(), p0, slice_starts,
shape_covering_instr->dynamic_slice_sizes()));
HloInstruction* zero_reduce =
builder.AddInstruction(MakeScalarConstantWithShape(
ShapeUtil::MakeScalarShape(slice->shape().element_type()), 0));
HloInstruction* reduce =
builder.AddInstruction(HloInstruction::CreateReduce(
instr->shape(), slice, zero_reduce, fusion_root->dimensions(),
fused_computation->root_instruction()->to_apply()));
HloComputation* unstack_add =
instr->GetModule()->AddEmbeddedComputation(builder.Build());
unstack_add->set_root_instruction(reduce);
pattern_info.unstacking_computation = unstack_add;
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackDSFusionWithAddPattern(
HloInstruction* mutable_dynamic_slice_with_add_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_dynamic_slice_with_add_fusion->parent();
HloInstruction* stacked =
mutable_dynamic_slice_with_add_fusion->mutable_operand(0);
HloInstruction* offset =
mutable_dynamic_slice_with_add_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
HloInstruction* one = parent_loop->AddInstruction(MakeScalarConstantWithShape(
ShapeUtil::MakeScalarShape(slice_shape.element_type()), 1));
HloInstruction* broadcast = parent_loop->AddInstruction(
HloInstruction::CreateBroadcast(slice_shape, one, {}));
HloInstruction* add = mutable_dynamic_slice_with_add_fusion->AddInstruction(
HloInstruction::CreateBinary(new_operand->shape(), HloOpcode::kAdd,
new_operand, broadcast));
TF_RETURN_IF_ERROR(
mutable_dynamic_slice_with_add_fusion->ReplaceAllUsesWith(add));
return absl::OkStatus();
}
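// Handles the nested case: the stacked parameter's only user inside the
// outer fusion is an inner fusion that performs the effectively static
// dynamic-slice.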
std::optional<PatternInfo> GetNestedDSFusionPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
if (instr->opcode() != HloOpcode::kFusion) {
return std::nullopt;
}
if (!metadata.unrollable_loop_bodies.contains(instr->parent())) {
VLOG(5) << "Instruction not inside unrollable while body, " << instr->name()
<< " inside " << instr->parent()->name();
return std::nullopt;
}
WhileLoopConfig while_instr_config =
metadata.unrollable_loop_bodies.at(instr->parent());
VLOG(3) << "Checking NestedDSFusionPattern";
HloInstruction* inner_fusion_user = nullptr;
for (HloInstruction* fused_instr :
instr->fused_instructions_computation()->MakeInstructionPostOrder()) {
if (Match(fused_instr, match::Parameter(stacked_operand_idx))) {
if (fused_instr->user_count() != 1) {
return std::nullopt;
}
if (Match(fused_instr->users()[0],
match::Fusion(match::Op(), match::Op()))) {
inner_fusion_user = fused_instr->users()[0];
break;
}
}
}
if (inner_fusion_user == nullptr) {
return std::nullopt;
}
for (HloInstruction* inner_fusion_instr :
inner_fusion_user->fused_instructions_computation()
->MakeInstructionPostOrder()) {
if (!Match(inner_fusion_instr, match::DynamicSlice())) {
continue;
}
std::optional<int64_t> dynamic_index =
MatchEffectivelyStaticDynamicSliceInsideLoop(
inner_fusion_instr,
inner_fusion_user->fused_instructions_computation()
->parameter_instruction(0),
while_instr_config);
if (dynamic_index.has_value() && dynamic_index.value() == 0) {
const int64_t num_layers =
inner_fusion_user->operand(0)->shape().dimensions(0);
PatternInfo pattern_info;
pattern_info.type = PatternType::NestedDSFusionPattern;
pattern_info.instr = inner_fusion_user;
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(inner_fusion_instr->shape(), num_layers);
pattern_info.unstacking_computation =
inner_fusion_user->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(inner_fusion_user);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackNestedDSFusionPattern(
HloInstruction* mutable_dynamic_slicing_fusion, const Shape& slice_shape) {
HloInstruction* parent_fusion =
mutable_dynamic_slicing_fusion->parent()->FusionInstruction();
HloInstruction* stacked_in_ds_fusion =
mutable_dynamic_slicing_fusion->mutable_operand(0);
CHECK_EQ(stacked_in_ds_fusion->opcode(), HloOpcode::kParameter);
int64_t stacked_param_number = stacked_in_ds_fusion->parameter_number();
HloInstruction* stacked =
parent_fusion->mutable_operand(stacked_param_number);
HloInstruction* offset_in_ds_fusion =
mutable_dynamic_slicing_fusion->mutable_operand(1);
CHECK_EQ(offset_in_ds_fusion->opcode(), HloOpcode::kParameter);
HloInstruction* offset =
parent_fusion->mutable_operand(offset_in_ds_fusion->parameter_number());
HloInstruction* sliced_param =
parent_fusion->fused_instructions_computation()->ReplaceParameter(
stacked_param_number,
HloInstruction::CreateParameter(stacked_param_number, slice_shape,
"sliced"));
HloInstruction* bitcast = mutable_dynamic_slicing_fusion->AddInstruction(
HloInstruction::CreateBitcast(mutable_dynamic_slicing_fusion->shape(),
sliced_param));
HloInstruction* bitcast_fusion =
mutable_dynamic_slicing_fusion->AddInstruction(
HloInstruction::CreateFusion(mutable_dynamic_slicing_fusion->shape(),
HloInstruction::FusionKind::kLoop,
bitcast));
TF_RETURN_IF_ERROR(
mutable_dynamic_slicing_fusion->ReplaceAllUsesWith(bitcast_fusion));
HloInstruction* new_operand =
parent_fusion->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
return parent_fusion->ReplaceOperandWithDifferentShape(
sliced_param->parameter_number(), new_operand);
}
std::optional<PatternInfo> GetDSAndDUSPattern(const UnstackerMetadata& metadata,
const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking DSAndDUSPattern";
if (instr->opcode() != HloOpcode::kFusion) {
return std::nullopt;
}
const HloInstruction* stacked = instr->operand(stacked_operand_idx);
if (stacked->user_count() != 2) {
return std::nullopt;
}
HloInstruction* shape_covering_ds_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
if (shape_covering_ds_instr == nullptr) {
return std::nullopt;
}
HloInstruction* bitcast_operand = nullptr;
if (!Match(instr->fused_instructions_computation()->root_instruction(),
match::Bitcast(match::Op(&bitcast_operand)))) {
return std::nullopt;
}
if (bitcast_operand != shape_covering_ds_instr) {
return std::nullopt;
}
if (!GetDUSFusionPattern(metadata, stacked->users()[1],
stacked->users()[1]->operand_index(stacked))) {
return std::nullopt;
}
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
const Shape& slice_shape = instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation = instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
pattern_info.unstacked_instrs.push_back(stacked->users()[1]);
return pattern_info;
}
absl::Status UnstackDSAndDUSPattern(HloInstruction* mutable_dynamic_slice,
const Shape& slice_shape) {
HloInstruction* stacked_gte = mutable_dynamic_slice->mutable_operand(0);
int64_t stacked_gte_index = stacked_gte->tuple_index();
HloComputation* parent = stacked_gte->parent();
ShapeUtil::UpdateTupleShape(stacked_gte->shape(), stacked_gte_index,
parent->root_instruction()->mutable_shape());
HloComputation* parent_loop = mutable_dynamic_slice->parent();
HloInstruction* stacked = mutable_dynamic_slice->mutable_operand(0);
HloInstruction* offset = mutable_dynamic_slice->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
TF_RETURN_IF_ERROR(
mutable_dynamic_slice->ReplaceAllUsesWithDifferentShape(new_operand));
HloInstruction* mutable_dynamic_update_slice = stacked_gte->users()[1];
TF_RETURN_IF_ERROR(
UnstackDUSFusionPattern(mutable_dynamic_update_slice, slice_shape));
return absl::OkStatus();
}
std::optional<PatternInfo> GetReduceFusionPattern(
const UnstackerMetadata& metadata, const HloInstruction* instr,
int64_t stacked_operand_idx) {
VLOG(3) << "Checking ReduceFusion";
HloInstruction* shape_covering_instr =
GetMostMajorShapeCoveringDynamicIndexInFusion(
metadata, instr, HloOpcode::kDynamicSlice, 2, stacked_operand_idx);
if (shape_covering_instr == nullptr) {
return std::nullopt;
}
HloInstruction* reduce_operand = nullptr;
HloInstruction* fusion_root =
instr->fused_instructions_computation()->root_instruction();
if (Match(fusion_root, match::Reduce(match::Op(&reduce_operand),
match::ConstantScalar())) &&
Match(fusion_root->to_apply()->root_instruction(),
match::Add(match::Parameter(), match::Parameter()))) {
if (reduce_operand == shape_covering_instr) {
PatternInfo pattern_info;
pattern_info.type = PatternType::Other;
pattern_info.instr = instr;
const Shape& slice_shape = instr->shape();
const int64_t num_layers = instr->operand(0)->shape().dimensions(0);
pattern_info.unstacked_shape =
MakeUnstackedShapeFromSlice(slice_shape, num_layers);
pattern_info.unstacking_computation =
instr->fused_instructions_computation();
pattern_info.unstacked_instrs.push_back(instr);
return pattern_info;
}
}
return std::nullopt;
}
absl::Status UnstackReduceFusionPattern(HloInstruction* mutable_reduce_fusion,
const Shape& slice_shape) {
HloComputation* parent_loop = mutable_reduce_fusion->parent();
HloInstruction* stacked = mutable_reduce_fusion->mutable_operand(0);
HloInstruction* offset = mutable_reduce_fusion->mutable_operand(1);
HloInstruction* new_operand =
parent_loop->AddInstruction(HloInstruction::CreateCustomCall(
slice_shape, {stacked, offset}, "DynamicGte"));
return mutable_reduce_fusion->ReplaceAllUsesWithDifferentShape(new_operand);
}
}
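// Registers the supported patterns, attempts to unstack every non-tuple
// operand of each while loop in the entry computation, and fully unrolls
// every loop that was changed.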
absl::StatusOr<bool> HloUnstacker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_ASSIGN_OR_RETURN(auto metadata,
UnstackerMetadata::Create(module, unfuse_slice_));
metadata.custom_handlers.push_back(
std::make_pair(GetDSAndDUSPattern, UnstackDSAndDUSPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetDSFusionPattern, UnstackDSFusionPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetDUSFusionPattern, UnstackDUSFusionPattern));
metadata.custom_handlers.push_back(std::make_pair(
GetDUSFusionWithPadPattern, UnstackDUSFusionWithPadPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetDSFusionWithAddPattern, UnstackDSFusionWithAddPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetReduceFusionPattern, UnstackReduceFusionPattern));
metadata.custom_handlers.push_back(
std::make_pair(GetNestedDSFusionPattern, UnstackNestedDSFusionPattern));
metadata.custom_handlers.push_back(std::make_pair(
GetDSFusionNoBitcastPattern, UnstackDSFusionNoBitcastPattern));
std::vector<HloInstruction*> entry_loops;
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (Match(instr, match::While(match::Tuple())) &&
Match(instr->while_body()->root_instruction(), match::Tuple())) {
entry_loops.push_back(instr);
}
}
bool unstacked = false;
std::vector<const HloInstruction*> unstacked_instructions;
for (HloInstruction* loop : entry_loops) {
for (int64_t i = 0; i < loop->shape().tuple_shapes_size(); ++i) {
if (loop->while_init()->operand(i)->shape().IsTuple()) {
continue;
}
VLOG(3) << "Attempting to unstack " << loop->name() << " at " << i
<< " = " << loop->while_init()->operand(i)->shape().ToString(true)
<< loop->while_init()->operand(i)->ToShortString();
unstacked |=
UnstackWhileOperandAtIndex(metadata, loop, i, unstacked_instructions);
VLOG(3) << "###################";
}
}
if (!unstacked) {
return false;
}
TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
std::vector<HloInstruction*> loops_to_unroll;
for (const HloInstruction* instr : unstacked_instructions) {
HloInstruction* loop = metadata.bodies[instr->parent()];
if (std::find(loops_to_unroll.begin(), loops_to_unroll.end(), loop) ==
loops_to_unroll.end()) {
loops_to_unroll.push_back(loop);
}
}
for (int64_t i = loops_to_unroll.size() - 1; i >= 0; --i) {
HloInstruction* loop = loops_to_unroll[i];
TF_ASSIGN_OR_RETURN(UnrollResult unroll_result,
WhileLoopUnroller::UnrollAndReturnReplacement(
loop, -1,
false,
true, false));
bool unrolled = unroll_result.unrolled;
CHECK(unrolled);
}
VLOG(3) << "after unstacking \n" << module->ToString();
return true;
}
} | #include "xla/service/hlo_unstacker.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using UnstackerTest = HloTestBase;
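// Counts instructions with `opcode` in the module's entry computation; used
// to check what the pass materialized (e.g. hoisted slices or removed
// fusions).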
int64_t GetInstrCountWithOpcodeInEntry(HloModule* module, HloOpcode opcode) {
int64_t instr_with_opcode_count = 0;
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == opcode) {
instr_with_opcode_count++;
}
}
return instr_with_opcode_count;
}
TEST_F(UnstackerTest, UnstackDSFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
0);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
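// Negative case: the stacked operand is also rewritten by a multiply fusion
// whose result is carried into the next iteration, so it is not read-only and
// must not be unstacked.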
TEST_F(UnstackerTest, NotUnstackDSFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.tuple {
%param_0.51117 = s8[3,128,128] parameter(0)
mult = multiply(param_0.51117, param_0.51117)
ROOT out = tuple(param_0.51117, mult)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
fusion_mult = (s8[3,128,128], s8[3,128,128]) fusion(s8[3,128,128] p1), kind=kLoop, calls=%fused_computation.tuple
mult = s8[3,128,128] get-tuple-element(fusion_mult), index=1
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, mult)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_FALSE(unstacked);
}
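// The slicing fusion removes the degenerate dimension with a reduce instead
// of a bitcast; the pattern should still be recognized and unstacked.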
TEST_F(UnstackerTest, UnstackReduceFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
dynamic-slice.609.reduce_sub_computation {
lhs.53 = s8[] parameter(0)
rhs.53 = s8[] parameter(1)
ROOT add.3090 = s8[] add(lhs.53, rhs.53)
}
fused_computation.1096.clone {
param_0.5572 = s8[3,128,128] parameter(0)
param_1.6711 = s32[]{:T(128)} parameter(1)
constant.12008 = s32[]{:T(128)} constant(0)
dynamic-slice.1545 = s8[1,128,128] dynamic-slice(param_0.5572, param_1.6711, constant.12008, constant.12008), dynamic_slice_sizes={1,128, 128}
constant.12009 = s8[] constant(-0)
ROOT reduce.919 = s8[128,128] reduce(dynamic-slice.1545, constant.12009), dimensions={0}, to_apply=dynamic-slice.609.reduce_sub_computation
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.1096.clone
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
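// The slicing fusion returns s8[1,128,128] directly and the bitcast sits
// outside the fusion; unstacking should still fire, leaving 3 slices and no
// fusions in the entry computation.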
TEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcast) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
0);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
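// Same module as above, but an unfuse callback that always returns false
// keeps the hoisted slices wrapped in fusions: 0 kSlice and 3 kFusion in the
// entry computation.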
TEST_F(UnstackerTest, UnstackDSFusionPatternNoBitcastKeepFused) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = s8[1,128,128] fusion(s8[3,128,128] p1, i), kind=kLoop, calls=%fused_computation.slice
bitcast.102 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)
conv = bf16[8,128] convolution(bf16[8,128] p0, s8[128,128] bitcast.102), dim_labels=bf_io->bf
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
auto unfuse = [](HloInstruction* instruction) { return false; };
TF_ASSERT_OK_AND_ASSIGN(bool unstacked,
HloUnstacker(unfuse).Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 0);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
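// The stacked bf16[32,4,64,64,3] operand carries a non-default layout;
// unstacking should respect it and produce 32 entry-computation slices.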
TEST_F(UnstackerTest, UnstackDSFusionPatternWithDifferentLayout) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.30.clone (param_0.153: bf16[32,4,64,64,3], param_1.123: s32[]) -> bf16[64,4,64,3] {
%param_0.153 = bf16[32,4,64,64,3]{2,1,4,3,0} parameter(0)
%param_1.123 = s32[]{:T(128)} parameter(1)
%constant.227 = s32[]{:T(128)} constant(0)
%dynamic-slice.5 = bf16[1,4,64,64,3]{2,1,4,3,0} dynamic-slice(bf16[32,4,64,64,3]{2,1,4,3,0} %param_0.153, s32[]{:T(128)} %param_1.123, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227, s32[]{:T(128)} %constant.227), dynamic_slice_sizes={1,4,64,64,3}
ROOT %bitcast.102 = bf16[64,4,64,3]{0,1,3,2} bitcast(bf16[1,4,64,64,3]{2,1,4,3,0} %dynamic-slice.5)
}
%while.body (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> (s32[], bf16[8,128], bf16[32,4,64,64,3]) {
wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = bf16[32,4,64,64,3]{2,1,4,3,0} get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67830 = bf16[64,4,64,3]{0,1,3,2} fusion(p1, i), kind=kLoop, calls=%fused_computation.30.clone
ROOT out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(inc, p0, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], bf16[32,4,64,64,3])) -> pred[] {
wide_p = (s32[], bf16[8,128], bf16[32,4,64,64,3]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(32)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = bf16[32,4,64,64,3] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], bf16[32,4,64,64,3]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], bf16[32,4,64,64,3]) while(while.input), condition=%while.cond , body=%while.body
while_use = bf16[32,4,64,64,3] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice),
32);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kFusion),
0);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt));
}
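// The dynamic-slice fusion is nested inside an output fusion that also
// performs the convolution; the nested pattern should be detected.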
TEST_F(UnstackerTest, UnstackNestedDSFusionPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
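// The slice offset is i*2 rather than the induction variable itself;
// unstacking should handle the derived dynamic index.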
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDynamicIndex) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[6,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[6,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[6,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[6,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[6,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[6,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> (s32[], bf16[8,128], s8[6,128,128]) {
wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[6,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
two = s32[] constant(2)
mult = s32[] multiply(i, two)
fusion.conv = bf16[8,128] fusion(p0, p1, mult), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[6,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[6,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[6,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[6,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[6,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[6,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[6,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
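// Two stacked operands are sliced by two separate nested fusions in the same
// body; both should be unstacked (2 operands x 4 iterations = 8 slices).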
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithMultipleIndex) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[4,128,128] get-tuple-element(wide_p), index=2
p2 = s8[4,128,128] get-tuple-element(wide_p), index=3
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv.1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1
fusion.conv.2 = bf16[8,128] fusion(p0, p2, i), kind=kOutput, calls=%fused_computation.inner.2
plus = bf16[8,128] add(fusion.conv.1, fusion.conv.2)
ROOT out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(inc, plus, p1, p2)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[4,128,128] parameter(0)
p1 = s8[4,128,128] parameter(1)
p2 = bf16[8,128] parameter(2)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) tuple(init, p2, p0, p1)
while.out = (s32[], bf16[8,128], s8[4,128,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
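// Same nested pattern, but the inner fusion's operands are permuted; matching
// must not depend on operand order.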
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithDifferentOperandsOrder) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_1.30691: s8[3,128,128], p2: s32[], param_0.34523: bf16[8,128]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(2)
%param_1.30691 = s8[3,128,128] parameter(0)
p2 = s32[] parameter(1)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(p1, i, p0), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, fusion.conv, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
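// Two nested fusions slice the same operand through structurally identical
// slicing computations; the operand should still be unstacked once.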
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternWithSameUnstackingComps) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner.1 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%fused_computation.inner.2 (param_0.34523: bf16[8,128], param_1.30691: s8[3,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[3,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[3,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice.2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv1 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.1
fusion.conv2 = bf16[8,128] fusion(p0, p1, i), kind=kOutput, calls=%fused_computation.inner.2
add = bf16[8,128] add(fusion.conv1, fusion.conv2)
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, add, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 3);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
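// The operand is sliced by two fusions with different result shapes
// (s8[1,128,128] vs. a bitcast to s8[128,128]), so no single unstacked form
// fits both uses and the pass must leave the module unchanged.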
TEST_F(UnstackerTest, NotUnstackNestedDSFusionPatternWithSameUnstackingComps) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice.1 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[1,128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
}
%fused_computation.slice.2 (param_0.51117: s8[3,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[3,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[3,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%while.body (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> (s32[], bf16[8,128], s8[3,128,128]) {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
p0 = bf16[8,128] get-tuple-element(wide_p), index=1
p1 = s8[3,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
%fusion.67831 = s8[128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.2
%fusion.67830 = s8[1,128,128] fusion(p1, i), kind=kLoop, calls=%fused_computation.slice.1
%bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %fusion.67830)
ROOT out = (s32[], bf16[8,128], s8[3,128,128]) tuple(inc, p0, p1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[3,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[3,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(3)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
ENTRY main {
p0 = s8[3,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(0)
while.input = (s32[], bf16[8,128], s8[3,128,128]) tuple(init, p1, p0)
while.out = (s32[], bf16[8,128], s8[3,128,128]) while(while.input), condition=%while.cond , body=%while.body
while_use = s8[3,128,128] get-tuple-element(while.out), index=2
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_FALSE(unstacked);
}
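// The stacked weight is threaded into an inner while loop; unstacking must
// propagate through the nested loop (4 entry-computation slices).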
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternSingleNestedLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
}
%while.cond.inner (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
%while.body (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
param0 = bf16[8,128] get-tuple-element(wide_p), index=1
param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(2)
zero = s32[] constant(0)
mult = s32[] multiply(i, one)
inner.in = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
inner.out = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in), condition=%while.cond.inner, body=%while.body.inner
fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out), index=1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
}
%while.cond (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(20)
add = s32[] add(%constant.12857, %constant.12857)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
}
ENTRY main {
weight = s8[4,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(1)
while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond , body=%while.body
ROOT out = bf16[8,128] get-tuple-element(while.out), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 4);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
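// Two sequential outer loops each contain an inner loop slicing the same
// stacked weight; both loop nests should be unstacked (8 slices in total).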
TEST_F(UnstackerTest, UnstackNestedDSFusionPatternTwoNestedLoops) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice1 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner1 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice1
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
}
%while.cond.inner1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
%while.body1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
param0 = bf16[8,128] get-tuple-element(wide_p), index=1
param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(2)
zero = s32[] constant(0)
mult = s32[] multiply(i, one)
inner.in.1 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
inner.out.1 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.1), condition=%while.cond.inner1, body=%while.body.inner1
fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.1), index=1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
}
%while.cond1 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(20)
add = s32[] add(%constant.12857, %constant.12857)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
}
%fused_computation.slice2 (param_0.51117: s8[4,128,128], p1: s32[]) -> s8[128,128] {
%param_0.51117 = s8[4,128,128] parameter(0)
p1 = s32[] parameter(1)
%constant.85694 = s32[] constant(0)
%dynamic-slice.22040 = s8[1,128,128] dynamic-slice(s8[4,128,128] %param_0.51117, p1, s32[] %constant.85694, s32[] %constant.85694), dynamic_slice_sizes={1,128,128}
ROOT %bitcast.31250 = s8[128,128] bitcast(s8[1,128,128] %dynamic-slice.22040)
}
%fused_computation.inner2 (param_0.34523: bf16[8,128], param_1.30691: s8[4,128,128], p2: s32[]) -> bf16[8,128] {
%param_0.34523 = bf16[8,128] parameter(0)
%param_1.30691 = s8[4,128,128] parameter(1)
p2 = s32[] parameter(2)
%fusion.67830 = s8[128,128] fusion(s8[4,128,128] %param_1.30691, p2), kind=kLoop, calls=%fused_computation.slice2
ROOT %convolution.3447 = bf16[8,128] convolution(bf16[8,128] %param_0.34523, s8[128,128] %fusion.67830), dim_labels=bf_io->bf
}
%while.body.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
inner_param_0 = bf16[8,128] get-tuple-element(wide_p), index=1
inner_param_1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(1)
inc = s32[] add(i, one)
fusion.conv = bf16[8,128] fusion(inner_param_0, inner_param_1, i), kind=kOutput, calls=%fused_computation.inner2
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(inc, fusion.conv, inner_param_1)
}
%while.cond.inner2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(4)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, s32[] %constant.12857), direction=LT
}
%while.body2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> (s32[], bf16[8,128], s8[4,128,128]) {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
param0 = bf16[8,128] get-tuple-element(wide_p), index=1
param1 = s8[4,128,128] get-tuple-element(wide_p), index=2
one = s32[] constant(2)
zero = s32[] constant(0)
mult = s32[] multiply(i, one)
inner.in.2 = (s32[], bf16[8,128], s8[4,128,128]) tuple(zero, param0, param1)
inner.out.2 = (s32[], bf16[8,128], s8[4,128,128]) while(inner.in.2), condition=%while.cond.inner2, body=%while.body.inner2
fusion.conv.inner = bf16[8,128] get-tuple-element(inner.out.2), index=1
ROOT out = (s32[], bf16[8,128], s8[4,128,128]) tuple(mult, fusion.conv.inner, param1)
}
%while.cond2 (wide_param: (s32[], bf16[8,128], s8[4,128,128])) -> pred[] {
wide_p = (s32[], bf16[8,128], s8[4,128,128]) parameter(0)
i = s32[] get-tuple-element(wide_p), index=0
%constant.12857 = s32[] constant(20)
add = s32[] add(%constant.12857, %constant.12857)
ROOT %compare.1921 = pred[]{:T(512)} compare(s32[] i, add), direction=LT
}
ENTRY main {
weight = s8[4,128,128] parameter(0)
p1 = bf16[8,128] parameter(1)
init = s32[] constant(1)
while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init, p1, weight)
while.out = (s32[], bf16[8,128], s8[4,128,128]) while(while.input), condition=%while.cond1 , body=%while.body1
init2 = s32[] get-tuple-element(while.out), index=0
second.while.input = (s32[], bf16[8,128], s8[4,128,128]) tuple(init2, p1, weight)
second.while.out = (s32[], bf16[8,128], s8[4,128,128]) while(second.while.input), condition=%while.cond2 , body=%while.body2
out = bf16[8,128] get-tuple-element(while.out), index=1
second.out = bf16[8,128] get-tuple-element(second.while.out), index=1
ROOT result = bf16[8,128] add(out, second.out)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_EQ(GetInstrCountWithOpcodeInEntry(module.get(), HloOpcode::kSlice), 8);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
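// The body both reads the stacked s32[4,3] operand through a dynamic-slice
// fusion and writes it back through a dynamic-update-slice fusion; the paired
// DS/DUS pattern should be unstacked.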
TEST_F(UnstackerTest, UnstackDSAndDUSPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s32[4,3], offset: s32[]) -> s32[3] {
%param_0.51117 = s32[4,3] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = s32[1,3] dynamic-slice(s32[4,3] %param_0.51117, offset, zero), dynamic_slice_sizes={1,3}
ROOT %bitcast.31250 = s32[3] bitcast(s32[1,3] %dynamic-slice.22040)
}
%fused_computation.update.slice (param_0.51117: s32[4,3], p1: s32[], p2: s32[3]) -> s32[4,3] {
%param_0.51117 = s32[4,3] parameter(0)
%p1 = s32[] parameter(1)
%p2 = s32[3] parameter(2)
%zero = s32[] constant(0)
%bitcast.31250 = s32[1,3] bitcast(%p2)
ROOT output_dus = s32[4,3]{1,0} dynamic-update-slice(%param_0.51117, %bitcast.31250, %p1, zero)
}
SimpleLoop.body {
loop_var.1 = (s32[], s32[4,3]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = s32[4,3] get-tuple-element(loop_var.1), index=1
zero = s32[] constant(0)
some_const = s32[3] constant({0,1,2})
constant.1 = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, constant.1)
ds = s32[3]{0} fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice
update = s32[3] add(ds, ds)
dus = s32[3] dynamic-update-slice(ds, update, zero)
output = s32[4,3] fusion(get-tuple-element.2, get-tuple-element.1, dus), kind=kLoop, calls=%fused_computation.update.slice
ROOT tuple = (s32[], s32[4,3]) tuple(idx, output)
}
SimpleLoop.condition {
loop_var.1 = (s32[], s32[4,3]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
ENTRY SimpleLoop {
reference = s32[4,3] parameter(0)
zero = s32[] constant(0)
zero1 = s32[] constant(0)
one = s32[] constant(1)
tuple.1 = (s32[], s32[4,3]) tuple(zero, reference)
while = (s32[], s32[4,3]) while(tuple.1), condition=SimpleLoop.condition, body=SimpleLoop.body
ROOT out = s32[] get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
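// DS fusions live in an inner loop whose stacked inputs come from an outer
// loop (one of them via an AllocateBuffer custom-call); unstacking should
// propagate through the nesting.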
TEST_F(UnstackerTest, UnstackDSAndDUSPatternNestedLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
    %dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128}
ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)
}
%fused_computation.slice.2 (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128}
ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)
}
inner.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2
sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice
    sliced.2 = bf16[1,8,257,128] fusion(get-tuple-element.3, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice.2
temp = bf16[1,8,257,128] add(sliced, sliced.2)
    one = s32[] constant(1)
    idx = s32[] add(get-tuple-element.1, one)
ROOT out = tuple(idx, get-tuple-element.2, get-tuple-element.3)
}
  inner.condition {
    loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.2 = s32[] constant(4)
    ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
  }
outer.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
get-tuple-element.3 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=2
zero = s32[] constant(0)
buffer = bf16[4,1,8,257,128] custom-call(), custom_call_target="AllocateBuffer"
inner.input = tuple(zero, buffer, get-tuple-element.2)
inner = while(inner.input), condition=inner.condition, body=inner.body
out1 = bf16[4,1,8,257,128] get-tuple-element(inner), index=1
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT tuple = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) tuple(idx, out1, get-tuple-element.3)
}
  outer.condition {
    loop_var.1 = (s32[], bf16[4,1,8,257,128], bf16[4,1,8,257,128]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.2 = s32[] constant(4)
    mul = s32[] multiply(get-tuple-element.1, constant.2)
    ROOT less-than = pred[] compare(get-tuple-element.1, mul), direction=LT
  }
ENTRY SimpleLoop {
param1 = bf16[4,1,8,257,128] parameter(0)
param2 = bf16[4,1,8,257,128] parameter(1)
zero = s32[] constant(0)
zero1 = s32[] constant(0)
one = s32[] constant(1)
tuple.1 = tuple(zero, param1, param2)
while = while(tuple.1), condition=outer.condition, body=outer.body
ROOT out = s32[] get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
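// A loop that only reads the stacked value via DS feeds a second loop that
// only writes it via DUS; both loops must be unstacked consistently.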
TEST_F(UnstackerTest, UnstackDSAndDUSPatternLoopFeedingLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.update.slice (param_0.51117: bf16[4,1,8,257,128], p1: s32[], param_0.51118: bf16[1,8,257,128]) -> bf16[4,1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
p1 = s32[] parameter(1)
%param_0.51118 = bf16[1,8,257,128] parameter(2)
bitcast = bf16[1,1,8,257,128] bitcast(param_0.51118)
%constant.85694 = s32[] constant(0)
ROOT %dynamic-update-slice.22040 = bf16[4,1,8,257,128] dynamic-update-slice(bf16[4,1,8,257,128] %param_0.51117, bitcast, p1, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694, s32[] %constant.85694)
}
  %fused_computation.slice (param_0.51117: bf16[4,1,8,257,128], offset: s32[]) -> bf16[1,8,257,128] {
%param_0.51117 = bf16[4,1,8,257,128] parameter(0)
offset = s32[] parameter(1)
zero = s32[] constant(0)
%dynamic-slice.22040 = bf16[1,1,8,257,128] dynamic-slice(bf16[4,1,8,257,128] %param_0.51117, offset, zero, zero, zero, zero), dynamic_slice_sizes={1,1,8,257,128}
ROOT %bitcast.31250 = bf16[1,8,257,128] bitcast(%dynamic-slice.22040)
}
first.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
constant = bf16[1,8,257,128] constant({...})
sliced = bf16[1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1), kind=kLoop, calls=%fused_computation.slice
tmp = bf16[1,8,257,128] add(sliced, sliced)
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT out = tuple(idx, get-tuple-element.2)
}
first.condition {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
next.body {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
get-tuple-element.2 = bf16[4,1,8,257,128] get-tuple-element(loop_var.1), index=1
constant = bf16[1,8,257,128] constant({...})
update.sliced = bf16[4,1,8,257,128] fusion(get-tuple-element.2, get-tuple-element.1, constant), kind=kLoop, calls=%fused_computation.update.slice
one = s32[] constant(1)
idx = s32[] add(get-tuple-element.1, one)
ROOT out = tuple(idx, update.sliced)
}
next.condition {
loop_var.1 = (s32[], bf16[4,1,8,257,128]) parameter(0)
get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.2 = s32[] constant(4)
ROOT less-than = pred[] compare(get-tuple-element.1, constant.2), direction=LT
}
ENTRY SimpleLoop {
param1 = bf16[4,1,8,257,128] parameter(0)
param2 = bf16[4,1,8,257,128] parameter(1)
zero = s32[] constant(0)
zero1 = s32[] constant(0)
one = s32[] constant(1)
tuple.1 = tuple(zero, param1)
while = while(tuple.1), condition=first.condition, body=first.body
while.out = bf16[4,1,8,257,128] get-tuple-element(while), index=1
next.input = tuple(zero, while.out)
next = while(next.input), condition=next.condition, body=next.body
ROOT out = s32[] get-tuple-element(next), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
}
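// The DUS fusion pads its bf16[1,8,257,128] update to the stored
// [1,8,513,128] shape before writing; the pad+DUS variant should still be
// unstacked across the two loops.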
TEST_F(UnstackerTest, UnstackDUSFusionWithPadPatternLoopFeedingLoop) {
std::string hlo_string = R"(
HloModule SimpleLoop
fused_computation.75.clone {
param_0.5713 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)
param_2.4396 = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} parameter(2)
constant.12166 = bf16[]{:T(256)} constant(0)
pad.496 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} pad(param_2.4396, constant.12166), padding=0_0x0_0x0_256x0_0
bitcast.1262 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} bitcast(pad.496)
param_1.6823 = s32[]{:T(128)} parameter(1)
constant.12165 = s32[]{:T(128)} constant(0)
ROOT dynamic-update-slice.193 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-update-slice(param_0.5713, bitcast.1262, param_1.6823, constant.12165, constant.12165, constant.12165, constant.12165)
}
fused_computation.1 {
    param_0.5712 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)
param_1.6822 = s32[]{:T(128)} parameter(1)
constant.12164 = s32[]{:T(128)} constant(0)
dynamic-slice.1597 = bf16[1,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} dynamic-slice(param_0.5712, param_1.6822, constant.12164, constant.12164, constant.12164, constant.12164), dynamic_slice_sizes={1,1,8,513,128}
ROOT bitcast.1261 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} bitcast(dynamic-slice.1597)
}
first.body {
wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0
constant.12144..sunk.2 = s32[]{:T(128)} constant(1)
add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)
get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1
fusion.2381 = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.1
tmp = bf16[1,8,513,128]{3,2,1,0:T(8,128)(2,1)} add(fusion.2381, fusion.2381)
ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178)
}
first.cond {
wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0
constant.12162 = s32[]{:T(128)} constant(2)
ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT
}
wide.region_54.2652.clone_spmd {
wide.param.29 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0
constant.12144..sunk.2 = s32[]{:T(128)} constant(1)
add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)
get-tuple-element.12178 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1
update = bf16[1,8,257,128]{3,2,1,0:T(8,128)(2,1)} constant({...})
fusion.2382 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177, update), kind=kLoop, calls=fused_computation.75.clone
ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) tuple(add.4517, fusion.2382)
}
wide.region_55.2732.clone_spmd {
wide.param.28 = (s32[]{:T(128)}, bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0
constant.12162 = s32[]{:T(128)} constant(2)
ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT
}
ENTRY main {
p0 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} parameter(0)
init = s32[]{:T(128)} constant(0)
first.input = tuple(init, p0)
first.out = while(first.input), condition=first.cond , body=first.body
o1 = bf16[2,1,8,513,128]{4,3,2,1,0:T(8,128)(2,1)} get-tuple-element(first.out), index=1
input = tuple(init, o1)
out = while(input), condition=wide.region_55.2732.clone_spmd , body=wide.region_54.2652.clone_spmd
ROOT res = s32[]{:T(128)} get-tuple-element(out), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
}
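// The fusion dynamic-slices the stacked operand, adds a broadcast constant,
// and reduces away the sliced dimension; this variant should still be
// unstacked.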
TEST_F(UnstackerTest, UnstackDUSFusionWithAddPattern) {
std::string hlo_string = R"(
HloModule SimpleLoop
add.2771.reduce_sub_computation {
lhs.44 = bf16[] parameter(0)
rhs.44 = bf16[] parameter(1)
ROOT add.3079 = bf16[] add(lhs.44, rhs.44)
}
fused_computation.75.clone {
param_0.31658 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0)
param_1.26202 = s32[]{:T(128)} parameter(1)
constant.47557 = s32[]{:T(128)} constant(0)
dynamic-slice.12289 = bf16[1,4096]{1,0:T(2,128)(2,1)} dynamic-slice(param_0.31658, param_1.26202, constant.47557), dynamic_slice_sizes={1,4096}
constant.47559 = bf16[]{:T(256)} constant(1)
broadcast.39214 = bf16[1,4096]{1,0:T(2,128)(2,1)} broadcast(constant.47559), dimensions={}
add.13176 = bf16[1,4096]{1,0:T(2,128)(2,1)} add(dynamic-slice.12289, broadcast.39214)
constant.47558 = bf16[] constant(-0)
ROOT reduce.8210 = bf16[4096]{0:T(1024)(128)(2,1)} reduce(add.13176, constant.47558), dimensions={0}, to_apply=add.2771.reduce_sub_computation
}
first.body {
wide.param.29 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12177 = s32[]{:T(128)} get-tuple-element(wide.param.29), index=0
constant.12144..sunk.2 = s32[]{:T(128)} constant(1)
add.4517 = s32[]{:T(128)} add(get-tuple-element.12177, constant.12144..sunk.2)
get-tuple-element.12178 = bf16[2,4096]{1,0:T(8,128)(2,1)} get-tuple-element(wide.param.29), index=1
fusion.2381 = bf16[4096]{0:T(1024)(128)(2,1)} fusion(get-tuple-element.12178, get-tuple-element.12177), kind=kLoop, calls=fused_computation.75.clone
tmp = bf16[4096]{0:T(1024)(128)(2,1)} add(fusion.2381, fusion.2381)
ROOT tuple.949 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) tuple(add.4517, get-tuple-element.12178)
}
first.cond {
wide.param.28 = (s32[]{:T(128)}, bf16[2,4096]{1,0:T(8,128)(2,1)}) parameter(0)
get-tuple-element.12167 = s32[]{:T(128)} get-tuple-element(wide.param.28), index=0
constant.12162 = s32[]{:T(128)} constant(2)
ROOT compare.1815 = pred[]{:T(512)} compare(get-tuple-element.12167, constant.12162), direction=LT
}
ENTRY main {
p0 = bf16[2,4096]{1,0:T(8,128)(2,1)} parameter(0)
init = s32[]{:T(128)} constant(0)
first.input = tuple(init, p0)
first.out = while(first.input), condition=first.cond, body=first.body
ROOT o1 = s32[]{:T(128)} get-tuple-element(first.out), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto original = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool unstacked, HloUnstacker().Run(module.get()));
EXPECT_TRUE(unstacked);
EXPECT_TRUE(RunAndCompareTwoModules(std::move(module), std::move(original),
std::nullopt, false));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_unstacker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a2e11f9f-5cdd-42c4-8ac0-3ccdbc5b0ec6 | cpp | tensorflow/tensorflow | logdet | third_party/xla/xla/hlo/builder/lib/logdet.cc | third_party/xla/xla/hlo/builder/lib/logdet_test.cc | #include "xla/hlo/builder/lib/logdet.h"
#include <limits>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/qr.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
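// Computes sign(det(a)) and log|det(a)| via the QR decomposition a = q * r:
// log|det(a)| is the sum of log|r_ii|, and the sign combines the signs of the
// diagonal of r with det(q), where each nonzero Householder tau flips the
// sign.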
SignAndLogDet SLogDet(XlaOp a) {
absl::StatusOr<SignAndLogDet> result =
[&]() -> absl::StatusOr<SignAndLogDet> {
TF_ASSIGN_OR_RETURN(Shape a_shape, a.builder()->GetShape(a));
auto qr = Qr(a);
int64_t m = ShapeUtil::GetDimension(a_shape, -2);
int64_t n = ShapeUtil::GetDimension(a_shape, -1);
if (m != n) {
return InvalidArgument(
"Arguments to logdet must be (batched) square matrices, got: %s",
a_shape.ToString());
}
auto log_abs_det = Einsum(Log(Abs(qr.q_and_r)), "...aa->...");
auto sign_diag = Reduce(
Sign(Einsum(qr.q_and_r, "...aa->...a")),
One(a.builder(), a_shape.element_type()),
CreateScalarMultiplyComputation(a_shape.element_type(), a.builder()),
{a_shape.rank() - 2});
auto sliced_taus = SliceInMinorDims(qr.taus, {0}, {n - 1});
auto sign_taus = Reduce(
Select(Ne(sliced_taus, ZerosLike(sliced_taus)),
FullLike(sliced_taus, -1), FullLike(sliced_taus, 1)),
One(a.builder(), a_shape.element_type()),
CreateScalarMultiplyComputation(a_shape.element_type(), a.builder()),
{a_shape.rank() - 2});
return SignAndLogDet{sign_diag * sign_taus, log_abs_det};
}();
if (!result.ok()) {
XlaOp error = a.builder()->ReportError(result.status());
return SignAndLogDet{error, error};
}
return result.value();
}
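// Returns log(det(a)); NaN when the determinant is negative (where the log is
// undefined) and -inf when the matrix is singular.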
XlaOp LogDet(XlaOp a) {
SignAndLogDet slogdet = SLogDet(a);
return Select(
Ge(slogdet.sign, ZerosLike(slogdet.sign)), slogdet.logdet,
FullLike(slogdet.logdet, std::numeric_limits<float>::quiet_NaN()));
}
} | #include "xla/hlo/builder/lib/logdet.h"
#include <limits>
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
namespace {
using LogDetTest = xla::ClientLibraryTestBase;
XLA_TEST_F(LogDetTest, Simple) {
xla::XlaBuilder builder(TestName());
xla::Array2D<float> a_vals({
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
});
xla::XlaOp a;
auto a_data = CreateR2Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR0<float>(1.f),
xla::LiteralUtil::CreateR0<float>(14.1601f),
xla::LiteralUtil::CreateR0<float>(14.1601f));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
XLA_TEST_F(LogDetTest, SimpleTriangle) {
xla::XlaBuilder builder(TestName());
xla::Array2D<float> a_vals({
{4, 6, 8, 10},
{4, -39, 62, 73},
{0, 0, -146, 166},
{4, 6, 8, 320},
});
xla::XlaOp a;
auto a_data = CreateR2Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR0<float>(1.f),
xla::LiteralUtil::CreateR0<float>(15.9131355f),
xla::LiteralUtil::CreateR0<float>(15.9131355f));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
XLA_TEST_F(LogDetTest, SimpleBatched) {
xla::XlaBuilder builder(TestName());
xla::Array3D<float> a_vals({
{
{4, 6, 8, 10},
{6, 45, 54, 63},
{8, 54, 146, 166},
{10, 63, 166, 310},
},
{
{16, 24, 8, 12},
{24, 61, 82, 48},
{8, 82, 456, 106},
{12, 48, 106, 62},
},
{{2, 2, 3, 4}, {4, 5, 6, 7}, {7, 8, 9, 8}, {10, 11, 12, 13}},
{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}},
});
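  // The last two batch entries exercise a negative determinant (sign -1, and
  // NaN from LogDet) and a singular all-zero matrix (sign 0, logdet -inf).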
xla::XlaOp a;
auto a_data = CreateR3Parameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR1<float>({1.f, 1.f, -1.f, 0.f}),
xla::LiteralUtil::CreateR1<float>(
{14.1601f, 14.3092f, 2.4849f,
-std::numeric_limits<float>::infinity()}),
xla::LiteralUtil::CreateR1<float>(
{14.1601f, 14.3092f, std::numeric_limits<float>::quiet_NaN(),
-std::numeric_limits<float>::infinity()}));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
XLA_TEST_F(LogDetTest, LogdetOfLargerMatricesBatched) {
xla::XlaBuilder builder(TestName());
xla::Array<float> a_vals = {
{{7.2393, 1.1413, 4.1883, -4.8272, 3.2831, -0.0568, -2.4776},
{0.4347, 3.4095, 1.6259, -4.7100, 1.5942, 1.4217, -2.8009},
{3.6964, 0.4882, 6.5276, -1.2128, 1.3851, 0.7417, -3.8515},
{-3.7986, -5.1188, -1.9410, 14.0205, -5.4515, 3.1831, 5.1488},
{1.5621, 3.0426, 1.4819, -4.5938, 10.1397, 4.9312, -2.8351},
{-1.5436, -0.0287, -0.1139, 4.4499, 2.5894, 6.1216, 2.7201},
{-3.7241, -2.7670, -3.8162, 4.5961, -1.7251, -0.4190, 8.6562}},
{{3.3789, -2.3607, -1.2471, 2.1503, 0.6062, -0.6057, 1.7748},
{-1.8670, 11.0947, 0.1229, 0.0599, 3.1714, -4.7941, -4.5442},
{-0.6905, -0.0829, 5.2156, 2.9528, 2.6200, 6.1638, 1.8652},
{3.0521, 2.2174, 0.7444, 10.7268, 0.6443, -2.7732, 1.6840},
{1.8479, 3.0821, 4.5671, 2.9254, 6.1338, 5.2066, 2.3662},
{-0.0360, -5.5341, 5.9687, -0.3297, 2.1174, 13.0016, 4.0118},
{0.4380, -4.6683, 3.1548, 0.0924, 0.7176, 6.4679, 6.1819}},
{{10.0487, 4.0350, -0.8471, -1.2887, -0.8172, -3.3698, 1.3191},
{4.8678, 4.6081, 0.8419, -0.2454, -3.2599, -1.2386, 2.4070},
{1.4877, 0.8362, 2.6077, 1.1782, -0.1116, 1.7130, -1.1883},
{-0.9245, -0.7435, -0.9456, 2.5936, 1.9887, -0.1324, -0.1453},
{0.2918, -0.5301, -0.8775, 1.0478, 8.9262, 2.4731, -0.4393},
{-3.5759, -1.5619, 2.4410, 1.3046, 4.2678, 7.3587, -4.0935},
{-1.1187, 0.9150, -1.8253, 0.0390, -2.5684, -4.0778, 4.1447}}};
xla::XlaOp a;
auto a_data = CreateParameter<float>(a_vals, 0, "a", &builder, &a);
xla::SignAndLogDet slogdet = xla::SLogDet(a);
xla::XlaOp logdet = xla::LogDet(a);
xla::Tuple(&builder, {slogdet.sign, slogdet.logdet, logdet});
xla::Literal expected = xla::LiteralUtil::MakeTupleOwned(
xla::LiteralUtil::CreateR1<float>({1.f, 1.f, 1.f}),
xla::LiteralUtil::CreateR1<float>({8.93788053, 6.77846303, 7.4852403}),
xla::LiteralUtil::CreateR1<float>({8.93788053, 6.77846303, 7.4852403}));
ComputeAndCompareLiteral(&builder, expected, {a_data.get()},
xla::ErrorSpec(1e-4));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/logdet.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/logdet_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b34f93e-e91e-4025-b82e-0a8ba602f71d | cpp | tensorflow/tensorflow | divide | tensorflow/lite/experimental/shlo/ops/divide.cc | tensorflow/lite/experimental/shlo/ops/divide_test.cc | #include "tensorflow/lite/experimental/shlo/ops/divide.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
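// Named functor type so the op has a distinct identity; the division itself
// is inherited from the transparent std::divides.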
struct Divide : std::divides<void> {};
DivideOp Create(DivideOp::Attributes) { return {}; }
absl::Status Prepare(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(CheckCtx("divide"), lhs,
IsIntTensor, IsFloatTensor,
IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("divide"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("divide"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(DivideOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Divide divide;
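  // Dispatch on the lhs element type: plain int/float tensors are evaluated
  // directly, while quantized per-tensor inputs are dequantized, divided, and
  // requantized.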
if (IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), divide, lhs, rhs, output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
divide, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.divide: Unsupported tensor type.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/divide.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<DivideOp> {
static std::string Get() { return "Divide"; }
};
struct Divide : std::divides<void> {};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Divide, BinaryElementwiseOpShapePropagationTest,
DivideOp, TestParamNames);
using DivideBaselineConstraintTypes = BinaryElementwiseBaselineConstraintTypes<
    DivideOp,
    ConcatTypes<BaselineConstraintIntTypes, BaselineConstraintFloatTypes,
                BaselineConstraintQuantizedPerTensorTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
    Divide, BinaryElementwiseSameBaselineElementTypeConstraintTest,
    DivideBaselineConstraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<DivideOp, ConcatTypes<BoolTestType, PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Divide, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using ArithmeticTypes = ConcatTypes<ArithmeticTestTypes>;
template <class T>
struct DivideTest : ::testing::Test {};
TYPED_TEST_SUITE(DivideTest, ArithmeticTypes, TestParamNames);
TYPED_TEST(DivideTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), Divide());
auto op = Create(DivideOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
template <class T>
struct QuantizedDivideTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedDivideTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedDivideTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(2);
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data = RandomBuffer<TypeParam::kStorage>(
shape, zero_point + 1, zero_point + 5);
Vector<StorageT> output_data(shape.NumElements());
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor lhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = rhs_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[zero_point, scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs = Dequantize(lhs, zero_point, scale);
const ExpressedT dequantized_rhs = Dequantize(rhs, zero_point, scale);
const ExpressedT dequantized_res =
Divide()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(DivideOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/divide.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/divide_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f25e9d48-d814-44db-83e3-84a6b1b9ef2d | cpp | google/arolla | status | arolla/util/status.cc | arolla/util/status_test.cc | #include "absl/status/status.h"
#include <cstdint>
#include <initializer_list>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
namespace arolla {
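// Builds an InvalidArgument error listing the mismatched argument sizes, e.g.
// "argument sizes mismatch: (2, 3)".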
absl::Status SizeMismatchError(std::initializer_list<int64_t> sizes) {
return absl::InvalidArgumentError(absl::StrCat(
"argument sizes mismatch: (", absl::StrJoin(sizes, ", "), ")"));
}
} | #include "arolla/util/status.h"
#include <initializer_list>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Test;
TEST(StatusTest, CheckInputStatus) {
EXPECT_OK(CheckInputStatus());
EXPECT_OK(CheckInputStatus(13));
EXPECT_OK(CheckInputStatus(13, "a"));
EXPECT_OK(CheckInputStatus(absl::StatusOr<int>(13), "a"));
EXPECT_OK(CheckInputStatus(13, absl::StatusOr<std::string>("a")));
EXPECT_OK(CheckInputStatus(absl::StatusOr<int>(13),
absl::StatusOr<std::string>("a")));
absl::Status bad_status1{absl::StatusCode::kInvalidArgument, "bad 1"};
absl::Status bad_status2{absl::StatusCode::kDataLoss, "bad 2"};
EXPECT_EQ(CheckInputStatus(absl::StatusOr<int>(bad_status1)), bad_status1);
EXPECT_EQ(CheckInputStatus(13, absl::StatusOr<int>(bad_status1)),
bad_status1);
EXPECT_EQ(CheckInputStatus(absl::StatusOr<int>(13),
absl::StatusOr<int>(bad_status1)),
bad_status1);
EXPECT_EQ(CheckInputStatus(absl::StatusOr<int>(bad_status1),
absl::StatusOr<int>(bad_status2)),
bad_status1);
}
TEST(StatusTest, LiftStatusUpSuccess) {
std::tuple<> empty = LiftStatusUp().value();
EXPECT_THAT(empty, Eq(std::make_tuple()));
std::tuple<int> one = LiftStatusUp(absl::StatusOr<int>(1)).value();
EXPECT_THAT(one, Eq(std::make_tuple(1)));
std::tuple<std::string, int> two =
LiftStatusUp(absl::StatusOr<std::string>("one"), absl::StatusOr<int>(2))
.value();
EXPECT_THAT(two, Eq(std::make_tuple(std::string("one"), 2)));
ASSERT_OK_AND_ASSIGN(
std::vector<int> vec,
LiftStatusUp(absl::Span<const absl::StatusOr<int>>{1, 2}));
EXPECT_THAT(vec, ::testing::ElementsAre(1, 2));
absl::flat_hash_map<int, int> fhm =
LiftStatusUp(std::initializer_list<std::pair<int, absl::StatusOr<int>>>{
{1, 2}, {3, 4}})
.value();
EXPECT_THAT(fhm, Eq(absl::flat_hash_map<int, int>{{1, 2}, {3, 4}}));
absl::flat_hash_map<int, int> fhm1 =
LiftStatusUp(std::initializer_list<
std::pair<absl::StatusOr<int>, absl::StatusOr<int>>>{
{1, 2}, {3, 4}})
.value();
EXPECT_THAT(fhm, Eq(absl::flat_hash_map<int, int>{{1, 2}, {3, 4}}));
}
TEST(StatusTest, LiftStatusUpErrors) {
absl::Status bad_status1{absl::StatusCode::kInvalidArgument, "bad 1"};
absl::Status bad_status2{absl::StatusCode::kDataLoss, "bad 2"};
EXPECT_EQ(LiftStatusUp(absl::StatusOr<int>(bad_status1)).status(),
bad_status1);
EXPECT_EQ(LiftStatusUp(absl::StatusOr<std::string>("one"),
absl::StatusOr<int>(bad_status2))
.status(),
bad_status2);
EXPECT_EQ(LiftStatusUp(absl::StatusOr<std::string>("one"),
absl::StatusOr<int>(bad_status1),
absl::StatusOr<float>(bad_status2))
.status(),
bad_status1);
EXPECT_EQ(LiftStatusUp(absl::StatusOr<float>(bad_status2),
absl::StatusOr<std::string>("one"),
absl::StatusOr<int>(bad_status1))
.status(),
bad_status2);
EXPECT_THAT(LiftStatusUp(absl::Span<const absl::StatusOr<int>>{bad_status1,
bad_status2})
.status(),
bad_status1);
EXPECT_THAT(
LiftStatusUp(std::initializer_list<std::pair<int, absl::StatusOr<int>>>{
{1, bad_status1}, {2, 3}, {4, bad_status2}})
.status(),
AnyOf(bad_status1, bad_status2));
EXPECT_THAT(
LiftStatusUp(std::initializer_list<
std::pair<absl::StatusOr<int>, absl::StatusOr<int>>>{
{bad_status1, 1}, {2, 3}, {4, bad_status2}})
.status(),
AnyOf(bad_status1, bad_status2));
EXPECT_THAT(
LiftStatusUp(std::initializer_list<
std::pair<absl::StatusOr<int>, absl::StatusOr<int>>>{
{1, bad_status1}, {2, 3}, {4, bad_status2}})
.status(),
AnyOf(bad_status1, bad_status2));
}
TEST(StatusTest, UnStatus) {
using T = std::unique_ptr<int>;
using StatusOrT = absl::StatusOr<T>;
{
StatusOrT status_or_t = std::make_unique<int>(1);
const T& value = UnStatus(status_or_t);
EXPECT_EQ(*value, 1);
EXPECT_EQ(value.get(), status_or_t.value().get());
}
{
StatusOrT status_or_t = std::make_unique<int>(1);
T value = UnStatus(std::move(status_or_t));
EXPECT_EQ(*value, 1);
}
{
T original_value = std::make_unique<int>(1);
const T& value = UnStatus(original_value);
EXPECT_EQ(*value, 1);
EXPECT_EQ(value.get(), original_value.get());
}
{
T original_value = std::make_unique<int>(1);
T value = UnStatus(std::move(original_value));
EXPECT_EQ(*value, 1);
}
}
TEST(StatusTest, FirstErrorStatus) {
absl::Status ok_status = absl::OkStatus();
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
absl::Status internal_error = absl::InternalError("msg2");
EXPECT_OK(FirstErrorStatus({}));
EXPECT_OK(FirstErrorStatus({ok_status, ok_status}));
EXPECT_EQ(FirstErrorStatus({failed_precondition}), failed_precondition);
EXPECT_EQ(FirstErrorStatus({ok_status, failed_precondition}),
failed_precondition);
EXPECT_EQ(FirstErrorStatus({failed_precondition, ok_status}),
failed_precondition);
EXPECT_EQ(FirstErrorStatus({failed_precondition, internal_error}),
failed_precondition);
EXPECT_EQ(FirstErrorStatus({internal_error, failed_precondition}),
internal_error);
}
TEST(StatusTest, GetStatusOrOk) {
absl::Status ok_status = absl::OkStatus();
EXPECT_OK(GetStatusOrOk(5));
EXPECT_OK(GetStatusOrOk(ok_status));
EXPECT_OK(GetStatusOrOk(absl::StatusOr<int>(5)));
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
EXPECT_EQ(GetStatusOrOk(failed_precondition), failed_precondition);
EXPECT_EQ(GetStatusOrOk(absl::StatusOr<int>(failed_precondition)),
failed_precondition);
}
TEST(StatusTest, IsOkStatus) {
absl::Status ok_status = absl::OkStatus();
EXPECT_TRUE(IsOkStatus(5));
EXPECT_TRUE(IsOkStatus(ok_status));
EXPECT_TRUE(IsOkStatus(absl::StatusOr<int>(5)));
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
EXPECT_FALSE(IsOkStatus(failed_precondition));
EXPECT_FALSE(IsOkStatus(absl::StatusOr<int>(failed_precondition)));
}
TEST(StatusTest, UnStatusCaller) {
absl::Status failed_precondition = absl::FailedPreconditionError("msg1");
absl::Status failed_precondition2 = absl::FailedPreconditionError("msg2");
auto add_op = [](int a, int b) { return a + b; };
UnStatusCaller<decltype(add_op)> add_op_wrap{add_op};
auto add_op_with_status = [](int a, int b) -> absl::StatusOr<int> {
return a + b;
};
auto add_op_with_status_wrap = MakeUnStatusCaller(add_op_with_status);
auto add_op_always_error = [&](int a, int b) -> absl::StatusOr<int> {
return failed_precondition;
};
auto add_op_always_error_wrap = MakeUnStatusCaller(add_op_always_error);
EXPECT_THAT(add_op_wrap(5, 7), IsOkAndHolds(12));
EXPECT_THAT(add_op_with_status_wrap(5, 7), IsOkAndHolds(12));
EXPECT_EQ(add_op_always_error_wrap(5, 7).status(), failed_precondition);
EXPECT_EQ(add_op_wrap(5, absl::StatusOr<int>(failed_precondition)).status(),
failed_precondition);
EXPECT_EQ(add_op_wrap(absl::StatusOr<int>(failed_precondition),
absl::StatusOr<int>(failed_precondition2))
.status(),
failed_precondition);
EXPECT_EQ(
add_op_always_error_wrap(5, absl::StatusOr<int>(failed_precondition2))
.status(),
failed_precondition2);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/status.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/status_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f9f10a84-8072-4a0b-badf-15411ee7d548 | cpp | tensorflow/tensorflow | cost_analysis | tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.cc | third_party/xla/xla/service/memory_space_assignment/cost_analysis_test.cc | #include "tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tfrt/constants.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tfrt/compiler/opdefs/tfrt_op_interfaces.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
constexpr int64_t kDefaultCheapCost = 1;
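// Element count of a ranked tensor; every dimension is clamped to at least
// kDefaultCheapCost so dynamic (negative) extents do not corrupt the product.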
int64_t GetRankedTensorSize(mlir::TensorType type) {
auto shape = type.getShape();
int64_t size = 1;
for (int64_t dim : shape) {
size *= std::max(kDefaultCheapCost, dim);
}
return size;
}
int64_t InferTensorSize(const CostContext& context, mlir::TensorType type) {
if (type.hasRank()) return GetRankedTensorSize(type);
return context.default_unranked_tensor_size;
}
int64_t InferLookupTableFindV2Cost(const CostContext& context,
mlir::TF::LookupTableFindV2Op op) {
constexpr int64_t kLookupTableFindCostScale = 8;
constexpr int64_t kLookupTableFindStringKeyCostScale = 16;
auto value_type = mlir::cast<mlir::TensorType>(op.getValues().getType());
auto key_type = mlir::cast<mlir::TensorType>(op.getKeys().getType());
int64_t output_size = InferTensorSize(context, value_type);
int64_t cost = kLookupTableFindCostScale * output_size;
if (mlir::isa<mlir::TF::StringType>(key_type.getElementType()))
cost *= kLookupTableFindStringKeyCostScale;
return cost;
}
int64_t InferGatherV2Cost(const CostContext& context, mlir::TF::GatherV2Op op) {
return InferTensorSize(
context, mlir::cast<mlir::TensorType>(op.getOutput().getType()));
}
template <typename OpType>
int64_t InferSparseSegmentOpCost(const CostContext& context, OpType op) {
return InferTensorSize(
context, mlir::cast<mlir::TensorType>(op.getOutput().getType()));
}
using CostFunctionRegistry = absl::flat_hash_map<std::string, CostFunction>;
void RegisterCostFunction(CostFunctionRegistry& registry,
absl::string_view op_name,
CostFunction cost_function) {
auto r = registry.try_emplace(op_name, std::move(cost_function));
assert(r.second);
(void)r;
}
template <typename OpType, typename F>
void RegisterCostFunction(CostFunctionRegistry& registry, F f) {
RegisterCostFunction(
registry, OpType::getOperationName().str(),
[f = std::move(f)](const CostContext& context, mlir::Operation* op) {
return f(context, llvm::cast<OpType>(op));
});
}
CostFunctionRegistry& GetCostFunctionRegistry() {
static auto* const registry = []() {
auto* registry = new CostFunctionRegistry;
RegisterCostFunction<mlir::TF::GatherV2Op>(*registry, InferGatherV2Cost);
RegisterCostFunction<mlir::TF::SparseSegmentSumOp>(
*registry, InferSparseSegmentOpCost<mlir::TF::SparseSegmentSumOp>);
RegisterCostFunction<mlir::TF::SparseSegmentMeanOp>(
*registry, InferSparseSegmentOpCost<mlir::TF::SparseSegmentMeanOp>);
RegisterCostFunction<mlir::TF::SparseSegmentSqrtNOp>(
*registry, InferSparseSegmentOpCost<mlir::TF::SparseSegmentSqrtNOp>);
RegisterCostFunction<mlir::TF::LookupTableFindV2Op>(
*registry, InferLookupTableFindV2Cost);
return registry;
}();
return *registry;
}
}
void RegisterCostFunction(absl::string_view op_name,
CostFunction cost_function) {
RegisterCostFunction(GetCostFunctionRegistry(), op_name,
std::move(cost_function));
}
bool HasCostFunctionRegistered(absl::string_view op_name) {
return GetCostFunctionRegistry().contains(op_name);
}
int64_t CostAnalysis::GetCost(mlir::Operation* op) const {
assert(cost_map_.count(op) > 0);
return cost_map_.lookup(op);
}
void CostAnalysis::AnalyzeArguments(mlir::func::FuncOp func_op) {
for (auto arg : func_op.getArguments()) {
if (!mlir::isa<mlir::TensorType>(arg.getType())) continue;
auto type = mlir::cast<mlir::TensorType>(arg.getType());
if (type.hasRank()) {
max_arg_size_ = std::max(max_arg_size_, GetRankedTensorSize(type));
}
}
}
void CostAnalysis::AnalyzeBlock(mlir::Block* block) {
for (auto& op : *block) {
EvaluateCost(&op);
}
}
void CostAnalysis::EvaluateCost(mlir::Operation* op) {
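  // Cost resolution order: an op-provided cost interface, a default of
  // max_arg_size_ for non-TF dialects, a registered per-op cost function, a
  // recorded runtime cost, a cheap constant for metadata ops, and finally a
  // fallback based on operand sizes.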
if (auto cost_function =
mlir::dyn_cast<tfrt::compiler::CostFunctionInterface>(op)) {
cost_map_[op] = cost_function.cost();
return;
}
if (!llvm::isa<mlir::TF::TensorFlowDialect>(op->getDialect())) {
cost_map_[op] = max_arg_size_;
return;
}
const auto& registry = GetCostFunctionRegistry();
absl::string_view op_name = op->getName().getStringRef();
auto iter = registry.find(op_name);
if (iter != registry.end()) {
CostContext context;
context.default_unranked_tensor_size = max_arg_size_;
cost_map_[op] = iter->second(context, op);
return;
}
if (cost_recorder_ != nullptr) {
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (op_key_attr) {
cost_map_[op] = cost_recorder_->GetCost(op_key_attr.getInt());
return;
}
}
if (llvm::isa<mlir::TF::ShapeOp, mlir::TF::StridedSliceOp,
mlir::TF::ReshapeOp, mlir::TF::ExpandDimsOp>(op)) {
cost_map_[op] = kDefaultCheapCost;
return;
}
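  // Fallback: approximate the cost by the total size of all operands.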
int64_t cost = kDefaultCheapCost;
for (auto operand : op->getOperands()) {
auto type = mlir::cast<mlir::TensorType>(operand.getType());
if (type.hasRank()) {
cost += GetRankedTensorSize(type);
} else {
cost += max_arg_size_;
}
}
cost_map_[op] = cost;
}
}
} | #include "xla/service/memory_space_assignment/cost_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using memory_space_assignment::CostAnalysis;
using memory_space_assignment::CostAnalysisOptions;
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
class MemorySpaceAssignmentCostAnalysisTest : public HloTestBase {
protected:
absl::Status Initialize(const HloModule* module,
float pipeline_overhead_window_size_mib = 0.0) {
HloCostAnalysis::Options options;
options_.alternate_mem_bandwidth_bytes_per_second = 128;
options_.async_copy_bandwidth_bytes_per_second = 32;
options_.pipeline_overhead_window_size_mib =
pipeline_overhead_window_size_mib;
options.shape_size = ShapeSize;
options.set_flops_per_second(8);
options.set_bytes_per_second(32);
options.set_transcendentals_per_second(16);
options.set_flops_min_latency_second(1);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(options);
TF_RETURN_IF_ERROR(
module->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<memory_space_assignment::HloCostAnalysisCosts>(
*hlo_cost_analysis_);
TF_ASSIGN_OR_RETURN(
cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_, options_, *module));
return absl::OkStatus();
}
CostAnalysisOptions options_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<memory_space_assignment::HloCostAnalysisCosts>
hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
};
TEST_F(MemorySpaceAssignmentCostAnalysisTest, NoPipelineOverhead) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
param1 = f32[2,4] parameter(1)
ROOT add = f32[2,4] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(Initialize(module.get()));
const HloInstruction* add = module->entry_computation()->root_instruction();
const float expected_compute_elapsed = std::max(
8.0f / 8.0f,
hlo_cost_analysis_->min_latency_seconds(HloCostAnalysis::kFlopsKey));
LOG(INFO) << "Expected compute elapsed = " << expected_compute_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add),
expected_compute_elapsed);
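  // Three 2x4 f32 buffers (96 bytes total) at the default-memory bandwidth of
  // 32 bytes per second.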
float expected_memory_elapsed =
(3 * 4 * 8) / 32.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsed(*add),
expected_memory_elapsed);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedInAlternateMemory(*add, {}, {}),
expected_memory_elapsed);
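  // With operand 0 in alternate memory: 64 bytes move at 32 B/s and 32 bytes
  // at the alternate-memory bandwidth of 128 B/s.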
expected_memory_elapsed =
((2 * 4 * 8) / 32.0) +
((4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {}),
expected_memory_elapsed);
expected_memory_elapsed =
((4 * 8) / 32.0) +
((2 * 4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
expected_memory_elapsed =
(3 * 4 * 8) / 128.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_compute_elapsed);
}
TEST_F(MemorySpaceAssignmentCostAnalysisTest, PipelineOverhead) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,4] parameter(0)
param1 = f32[2,4] parameter(1)
ROOT add = f32[2,4] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(
      Initialize(module.get(),
                 /*pipeline_overhead_window_size_mib=*/(64.0 / 1024 / 1024)));
const HloInstruction* add = module->entry_computation()->root_instruction();
const float expected_compute_elapsed = std::max(
8.0f / 8.0f,
hlo_cost_analysis_->min_latency_seconds(HloCostAnalysis::kFlopsKey));
LOG(INFO) << "Expected compute elapsed = " << expected_compute_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add),
expected_compute_elapsed);
float expected_memory_elapsed =
(3 * 4 * 8) / 32.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add),
expected_memory_elapsed);
float expected_overhead = expected_compute_elapsed * 2 / 3;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(*add),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsed(*add),
expected_memory_elapsed + expected_overhead);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedInAlternateMemory(*add, {}, {}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
((2 * 4 * 8) / 32.0) +
((4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(*add, {{0, {}}}),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
((4 * 8) / 32.0) +
((2 * 4 * 8) / 128.0);
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
expected_overhead = expected_compute_elapsed / 3;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(
cost_analysis_->GetDefaultMemoryAccessOverhead(*add, {{0, {}}}, {{}}),
expected_overhead);
EXPECT_EQ(
cost_analysis_->GetInstructionElapsedDueToMemory(*add, {{0, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}}, {{}}),
expected_memory_elapsed + expected_overhead);
expected_memory_elapsed =
(3 * 4 * 8) / 128.0;
LOG(INFO) << "Expected memory elapsed = " << expected_memory_elapsed;
expected_overhead = 0;
LOG(INFO) << "Expected overhead = " << expected_overhead;
EXPECT_EQ(cost_analysis_->GetDefaultMemoryAccessOverhead(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_overhead);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_memory_elapsed);
EXPECT_EQ(cost_analysis_->GetInstructionElapsedInAlternateMemory(
*add, {{0, {}}, {1, {}}}, {{}}),
expected_compute_elapsed);
}
TEST_F(MemorySpaceAssignmentCostAnalysisTest, LatencyBoundCompute) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
param0 = f32[2,2] parameter(0)
param1 = f32[2,2] parameter(1)
ROOT add = f32[2,2] add(param0, param1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(Initialize(module.get()));
const HloInstruction* add = module->entry_computation()->root_instruction();
EXPECT_EQ(cost_analysis_->GetInstructionElapsedDueToCompute(*add), 1.0f);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a94ec4bd-38b0-43ce-b94c-ab20abb9e699 | cpp | tensorflow/tensorflow | sharding_serdes | third_party/xla/xla/python/ifrt/sharding_serdes.cc | third_party/xla/xla/python/ifrt/sharding_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/ifrt/sharding_serdes.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
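// Each SerDes below maps one sharding type to its proto form. Devices are
// serialized by id and resolved during deserialization through the
// lookup_device callback carried in DeserializeShardingOptions.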
class SingleDeviceShardingSerDes
: public llvm::RTTIExtends<SingleDeviceShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::SingleDeviceSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const SingleDeviceSharding& sharding =
llvm::cast<SingleDeviceSharding>(serializable);
SingleDeviceShardingProto proto;
proto.set_device_id(sharding.devices()->devices().front()->Id().value());
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
SingleDeviceShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized SimpleDeviceSharding");
}
TF_ASSIGN_OR_RETURN(Device * device,
deserialize_sharding_options->lookup_device(
DeviceId(proto.device_id())));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
return SingleDeviceSharding::Create(device, memory_kind);
}
static char ID;
};
class OpaqueShardingSerDes
: public llvm::RTTIExtends<OpaqueShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::OpaqueSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const OpaqueSharding& sharding = llvm::cast<OpaqueSharding>(serializable);
OpaqueShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
OpaqueShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized OpaqueSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
return OpaqueSharding::Create(std::move(devices), memory_kind);
}
static char ID;
};
class ConcreteShardingSerDes
: public llvm::RTTIExtends<ConcreteShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::ConcreteSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const ConcreteSharding& sharding =
llvm::cast<ConcreteSharding>(serializable);
ConcreteShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
if (sharding.has_static_shape()) {
*proto.mutable_shape() = sharding.shape().ToProto();
for (const Shape& shape : sharding.shard_shapes()) {
*proto.add_shard_shapes() = shape.ToProto();
}
} else {
*proto.mutable_dynamic_shape() = sharding.dynamic_shape().ToProto();
for (const DynamicShape& dynamic_shape :
sharding.shard_dynamic_shapes()) {
*proto.add_shard_dynamic_shapes() = dynamic_shape.ToProto();
}
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
ConcreteShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized ConcreteSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
if (proto.has_shape()) {
TF_ASSIGN_OR_RETURN(auto shape, Shape::FromProto(proto.shape()));
std::vector<Shape> shard_shapes;
shard_shapes.reserve(proto.shard_shapes_size());
for (const auto& shard_shape_proto : proto.shard_shapes()) {
TF_ASSIGN_OR_RETURN(auto shard_shape,
Shape::FromProto(shard_shape_proto));
shard_shapes.push_back(std::move(shard_shape));
}
return ConcreteSharding::Create(std::move(devices), memory_kind,
std::move(shape),
std::move(shard_shapes));
}
if (!proto.has_dynamic_shape()) {
return absl::InvalidArgumentError(
"ConcreteSharding must have Shape or DynamicShape.");
}
TF_ASSIGN_OR_RETURN(auto dynamic_shape,
DynamicShape::FromProto(proto.dynamic_shape()));
std::vector<DynamicShape> shard_dynamic_shapes;
shard_dynamic_shapes.reserve(proto.shard_dynamic_shapes_size());
for (const auto& shard_dynamic_shape_proto : proto.shard_dynamic_shapes()) {
      TF_ASSIGN_OR_RETURN(auto shard_dynamic_shape,
                          DynamicShape::FromProto(shard_dynamic_shape_proto));
      shard_dynamic_shapes.push_back(std::move(shard_dynamic_shape));
}
return ConcreteSharding::Create(std::move(devices), memory_kind,
std::move(dynamic_shape),
std::move(shard_dynamic_shapes));
}
static char ID;
};
class ConcreteEvenShardingSerDes
: public llvm::RTTIExtends<ConcreteEvenShardingSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::ConcreteEvenSharding";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const ConcreteEvenSharding& sharding =
llvm::cast<ConcreteEvenSharding>(serializable);
ConcreteEvenShardingProto proto;
*proto.mutable_devices() = sharding.devices()->ToProto();
if (sharding.memory_kind().memory_kind().has_value()) {
proto.set_memory_kind(std::string(*sharding.memory_kind().memory_kind()));
}
*proto.mutable_shape() = sharding.shape().ToProto();
*proto.mutable_shard_shape() = sharding.shard_shape().ToProto();
proto.set_is_fully_replicated(sharding.IsFullyReplicated());
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_sharding_options =
llvm::cast<DeserializeShardingOptions>(options.get());
ConcreteEvenShardingProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized ConcreteEvenSharding");
}
TF_ASSIGN_OR_RETURN(
auto devices,
DeviceList::FromProto(deserialize_sharding_options->lookup_device,
proto.devices()));
MemoryKind memory_kind;
if (proto.has_memory_kind()) {
memory_kind = MemoryKind(proto.memory_kind());
}
TF_ASSIGN_OR_RETURN(auto shape, Shape::FromProto(proto.shape()));
TF_ASSIGN_OR_RETURN(auto shard_shape,
Shape::FromProto(proto.shard_shape()));
return ConcreteEvenSharding::Create(
std::move(devices), memory_kind, std::move(shape),
std::move(shard_shape), proto.is_fully_replicated());
}
static char ID;
};
[[maybe_unused]] char SingleDeviceShardingSerDes::ID = 0;
[[maybe_unused]] char OpaqueShardingSerDes::ID = 0;
[[maybe_unused]] char ConcreteShardingSerDes::ID = 0;
[[maybe_unused]] char ConcreteEvenShardingSerDes::ID = 0;
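// Register the SerDes implementations during static initialization; the bool
// initializers exist only to force the registration lambdas to run at load
// time.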
bool register_single_device_sharding_serdes = ([]{
RegisterSerDes<SingleDeviceSharding>(
std::make_unique<SingleDeviceShardingSerDes>());
}(), true);
bool register_opaque_sharding_serdes = ([]{
RegisterSerDes<OpaqueSharding>(
std::make_unique<OpaqueShardingSerDes>());
}(), true);
bool register_concrete_sharding_serdes = ([]{
RegisterSerDes<ConcreteSharding>(
std::make_unique<ConcreteShardingSerDes>());
}(), true);
bool register_concrete_even_sharding_serdes = ([]{
RegisterSerDes<ConcreteEvenSharding>(
std::make_unique<ConcreteEvenShardingSerDes>());
}(), true);
}
}
} | #include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
class ShardingSerDesTest : public test_util::DeviceTest {};
TEST_P(ShardingSerDesTest, SingleDeviceShardingRoundTrip) {
auto sharding = SingleDeviceSharding::Create(
GetDevices({0})->devices().front(), MemoryKind("abc"));
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<SingleDeviceSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
}
TEST_P(ShardingSerDesTest, OpaqueShardingRoundTrip) {
auto sharding = OpaqueSharding::Create(GetDevices({0, 1}), MemoryKind("abc"));
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<OpaqueSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
}
TEST_P(ShardingSerDesTest, ConcreteShardingRoundTrip) {
auto sharding = ConcreteSharding::Create(
GetDevices({0, 1}), MemoryKind("abc"),
Shape({10, 20}),
{Shape({3, 20}), Shape({7, 20})});
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<ConcreteSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_THAT(out_sharding->shape(), sharding->shape());
EXPECT_THAT(out_sharding->shard_shapes(),
ElementsAreArray(sharding->shard_shapes()));
}
TEST_P(ShardingSerDesTest, ConcreteShardingWithDynamicShapeRoundTrip) {
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape dynamic_shape,
DynamicShape::Create(Shape({10, 20}),
BoundedDynamicShapeTag({false, true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape1,
DynamicShape::Create(Shape({3, 20}),
BoundedDynamicShapeTag({false, true})));
TF_ASSERT_OK_AND_ASSIGN(
DynamicShape shard_dynamic_shape2,
DynamicShape::Create(Shape({7, 20}),
BoundedDynamicShapeTag({false, true})));
auto sharding = ConcreteSharding::Create(
GetDevices({0, 1}), MemoryKind("abc"),
dynamic_shape,
{shard_dynamic_shape1, shard_dynamic_shape2});
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<ConcreteSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_THAT(out_sharding->dynamic_shape(), sharding->dynamic_shape());
EXPECT_THAT(out_sharding->shard_dynamic_shapes(),
ElementsAreArray(sharding->shard_dynamic_shapes()));
}
TEST_P(ShardingSerDesTest, ConcreteEvenShardingRoundTrip) {
auto sharding = ConcreteEvenSharding::Create(
GetDevices({0, 1}), MemoryKind("abc"),
Shape({10, 20}),
Shape({5, 20}), true);
TF_ASSERT_OK_AND_ASSIGN(auto serialized, Serialize(*sharding));
TF_ASSERT_OK_AND_ASSIGN(
auto out_sharding,
Deserialize<ConcreteEvenSharding>(
serialized, std::make_unique<DeserializeShardingOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_THAT(out_sharding->devices()->devices(),
ElementsAreArray(sharding->devices()->devices()));
EXPECT_THAT(out_sharding->shape(), sharding->shape());
EXPECT_THAT(out_sharding->shard_shape(), sharding->shard_shape());
EXPECT_THAT(out_sharding->IsFullyReplicated(), sharding->IsFullyReplicated());
}
INSTANTIATE_TEST_SUITE_P(NumDevices, ShardingSerDesTest,
testing::Values(test_util::DeviceTestParam{
2,
2}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/sharding_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/sharding_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a3462daa-9223-4973-afc8-c1e4444e9482 | cpp | tensorflow/tensorflow | queue_runner | tensorflow/cc/training/queue_runner.cc | tensorflow/cc/training/queue_runner_test.cc | #include "tensorflow/cc/training/queue_runner.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/training/coordinator.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/ops_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
Status QueueRunner::New(const QueueRunnerDef& queue_runner_def,
std::unique_ptr<QueueRunner>* result) {
result->reset(new QueueRunner());
return (*result)->Init(queue_runner_def);
}
Status QueueRunner::New(const QueueRunnerDef& queue_runner_def,
Coordinator* coord,
std::unique_ptr<QueueRunner>* result) {
result->reset(new QueueRunner());
(*result)->coord_ = coord;
return (*result)->Init(queue_runner_def);
}
void QueueRunner::AddErrorCallback(const std::function<void(Status)>& cb) {
mutex_lock l(cb_mu_);
callbacks_.push_back(cb);
}
void QueueRunner::ClearErrorCallbacks() {
mutex_lock l(cb_mu_);
callbacks_.clear();
}
Status QueueRunner::Init(const QueueRunnerDef& queue_runner_def) {
queue_name_ = queue_runner_def.queue_name();
enqueue_op_names_.clear();
enqueue_op_names_.insert(enqueue_op_names_.end(),
queue_runner_def.enqueue_op_name().begin(),
queue_runner_def.enqueue_op_name().end());
size_t op_names_size = enqueue_op_names_.size();
if (op_names_size > kint32max) {
return Status(absl::StatusCode::kInvalidArgument,
"Enqueue ops to run cannot exceed kint32max");
}
runs_ = static_cast<int>(op_names_size);
if (runs_ == 0) {
return Status(absl::StatusCode::kInvalidArgument,
"Empty enqueue ops to run.");
}
close_op_name_ = queue_runner_def.close_op_name();
cancel_op_name_ = queue_runner_def.cancel_op_name();
if (queue_runner_def.queue_closed_exception_types_size() == 0) {
queue_closed_exception_types_.insert(error::OUT_OF_RANGE);
} else {
for (const auto& code : queue_runner_def.queue_closed_exception_types()) {
queue_closed_exception_types_.insert(static_cast<int>(code));
}
}
int nthreads = runs_;
if (coord_) {
nthreads++;
}
thread_pool_.reset(new thread::ThreadPool(
Env::Default(), SanitizeThreadSuffix(queue_name_), nthreads));
return absl::OkStatus();
}
QueueRunner::~QueueRunner() {
Join().IgnoreError();
}
Status QueueRunner::Start(Session* sess) { return Start(sess, 0); }
Status QueueRunner::StartAndCollectCostGraph(Session* sess,
const RunOptions& run_options) {
SetRunArgumentsAndCostGraph(run_options);
return Start(sess, 0);
}
Status QueueRunner::Start(Session* sess, int wait_for) {
counter_.reset(new BlockingCounter(runs_));
for (const string& enqueue_op : enqueue_op_names_) {
thread_pool_->Schedule(
std::bind(&QueueRunner::Run, this, sess, enqueue_op));
}
if (coord_) {
thread_pool_->Schedule(std::bind(&QueueRunner::Stop, this, sess));
}
if (wait_for > 0) {
if (!counter_->WaitFor(std::chrono::milliseconds(wait_for))) {
return Status(absl::StatusCode::kDeadlineExceeded,
"Queues not fed before the timeout");
}
mutex_lock l(mu_);
if (!enqueue_status_.ok()) {
return enqueue_status_;
} else {
return status_;
}
}
return absl::OkStatus();
}
Status QueueRunner::StartAndCollectCostGraph(Session* session, int wait_for_ms,
const RunOptions& run_options) {
SetRunArgumentsAndCostGraph(run_options);
return Start(session, wait_for_ms);
}
void QueueRunner::Stop(Session* sess) {
if (coord_ != nullptr) {
coord_->WaitForStop();
}
if (!cancel_op_name_.empty()) {
UpdateStatus(RealRun(sess, cancel_op_name_, false));
}
stopped_ = true;
}
Status QueueRunner::Join() {
thread_pool_.reset();
mutex_lock l(mu_);
return status_;
}
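// Keeps only the first non-queue-closed error and propagates it to the
// coordinator and any registered error callbacks.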
void QueueRunner::UpdateStatus(const Status& status) {
{
mutex_lock l(mu_);
if (!status_.ok() || status.ok() || IsQueueClosed(status)) {
return;
}
status_ = status;
}
if (coord_) {
coord_->ReportStatus(status);
}
mutex_lock l(cb_mu_);
for (auto& cb : callbacks_) {
cb(status);
}
}
void QueueRunner::Run(Session* sess, const string& enqueue_op) {
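  // Runs the enqueue op in a loop until it fails or the coordinator requests a
  // stop; the blocking counter is decremented after the first attempt so that
  // Start() can wait until every queue has been fed at least once.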
bool first_iteration = true;
Status status;
while (status.ok()) {
if (coord_ && coord_->ShouldStop()) {
break;
}
status = RealRun(sess, enqueue_op, true);
if (first_iteration) {
if (!status.ok()) {
mutex_lock l(mu_);
enqueue_status_ = status;
}
counter_->DecrementCount();
first_iteration = false;
}
}
bool last_run = false;
{
mutex_lock l(mu_);
runs_--;
last_run = (runs_ == 0);
}
if (IsQueueClosed(status) && (!coord_ || !coord_->ShouldStop())) {
if (last_run && !close_op_name_.empty()) {
UpdateStatus(RealRun(sess, close_op_name_, false));
}
} else if (!status.ok()) {
LOG(ERROR) << "Queue runner thread got a failure status: " << status;
UpdateStatus(status);
if (coord_) {
coord_->RequestStop().IgnoreError();
}
}
}
Status QueueRunner::GetStatus() {
mutex_lock l(mu_);
return status_;
}
Status QueueRunner::ExportCostGraph(CostGraphDef* cost_graph) const {
if (!cg_mu_) {
return Status(absl::StatusCode::kFailedPrecondition,
"This QueueRunner doesn't collect a cost graph.");
}
mutex_lock l(*cg_mu_);
cost_graph->MergeFrom(*cost_graph_);
return absl::OkStatus();
}
void QueueRunner::SetRunArgumentsAndCostGraph(const RunOptions& run_options) {
cg_mu_.reset(new mutex());
{
mutex_lock l(*cg_mu_);
cost_graph_.reset(new CostGraphDef());
}
run_options_ = run_options;
}
Status QueueRunner::RealRun(Session* sess, const string& op,
bool update_costs) {
Status s;
if (update_costs && cg_mu_) {
RunMetadata metadata;
s = sess->Run(run_options_, {}, {}, {op}, nullptr, &metadata);
mutex_lock l(*cg_mu_);
cost_graph_->Swap(metadata.mutable_cost_graph());
} else {
s = sess->Run({}, {}, {op}, nullptr);
}
return s;
}
} | #include "tensorflow/cc/training/queue_runner.h"
#include <string>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/data_flow_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/random_ops.h"
#include "tensorflow/cc/ops/state_ops.h"
#include "tensorflow/cc/training/coordinator.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/queue_runner.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/status.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using error::Code;
using ops::Assign;
using ops::Const;
using ops::CountUpTo;
using ops::FIFOQueue;
using ops::QueueClose;
using ops::QueueDequeue;
using ops::QueueEnqueue;
using ops::RandomNormal;
using ops::Square;
using ops::Variable;
constexpr char kAssignOpName[] = "assign";
constexpr char kCancelOp0[] = "cancel0";
constexpr char kCancelOp1[] = "cancel1";
constexpr char kCloseOp0[] = "close0";
constexpr char kCloseOp1[] = "close1";
constexpr char kCountUpToOpName[] = "count";
constexpr char kDequeueOp0[] = "dequeue0";
constexpr char kDequeueOp1[] = "dequeue1";
constexpr char kEnqueueOp0[] = "enqueue0";
constexpr char kEnqueueOp1[] = "enqueue1";
constexpr char kIllegalOpName1[] = "would fail";
constexpr char kIllegalOpName2[] = "fail again";
constexpr char kQueueName[] = "unit_test";
constexpr char kQueueName0[] = "q0";
constexpr char kQueueName1[] = "q1";
constexpr char kSquareOpName[] = "square";
constexpr char kVarOpName[] = "var";
GraphDef BuildSimpleGraph() {
Scope root = Scope::NewRootScope();
auto init_value = Const(root, 0);
auto var = Variable(root.WithOpName(kVarOpName), TensorShape({}),
DataType::DT_INT32);
auto assign = Assign(root.WithOpName(kAssignOpName), var, init_value);
auto count = CountUpTo(root.WithOpName(kCountUpToOpName), var, 10);
Square(root.WithOpName(kSquareOpName), var);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
return graph_def;
}
QueueRunnerDef BuildQueueRunnerDef(
const std::string& queue_name, const std::vector<std::string>& enqueue_ops,
const std::string& close_op, const std::string& cancel_op,
const std::vector<Code>& queue_closed_error_codes) {
QueueRunnerDef queue_runner_def;
*queue_runner_def.mutable_queue_name() = queue_name;
for (const std::string& enqueue_op : enqueue_ops) {
*queue_runner_def.mutable_enqueue_op_name()->Add() = enqueue_op;
}
*queue_runner_def.mutable_close_op_name() = close_op;
*queue_runner_def.mutable_cancel_op_name() = cancel_op;
for (const auto& error_code : queue_closed_error_codes) {
*queue_runner_def.mutable_queue_closed_exception_types()->Add() =
error_code;
}
return queue_runner_def;
}
std::unique_ptr<Session> BuildSessionAndInitVariable(
const GraphDef& graph_def) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
TF_CHECK_OK(session->Run({}, {}, {kAssignOpName}, nullptr));
return session;
}
TEST(QueueRunnerTest, BasicTest) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName}, kSquareOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session->Run({}, {kSquareOpName}, {}, &outputs));
int square_value = *outputs[0].scalar<int>().data();
EXPECT_EQ(square_value, 100);
}
TEST(QueueRunnerTest, QueueClosedCode) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName, kCountUpToOpName}, kSquareOpName, "",
{Code::OUT_OF_RANGE, Code::CANCELLED});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> outputs;
TF_EXPECT_OK(session->Run({}, {kSquareOpName}, {}, &outputs));
int square_value = *outputs[0].scalar<int>().data();
EXPECT_EQ(square_value, 100);
}
TEST(QueueRunnerTest, QueueCloseFails) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kCountUpToOpName}, kIllegalOpName1, "",
{Code::OUT_OF_RANGE});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
auto status = qr->Join();
EXPECT_EQ(status.code(), Code::NOT_FOUND) << status;
}
TEST(QueueRunnerTest, CatchErrorInJoin) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kIllegalOpName1, kIllegalOpName2}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_EXPECT_OK(qr->Start(session.get()));
EXPECT_EQ(qr->Join().code(), Code::NOT_FOUND);
}
GraphDef BuildDoubleQueueGraph() {
Scope root = Scope::NewRootScope();
auto q0 = FIFOQueue(root.WithOpName(kQueueName0), {DataType::DT_INT32});
auto ten = Const(root, 10);
auto enqueue0 = QueueEnqueue(root.WithOpName(kEnqueueOp0), q0, {ten});
auto close0 = QueueClose(root.WithOpName(kCloseOp0), q0);
auto cancel0 = QueueClose(root.WithOpName(kCancelOp0), q0,
QueueClose::CancelPendingEnqueues(true));
auto q1 = FIFOQueue(root.WithOpName(kQueueName1), {DataType::DT_INT32},
FIFOQueue::Capacity(3));
auto dequeue0 =
QueueDequeue(root.WithOpName(kDequeueOp0), q0, {DataType::DT_INT32});
auto enqueue1 = QueueEnqueue(root.WithOpName(kEnqueueOp1), q1, {dequeue0[0]});
auto dequeue1 =
QueueDequeue(root.WithOpName(kDequeueOp1), q1, {DataType::DT_INT32});
auto close1 = QueueClose(root.WithOpName(kCloseOp1), q1);
auto cancel1 = QueueClose(root.WithOpName(kCancelOp1), q1,
QueueClose::CancelPendingEnqueues(true));
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
return graph_def;
}
TEST(QueueRunnerTest, RealEnqueueDequeue) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kEnqueueOp1}, kCloseOp1, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
TF_EXPECT_OK(session->Run({}, {}, {kCloseOp0}, nullptr));
TF_EXPECT_OK(qr->Join());
std::vector<Tensor> dq1;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq1));
EXPECT_EQ(*dq1[0].scalar<int>().data(), 10);
std::vector<Tensor> dq2;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq2));
EXPECT_EQ(*dq2[0].scalar<int>().data(), 10);
EXPECT_EQ(session->Run({}, {kDequeueOp1}, {}, nullptr).code(),
Code::OUT_OF_RANGE);
}
void JoinThread(QueueRunner* queue_runner, bool* join_succeeded,
Notification* join_done) {
EXPECT_EQ(queue_runner->Join().code(), Code::CANCELLED);
*join_succeeded = true;
join_done->Notify();
}
TEST(QueueRunnerTest, SessionCloseCancelPendingEnqueue) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(session->Run({}, {}, {kEnqueueOp0}, nullptr));
std::vector<Tensor> dq1;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq1));
EXPECT_EQ(*dq1[0].scalar<int>().data(), 10);
bool join_succeeded = false;
Notification join_done;
Env::Default()->SchedClosure(
std::bind(&JoinThread, qr.get(), &join_succeeded, &join_done));
Env::Default()->SleepForMicroseconds(10000000);
EXPECT_EQ(join_succeeded, false);
TF_EXPECT_OK(session->Close());
join_done.WaitForNotification();
EXPECT_EQ(join_succeeded, true);
}
TEST(QueueRunnerTest, EmptyEnqueueOps) {
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
EXPECT_EQ(QueueRunner::New(queue_runner_def, &qr).code(),
Code::INVALID_ARGUMENT);
}
TEST(QueueRunnerTest, StartTimeout) {
GraphDef graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
EXPECT_EQ(qr->Start(session.get(), 1).code(), Code::DEADLINE_EXCEEDED);
TF_EXPECT_OK(session->Close());
}
TEST(QueueRunnerTest, TestCoordinatorStop) {
auto graph_def = BuildDoubleQueueGraph();
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner0 =
BuildQueueRunnerDef(kQueueName0, {kEnqueueOp0}, kCloseOp0, kCancelOp0,
{Code::OUT_OF_RANGE, Code::CANCELLED});
QueueRunnerDef queue_runner1 =
BuildQueueRunnerDef(kQueueName1, {kEnqueueOp1}, kCloseOp1, kCancelOp1,
{Code::OUT_OF_RANGE, Code::CANCELLED});
Coordinator coord;
std::unique_ptr<QueueRunner> qr0;
TF_EXPECT_OK(QueueRunner::New(queue_runner0, &coord, &qr0));
TF_CHECK_OK(qr0->Start(session.get()));
std::unique_ptr<QueueRunner> qr1;
TF_EXPECT_OK(QueueRunner::New(queue_runner1, &coord, &qr1));
TF_CHECK_OK(qr1->Start(session.get()));
TF_EXPECT_OK(coord.RegisterRunner(std::move(qr0)));
TF_EXPECT_OK(coord.RegisterRunner(std::move(qr1)));
std::vector<Tensor> dq;
TF_EXPECT_OK(session->Run({}, {kDequeueOp1}, {}, &dq));
EXPECT_EQ(*dq[0].scalar<int>().data(), 10);
TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join());
}
TEST(QueueRunnerTest, CallbackCalledOnError) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kIllegalOpName1, kIllegalOpName2}, kCountUpToOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
bool error_caught = false;
qr->AddErrorCallback([&error_caught](const Status&) { error_caught = true; });
TF_EXPECT_OK(qr->Start(session.get()));
EXPECT_FALSE(qr->Join().ok());
EXPECT_TRUE(error_caught);
}
TEST(QueueRunnerTest, RunMetaDataTest) {
Scope root = Scope::NewRootScope();
auto q0 = FIFOQueue(root.WithOpName(kQueueName), {DataType::DT_FLOAT});
Output rnd = RandomNormal(root.WithOpName("rnd"), {1, 1}, DataType::DT_FLOAT);
Output square = Square(root.WithOpName(kSquareOpName), rnd);
auto enqueue0 = QueueEnqueue(root.WithOpName(kEnqueueOp0), q0, {square});
auto close0 = QueueClose(root.WithOpName(kCloseOp0), q0);
auto cancel0 = QueueClose(root.WithOpName(kCancelOp0), q0,
QueueClose::CancelPendingEnqueues(true));
auto dequeue0 =
QueueDequeue(root.WithOpName(kDequeueOp0), q0, {DataType::DT_FLOAT});
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
for (auto& node : *graph_def.mutable_node()) {
node.set_device("/cpu:0");
}
SessionOptions sess_options;
sess_options.config.mutable_graph_options()->set_build_cost_model(1);
std::unique_ptr<Session> session(NewSession(sess_options));
TF_CHECK_OK(session->Create(graph_def));
QueueRunnerDef queue_runner_def =
BuildQueueRunnerDef(kQueueName, {kEnqueueOp0}, kCloseOp0, kCancelOp0, {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
RunOptions run_options;
TF_CHECK_OK(qr->StartAndCollectCostGraph(session.get(), run_options));
std::vector<Tensor> dq0;
TF_EXPECT_OK(session->Run({}, {kDequeueOp0}, {}, &dq0));
TF_EXPECT_OK(session->Run({}, {kDequeueOp0}, {}, &dq0));
CostGraphDef cost_graph;
TF_CHECK_OK(qr->ExportCostGraph(&cost_graph));
EXPECT_GT(cost_graph.node_size(), 0);
qr->Stop(session.get());
}
TEST(QueueRunnerTest, NoRunMetaDataTest) {
GraphDef graph_def = BuildSimpleGraph();
auto session = BuildSessionAndInitVariable(graph_def);
QueueRunnerDef queue_runner_def = BuildQueueRunnerDef(
kQueueName, {kCountUpToOpName}, kSquareOpName, "", {});
std::unique_ptr<QueueRunner> qr;
TF_EXPECT_OK(QueueRunner::New(queue_runner_def, &qr));
TF_CHECK_OK(qr->Start(session.get()));
TF_EXPECT_OK(qr->Join());
CostGraphDef cost_graph;
EXPECT_EQ(qr->ExportCostGraph(&cost_graph).code(),
error::FAILED_PRECONDITION);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/queue_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/queue_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e025328a-7fcb-4e73-a31a-719e9981215e | cpp | google/quiche | quic_socket_address | quiche/quic/platform/api/quic_socket_address.cc | quiche/quic/platform/api/quic_socket_address_test.cc | #include "quiche/quic/platform/api/quic_socket_address.h"
#include <cstring>
#include <limits>
#include <string>
#include "absl/strings/str_cat.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_ip_address_family.h"
namespace quic {
namespace {
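// Folds an address into 32 bits: IPv4 hashes to its raw s_addr, IPv6 to the
// XOR of its four 32-bit words, and an uninitialized address to 0.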
uint32_t HashIP(const QuicIpAddress& ip) {
if (ip.IsIPv4()) {
return ip.GetIPv4().s_addr;
}
if (ip.IsIPv6()) {
auto v6addr = ip.GetIPv6();
const uint32_t* v6_as_ints =
reinterpret_cast<const uint32_t*>(&v6addr.s6_addr);
return v6_as_ints[0] ^ v6_as_ints[1] ^ v6_as_ints[2] ^ v6_as_ints[3];
}
return 0;
}
}
QuicSocketAddress::QuicSocketAddress(QuicIpAddress address, uint16_t port)
: host_(address), port_(port) {}
QuicSocketAddress::QuicSocketAddress(const struct sockaddr_storage& saddr) {
switch (saddr.ss_family) {
case AF_INET: {
const sockaddr_in* v4 = reinterpret_cast<const sockaddr_in*>(&saddr);
host_ = QuicIpAddress(v4->sin_addr);
port_ = ntohs(v4->sin_port);
break;
}
case AF_INET6: {
const sockaddr_in6* v6 = reinterpret_cast<const sockaddr_in6*>(&saddr);
host_ = QuicIpAddress(v6->sin6_addr);
port_ = ntohs(v6->sin6_port);
break;
}
default:
QUIC_BUG(quic_bug_10075_1)
<< "Unknown address family passed: " << saddr.ss_family;
break;
}
}
QuicSocketAddress::QuicSocketAddress(const sockaddr* saddr, socklen_t len) {
sockaddr_storage storage;
static_assert(std::numeric_limits<socklen_t>::max() >= sizeof(storage),
"Cannot cast sizeof(storage) to socklen_t as it does not fit");
if (len < static_cast<socklen_t>(sizeof(sockaddr)) ||
(saddr->sa_family == AF_INET &&
len < static_cast<socklen_t>(sizeof(sockaddr_in))) ||
(saddr->sa_family == AF_INET6 &&
len < static_cast<socklen_t>(sizeof(sockaddr_in6))) ||
len > static_cast<socklen_t>(sizeof(storage))) {
QUIC_BUG(quic_bug_10075_2) << "Socket address of invalid length provided";
return;
}
memcpy(&storage, saddr, len);
*this = QuicSocketAddress(storage);
}
bool operator==(const QuicSocketAddress& lhs, const QuicSocketAddress& rhs) {
return lhs.host_ == rhs.host_ && lhs.port_ == rhs.port_;
}
bool operator!=(const QuicSocketAddress& lhs, const QuicSocketAddress& rhs) {
return !(lhs == rhs);
}
bool QuicSocketAddress::IsInitialized() const { return host_.IsInitialized(); }
std::string QuicSocketAddress::ToString() const {
switch (host_.address_family()) {
case IpAddressFamily::IP_V4:
return absl::StrCat(host_.ToString(), ":", port_);
case IpAddressFamily::IP_V6:
return absl::StrCat("[", host_.ToString(), "]:", port_);
default:
return "";
}
}
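// Populates *this with the socket's locally bound address via getsockname().
// Returns 0 on success and -1 on failure, matching the POSIX convention.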
int QuicSocketAddress::FromSocket(int fd) {
sockaddr_storage addr;
socklen_t addr_len = sizeof(addr);
int result = getsockname(fd, reinterpret_cast<sockaddr*>(&addr), &addr_len);
bool success = result == 0 && addr_len > 0 &&
static_cast<size_t>(addr_len) <= sizeof(addr);
if (success) {
*this = QuicSocketAddress(addr);
return 0;
}
return -1;
}
QuicSocketAddress QuicSocketAddress::Normalized() const {
return QuicSocketAddress(host_.Normalized(), port_);
}
QuicIpAddress QuicSocketAddress::host() const { return host_; }
uint16_t QuicSocketAddress::port() const { return port_; }
sockaddr_storage QuicSocketAddress::generic_address() const {
union {
sockaddr_storage storage;
sockaddr_in v4;
sockaddr_in6 v6;
} result;
memset(&result.storage, 0, sizeof(result.storage));
switch (host_.address_family()) {
case IpAddressFamily::IP_V4:
result.v4.sin_family = AF_INET;
result.v4.sin_addr = host_.GetIPv4();
result.v4.sin_port = htons(port_);
break;
case IpAddressFamily::IP_V6:
result.v6.sin6_family = AF_INET6;
result.v6.sin6_addr = host_.GetIPv6();
result.v6.sin6_port = htons(port_);
break;
default:
result.storage.ss_family = AF_UNSPEC;
break;
}
return result.storage;
}
uint32_t QuicSocketAddress::Hash() const {
uint32_t value = 0;
value ^= HashIP(host_);
value ^= port_ | (port_ << 16);
return value;
}
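// Illustrative round-trip sketch (mirrors the companion test below):
//
//   QuicSocketAddress addr(QuicIpAddress::Loopback4(), 443);
//   sockaddr_storage raw = addr.generic_address();
//   QuicSocketAddress copy(raw);
//   // copy == addr, and copy.ToString() yields "127.0.0.1:443".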
} | #include "quiche/quic/platform/api/quic_socket_address.h"
#include <cstring>
#include <memory>
#include <sstream>
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace {
TEST(QuicSocketAddress, Uninitialized) {
QuicSocketAddress uninitialized;
EXPECT_FALSE(uninitialized.IsInitialized());
}
TEST(QuicSocketAddress, ExplicitConstruction) {
QuicSocketAddress ipv4_address(QuicIpAddress::Loopback4(), 443);
QuicSocketAddress ipv6_address(QuicIpAddress::Loopback6(), 443);
EXPECT_TRUE(ipv4_address.IsInitialized());
EXPECT_EQ("127.0.0.1:443", ipv4_address.ToString());
EXPECT_EQ("[::1]:443", ipv6_address.ToString());
EXPECT_EQ(QuicIpAddress::Loopback4(), ipv4_address.host());
EXPECT_EQ(QuicIpAddress::Loopback6(), ipv6_address.host());
EXPECT_EQ(443, ipv4_address.port());
}
TEST(QuicSocketAddress, OutputToStream) {
QuicSocketAddress ipv4_address(QuicIpAddress::Loopback4(), 443);
std::stringstream stream;
stream << ipv4_address;
EXPECT_EQ("127.0.0.1:443", stream.str());
}
TEST(QuicSocketAddress, FromSockaddrIPv4) {
union {
sockaddr_storage storage;
sockaddr addr;
sockaddr_in v4;
} address;
memset(&address, 0, sizeof(address));
address.v4.sin_family = AF_INET;
address.v4.sin_addr = QuicIpAddress::Loopback4().GetIPv4();
address.v4.sin_port = htons(443);
EXPECT_EQ("127.0.0.1:443",
QuicSocketAddress(&address.addr, sizeof(address.v4)).ToString());
EXPECT_EQ("127.0.0.1:443", QuicSocketAddress(address.storage).ToString());
}
TEST(QuicSocketAddress, FromSockaddrIPv6) {
union {
sockaddr_storage storage;
sockaddr addr;
sockaddr_in6 v6;
} address;
memset(&address, 0, sizeof(address));
address.v6.sin6_family = AF_INET6;
address.v6.sin6_addr = QuicIpAddress::Loopback6().GetIPv6();
address.v6.sin6_port = htons(443);
EXPECT_EQ("[::1]:443",
QuicSocketAddress(&address.addr, sizeof(address.v6)).ToString());
EXPECT_EQ("[::1]:443", QuicSocketAddress(address.storage).ToString());
}
TEST(QuicSocketAddress, ToSockaddrIPv4) {
union {
sockaddr_storage storage;
sockaddr_in v4;
} address;
address.storage =
QuicSocketAddress(QuicIpAddress::Loopback4(), 443).generic_address();
ASSERT_EQ(AF_INET, address.v4.sin_family);
EXPECT_EQ(QuicIpAddress::Loopback4(), QuicIpAddress(address.v4.sin_addr));
EXPECT_EQ(htons(443), address.v4.sin_port);
}
TEST(QuicSocketAddress, Normalize) {
QuicIpAddress dual_stacked;
ASSERT_TRUE(dual_stacked.FromString("::ffff:127.0.0.1"));
ASSERT_TRUE(dual_stacked.IsIPv6());
QuicSocketAddress not_normalized(dual_stacked, 443);
QuicSocketAddress normalized = not_normalized.Normalized();
EXPECT_EQ("[::ffff:127.0.0.1]:443", not_normalized.ToString());
EXPECT_EQ("127.0.0.1:443", normalized.ToString());
}
#if defined(__linux__) && !defined(ANDROID)
#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
TEST(QuicSocketAddress, FromSocket) {
int fd;
QuicSocketAddress address;
bool bound = false;
for (int port = 50000; port < 50400; port++) {
fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDP);
ASSERT_GT(fd, 0);
address = QuicSocketAddress(QuicIpAddress::Loopback6(), port);
sockaddr_storage raw_address = address.generic_address();
int bind_result = bind(fd, reinterpret_cast<const sockaddr*>(&raw_address),
sizeof(sockaddr_in6));
if (bind_result < 0 && errno == EADDRINUSE) {
close(fd);
continue;
}
ASSERT_EQ(0, bind_result);
bound = true;
break;
}
ASSERT_TRUE(bound);
QuicSocketAddress real_address;
ASSERT_EQ(0, real_address.FromSocket(fd));
ASSERT_TRUE(real_address.IsInitialized());
EXPECT_EQ(real_address, address);
close(fd);
}
#endif
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/platform/api/quic_socket_address.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/platform/api/quic_socket_address_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
de101ed3-e456-4c60-85ce-bf326ecce6b6 | cpp | google/cel-cpp | parsed_repeated_field_value | common/values/parsed_repeated_field_value.cc | common/values/parsed_repeated_field_value_test.cc | #include "common/values/parsed_repeated_field_value.h"
#include <cstddef>
#include <memory>
#include <string>
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/status_macros.h"
#include "google/protobuf/message.h"
namespace cel {
std::string ParsedRepeatedFieldValue::DebugString() const {
if (ABSL_PREDICT_FALSE(field_ == nullptr)) {
return "INVALID";
}
return "VALID";
}
absl::Status ParsedRepeatedFieldValue::SerializeTo(
AnyToJsonConverter& converter, absl::Cord& value) const {
return absl::UnimplementedError("SerializeTo is not yet implemented");
}
absl::StatusOr<Json> ParsedRepeatedFieldValue::ConvertToJson(
AnyToJsonConverter& converter) const {
return absl::UnimplementedError("ConvertToJson is not yet implemented");
}
absl::StatusOr<JsonArray> ParsedRepeatedFieldValue::ConvertToJsonArray(
AnyToJsonConverter& converter) const {
return absl::UnimplementedError("ConvertToJsonArray is not yet implemented");
}
absl::Status ParsedRepeatedFieldValue::Equal(ValueManager& value_manager,
const Value& other,
Value& result) const {
return absl::UnimplementedError("Equal is not yet implemented");
}
absl::StatusOr<Value> ParsedRepeatedFieldValue::Equal(
ValueManager& value_manager, const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
bool ParsedRepeatedFieldValue::IsZeroValue() const { return IsEmpty(); }
bool ParsedRepeatedFieldValue::IsEmpty() const { return Size() == 0; }
size_t ParsedRepeatedFieldValue::Size() const {
ABSL_DCHECK(*this);
if (ABSL_PREDICT_FALSE(field_ == nullptr)) {
return 0;
}
return static_cast<size_t>(
GetReflectionOrDie()->FieldSize(*message_, field_));
}
absl::Status ParsedRepeatedFieldValue::Get(ValueManager& value_manager,
size_t index, Value& result) const {
return absl::UnimplementedError("Get is not yet implemented");
}
absl::StatusOr<Value> ParsedRepeatedFieldValue::Get(ValueManager& value_manager,
size_t index) const {
Value result;
CEL_RETURN_IF_ERROR(Get(value_manager, index, result));
return result;
}
absl::Status ParsedRepeatedFieldValue::ForEach(ValueManager& value_manager,
ForEachCallback callback) const {
return absl::UnimplementedError("ForEach is not yet implemented");
}
absl::Status ParsedRepeatedFieldValue::ForEach(
ValueManager& value_manager, ForEachWithIndexCallback callback) const {
return absl::UnimplementedError("ForEach is not yet implemented");
}
absl::StatusOr<absl::Nonnull<std::unique_ptr<ValueIterator>>>
ParsedRepeatedFieldValue::NewIterator(ValueManager& value_manager) const {
return absl::UnimplementedError("NewIterator is not yet implemented");
}
absl::Status ParsedRepeatedFieldValue::Contains(ValueManager& value_manager,
const Value& other,
Value& result) const {
return absl::UnimplementedError("Contains is not yet implemented");
}
absl::StatusOr<Value> ParsedRepeatedFieldValue::Contains(
ValueManager& value_manager, const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Contains(value_manager, other, result));
return result;
}
absl::Nonnull<const google::protobuf::Reflection*>
ParsedRepeatedFieldValue::GetReflectionOrDie() const {
return ABSL_DIE_IF_NULL(message_->GetReflection());
}
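// Illustrative sketch of the implemented fast paths (hypothetical `message`
// and `field`; most other methods in this class are still unimplemented
// stubs above):
//
//   ParsedRepeatedFieldValue value(message, field);
//   if (value) {                        // field_ != nullptr
//     size_t n = value.Size();          // reflection FieldSize()
//     bool zero = value.IsZeroValue();  // true iff n == 0
//   }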
} | #include <cstddef>
#include "absl/base/nullability.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "common/value_manager.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel {
namespace {
using ::absl_testing::StatusIs;
using ::cel::internal::DynamicParseTextProto;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::testing::_;
using ::testing::PrintToStringParamName;
using ::testing::TestWithParam;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
class ParsedRepeatedFieldValueTest : public TestWithParam<AllocatorKind> {
public:
void SetUp() override {
switch (GetParam()) {
case AllocatorKind::kArena:
arena_.emplace();
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::Pooling(arena()),
NewThreadCompatibleTypeReflector(MemoryManager::Pooling(arena())));
break;
case AllocatorKind::kNewDelete:
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::ReferenceCounting(),
NewThreadCompatibleTypeReflector(
MemoryManager::ReferenceCounting()));
break;
}
}
void TearDown() override {
value_manager_.reset();
arena_.reset();
}
Allocator<> allocator() {
return arena_ ? ArenaAllocator(&*arena_) : NewDeleteAllocator();
}
absl::Nullable<google::protobuf::Arena*> arena() { return allocator().arena(); }
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
return GetTestingDescriptorPool();
}
absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
return GetTestingMessageFactory();
}
ValueManager& value_manager() { return **value_manager_; }
private:
absl::optional<google::protobuf::Arena> arena_;
absl::optional<Shared<ValueManager>> value_manager_;
};
TEST_P(ParsedRepeatedFieldValueTest, Default) {
ParsedRepeatedFieldValue value;
EXPECT_FALSE(value);
}
TEST_P(ParsedRepeatedFieldValueTest, Field) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_TRUE(value);
}
TEST_P(ParsedRepeatedFieldValueTest, Kind) {
ParsedRepeatedFieldValue value;
EXPECT_EQ(value.kind(), ParsedRepeatedFieldValue::kKind);
EXPECT_EQ(value.kind(), ValueKind::kList);
}
TEST_P(ParsedRepeatedFieldValueTest, GetTypeName) {
ParsedRepeatedFieldValue value;
EXPECT_EQ(value.GetTypeName(), ParsedRepeatedFieldValue::kName);
EXPECT_EQ(value.GetTypeName(), "list");
}
TEST_P(ParsedRepeatedFieldValueTest, GetRuntimeType) {
ParsedRepeatedFieldValue value;
EXPECT_EQ(value.GetRuntimeType(), ListType());
}
TEST_P(ParsedRepeatedFieldValueTest, DebugString) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.DebugString(), _);
}
TEST_P(ParsedRepeatedFieldValueTest, IsZeroValue) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_TRUE(valid_value.IsZeroValue());
}
TEST_P(ParsedRepeatedFieldValueTest, SerializeTo) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
absl::Cord serialized;
EXPECT_THAT(valid_value.SerializeTo(value_manager(), serialized),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedRepeatedFieldValueTest, ConvertToJson) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.ConvertToJson(value_manager()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedRepeatedFieldValueTest, Equal) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.Equal(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedRepeatedFieldValueTest, Empty) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_TRUE(valid_value.IsEmpty());
}
TEST_P(ParsedRepeatedFieldValueTest, Size) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_EQ(valid_value.Size(), 0);
}
TEST_P(ParsedRepeatedFieldValueTest, Get) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.Get(value_manager(), 0),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedRepeatedFieldValueTest, ForEach) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.ForEach(
value_manager(),
[](const Value&) -> absl::StatusOr<bool> { return true; }),
StatusIs(absl::StatusCode::kUnimplemented));
EXPECT_THAT(
valid_value.ForEach(
value_manager(),
[](size_t, const Value&) -> absl::StatusOr<bool> { return true; }),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedRepeatedFieldValueTest, NewIterator) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.NewIterator(value_manager()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedRepeatedFieldValueTest, Contains) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedRepeatedFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"repeated_int64")));
EXPECT_THAT(valid_value.Contains(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kUnimplemented));
}
INSTANTIATE_TEST_SUITE_P(ParsedRepeatedFieldValueTest,
ParsedRepeatedFieldValueTest,
::testing::Values(AllocatorKind::kArena,
AllocatorKind::kNewDelete),
PrintToStringParamName());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_repeated_field_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_repeated_field_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
440ad401-881b-4038-950a-c4db8e384c34 | cpp | tensorflow/tensorflow | latency_benchmark | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.cc | tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/latency_benchmark_test.cc | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.h"
#include <errno.h>
#include <sys/stat.h>
#include <fstream>
#include <iterator>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
namespace {
static constexpr char kBenchmarkToolName[] = "(BenchmarkModelAndroid)";
class DelegatePerformanceReportingListener : public BenchmarkListener {
public:
void OnBenchmarkStart(const BenchmarkParams& unused) override {
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_START);
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
ReportResult(results);
}
void ReportFailure(TfLiteStatus status) {
std::string status_msg =
status == kTfLiteError
? "TFLite error"
: (status == kTfLiteDelegateError ? "TFLite delegate error"
: "unexpected TFLite status");
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Benchmark failed due to %s with status code %d.",
status_msg.c_str(), status);
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_ERROR);
results_proto_.mutable_error()->mutable_error_code()->set_tflite_error(
status);
results_proto_.mutable_error()->set_error_message(status_msg);
}
const proto::benchmark::LatencyResults& GetResults() {
return results_proto_;
}
private:
void ReportResult(const BenchmarkResults& results) {
tensorflow::Stat<int64_t> warmup_us = results.warmup_time_us();
tensorflow::Stat<int64_t> inference_us = results.inference_time_us();
profiling::memory::MemoryUsage init_mem_usage = results.init_mem_usage();
profiling::memory::MemoryUsage overall_mem_usage =
results.overall_mem_usage();
if (results.model_size_mb() > 0) {
AddMetric("model_size_megabyte",
results.model_size_mb());
}
AddMetric("initialization_latency_us",
results.startup_latency_us());
AddMetric("warmup_latency_average_us", warmup_us.avg());
AddMetric("warmup_latency_min_us", warmup_us.min());
AddMetric("warmup_latency_max_us", warmup_us.max());
AddMetric("warmup_latency_standard_deviation_us",
warmup_us.std_deviation());
AddMetric("inference_latency_average_us",
inference_us.avg());
AddMetric("inference_latency_min_us",
inference_us.min());
AddMetric("inference_latency_max_us",
inference_us.max());
AddMetric("inference_latency_standard_deviation_us",
inference_us.std_deviation());
AddMetric("initialization_memory_max_rss_mebibyte",
init_mem_usage.mem_footprint_kb / 1024.0);
AddMetric("initialization_memory_total_non_mmapped_heap_mebibyte",
init_mem_usage.total_allocated_bytes / 1024.0 / 1024.0);
AddMetric(
"initialization_memory_in_use_heap_mebibyte",
init_mem_usage.in_use_allocated_bytes / 1024.0 / 1024.0);
AddMetric("overall_memory_max_rss_mebibyte",
overall_mem_usage.mem_footprint_kb / 1024.0);
AddMetric(
"overall_memory_total_non_mmapped_heap_mebibyte",
overall_mem_usage.total_allocated_bytes / 1024.0 / 1024.0);
AddMetric(
"overall_memory_in_use_heap_mebibyte",
overall_mem_usage.in_use_allocated_bytes / 1024.0 / 1024.0);
results_proto_.set_event_type(proto::benchmark::BENCHMARK_EVENT_TYPE_END);
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Benchmark finished.");
}
void AddMetric(std::string name, float value) {
proto::benchmark::BenchmarkMetric* metric = results_proto_.add_metrics();
metric->set_name(name);
metric->set_value(value);
}
proto::benchmark::LatencyResults results_proto_;
};
std::vector<std::string> ParseArgumentsFromTfLiteSettings(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path) {
std::vector<std::string> args;
if (tflite_settings_path.empty()) {
return args;
}
if (tflite_settings.stable_delegate_loader_settings()) {
args.push_back(absl::StrFormat("--stable_delegate_settings_file=%s",
tflite_settings_path));
return args;
}
switch (tflite_settings.delegate()) {
case Delegate_XNNPACK: {
args.push_back("--use_xnnpack=true");
if (tflite_settings.xnnpack_settings()) {
if (tflite_settings.xnnpack_settings()->num_threads()) {
args.push_back(absl::StrFormat(
"--num_threads=%d",
tflite_settings.xnnpack_settings()->num_threads()));
}
if (tflite_settings.xnnpack_settings()->flags() ==
XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16) {
args.push_back("--xnnpack_force_fp16=true");
}
}
return args;
}
case Delegate_GPU: {
args.push_back("--use_gpu=true");
const tflite::GPUSettings* gpu_settings = tflite_settings.gpu_settings();
if (gpu_settings) {
if (gpu_settings->is_precision_loss_allowed()) {
args.push_back("--gpu_precision_loss_allowed=true");
}
if (gpu_settings->enable_quantized_inference()) {
args.push_back("--gpu_experimental_enable_quant=true");
}
if (gpu_settings->inference_preference() ==
GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED) {
args.push_back("--gpu_inference_for_sustained_speed=true");
}
if (gpu_settings->force_backend() == GPUBackend_OPENCL) {
args.push_back("--gpu_backend=cl");
} else if (gpu_settings->force_backend() == GPUBackend_OPENGL) {
args.push_back("--gpu_backend=gl");
}
if (gpu_settings->cache_directory()) {
args.push_back(
absl::StrFormat("--delegate_serialize_dir=%s",
gpu_settings->cache_directory()->c_str()));
}
if (gpu_settings->model_token()) {
args.push_back(absl::StrFormat("--delegate_serialize_token=%s",
gpu_settings->model_token()->c_str()));
}
}
break;
}
case Delegate_EDGETPU: {
args.push_back("--use_edgetpu=true");
break;
}
default:
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"Delegate type %s is not enabled by the latency module.",
EnumNameDelegate(tflite_settings.delegate()));
break;
}
if (tflite_settings.disable_default_delegates()) {
args.push_back("--use_xnnpack=false");
}
return args;
}
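// For example, per the switch above, XNNPACK settings with num_threads=4
// expand to {"--use_xnnpack=true", "--num_threads=4"}, while populated
// stable-delegate loader settings short-circuit to a single
// "--stable_delegate_settings_file=<tflite_settings_path>" argument.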
}
proto::benchmark::LatencyResults Benchmark(
const TFLiteSettings& tflite_settings,
const std::string& tflite_settings_path, int model_fd, size_t model_offset,
size_t model_size, const std::vector<std::string>& args) {
std::vector<char*> argv;
argv.push_back(const_cast<char*>(kBenchmarkToolName));
std::string arg_graph =
absl::StrCat("--graph=fd:", model_fd, ":", model_offset, ":", model_size);
argv.push_back(const_cast<char*>(arg_graph.data()));
std::vector<std::string> args_from_tflite_settings =
ParseArgumentsFromTfLiteSettings(tflite_settings, tflite_settings_path);
for (const std::string& arg : args_from_tflite_settings) {
argv.push_back(const_cast<char*>(arg.data()));
}
for (const std::string& arg : args) {
argv.push_back(const_cast<char*>(arg.data()));
}
BenchmarkTfLiteModel benchmark;
DelegatePerformanceReportingListener delegatePerformanceReporting;
benchmark.AddListener(&delegatePerformanceReporting);
TfLiteStatus status = benchmark.Run(argv.size(), argv.data());
if (status != kTfLiteOk) {
delegatePerformanceReporting.ReportFailure(status);
}
return delegatePerformanceReporting.GetResults();
}
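// Illustrative call (mirrors the companion test; `settings`, `model_fd`, and
// `model_size` come from the caller):
//
//   proto::benchmark::LatencyResults results = Benchmark(
//       *settings, "/path/to/settings.json", model_fd,
//       /*model_offset=*/0, model_size, /*args=*/{"--warmup_runs=10"});
//   bool ok = results.event_type() ==
//             proto::benchmark::BENCHMARK_EVENT_TYPE_END;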
}
}
} | #include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.h"
#include <fcntl.h>
#include <cstdio>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#include "tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/proto/delegate_performance.pb.h"
namespace tflite {
namespace benchmark {
namespace latency {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
static constexpr char kSettingsFilePath[] =
"tensorflow/lite/tools/delegates/experimental/stable_delegate/"
"test_sample_stable_delegate_settings.json";
class LatencyBenchmarkTest : public ::testing::Test {
protected:
void SetUp() override {
model_fp_ = fopen(kModelPath, "rb");
ASSERT_TRUE(model_fp_ != nullptr);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_END), 0);
model_size_ = ftell(model_fp_);
ASSERT_NE(model_size_, -1);
ASSERT_EQ(fseek(model_fp_, 0, SEEK_SET), 0);
settings_ = parser_.Parse(kSettingsFilePath);
}
delegates::utils::TfLiteSettingsJsonParser parser_;
const TFLiteSettings* settings_;
size_t model_size_;
FILE* model_fp_;
std::vector<std::string> args_;
};
TEST_F(LatencyBenchmarkTest, FailedWithNullFileDescriptor) {
EXPECT_TRUE(Benchmark(*settings_, kSettingsFilePath,
0, 0,
0, args_)
.has_error());
}
TEST_F(LatencyBenchmarkTest, FailedWithInvalidNumThreadsSettings) {
flatbuffers::FlatBufferBuilder fbb;
flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings =
CreateXNNPackSettings(fbb, -3);
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_delegate(Delegate_XNNPACK);
tflite_settings_builder.add_xnnpack_settings(xnnpack_settings);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_TRUE(Benchmark(*settings,
"example_path",
fileno(model_fp_),
0, model_size_, args_)
.has_error());
}
TEST_F(LatencyBenchmarkTest, SucceedWithEmptyTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
TEST_F(LatencyBenchmarkTest, SucceedWithCpuTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_disable_default_delegates(true);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
#ifdef __ANDROID__
TEST_F(LatencyBenchmarkTest, SucceedWithGpuTfLiteSettings) {
flatbuffers::FlatBufferBuilder fbb;
TFLiteSettingsBuilder tflite_settings_builder(fbb);
tflite_settings_builder.add_delegate(Delegate_GPU);
fbb.Finish(tflite_settings_builder.Finish());
const TFLiteSettings* settings =
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer());
EXPECT_EQ(Benchmark(*settings, "example_path",
fileno(model_fp_), 0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
#endif
TEST_F(LatencyBenchmarkTest, SucceedWithSampleStableDelegate) {
EXPECT_EQ(Benchmark(*settings_, kSettingsFilePath, fileno(model_fp_),
0, model_size_, args_)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
TEST_F(LatencyBenchmarkTest,
SucceedWithSampleStableDelegateAndBenchmarkToolArguments) {
std::vector<std::string> args = {"--warmup_runs=10"};
EXPECT_EQ(Benchmark(*settings_, kSettingsFilePath, fileno(model_fp_),
0, model_size_, args)
.event_type(),
proto::benchmark::BENCHMARK_EVENT_TYPE_END);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/main/native/latency_benchmark.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/experimental/delegate_performance/android/src/test/native/latency_benchmark_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
732627c3-ac7c-45b8-b76f-447446958de6 | cpp | google/quiche | load_balancer_server_id_map | quiche/quic/load_balancer/load_balancer_server_id_map.h | quiche/quic/load_balancer/load_balancer_server_id_map_test.cc | #ifndef QUICHE_QUIC_LOAD_BALANCER_LOAD_BALANCER_SERVER_ID_MAP_H_
#define QUICHE_QUIC_LOAD_BALANCER_LOAD_BALANCER_SERVER_ID_MAP_H_
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/container/flat_hash_map.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
template <typename T>
class QUIC_EXPORT_PRIVATE LoadBalancerServerIdMap {
public:
static std::shared_ptr<LoadBalancerServerIdMap> Create(uint8_t server_id_len);
std::optional<const T> Lookup(LoadBalancerServerId server_id) const;
const T* LookupNoCopy(LoadBalancerServerId server_id) const;
void AddOrReplace(LoadBalancerServerId server_id, T value);
void Erase(const LoadBalancerServerId server_id) {
server_id_table_.erase(server_id);
}
uint8_t server_id_len() const { return server_id_len_; }
private:
LoadBalancerServerIdMap(uint8_t server_id_len)
: server_id_len_(server_id_len) {}
const uint8_t server_id_len_;
absl::flat_hash_map<LoadBalancerServerId, T> server_id_table_;
};
template <typename T>
std::shared_ptr<LoadBalancerServerIdMap<T>> LoadBalancerServerIdMap<T>::Create(
const uint8_t server_id_len) {
if (server_id_len == 0 || server_id_len > kLoadBalancerMaxServerIdLen) {
QUIC_BUG(quic_bug_434893339_01)
<< "Tried to configure map with server ID length "
<< static_cast<int>(server_id_len);
return nullptr;
}
return std::make_shared<LoadBalancerServerIdMap<T>>(
LoadBalancerServerIdMap(server_id_len));
}
template <typename T>
std::optional<const T> LoadBalancerServerIdMap<T>::Lookup(
const LoadBalancerServerId server_id) const {
if (server_id.length() != server_id_len_) {
QUIC_BUG(quic_bug_434893339_02)
<< "Lookup with a " << static_cast<int>(server_id.length())
<< " byte server ID, map requires " << static_cast<int>(server_id_len_);
    return std::optional<const T>();
}
auto it = server_id_table_.find(server_id);
return (it != server_id_table_.end()) ? it->second : std::optional<const T>();
}
template <typename T>
const T* LoadBalancerServerIdMap<T>::LookupNoCopy(
const LoadBalancerServerId server_id) const {
if (server_id.length() != server_id_len_) {
    QUIC_BUG(quic_bug_434893339_04)
<< "Lookup with a " << static_cast<int>(server_id.length())
<< " byte server ID, map requires " << static_cast<int>(server_id_len_);
return nullptr;
}
auto it = server_id_table_.find(server_id);
return (it != server_id_table_.end()) ? &it->second : nullptr;
}
template <typename T>
void LoadBalancerServerIdMap<T>::AddOrReplace(
const LoadBalancerServerId server_id, T value) {
if (server_id.length() == server_id_len_) {
server_id_table_[server_id] = value;
} else {
QUIC_BUG(quic_bug_434893339_03)
<< "Server ID of " << static_cast<int>(server_id.length())
<< " bytes; this map requires " << static_cast<int>(server_id_len_);
}
}
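// Illustrative usage sketch (4-byte server IDs, as in the companion test):
//
//   auto pool = LoadBalancerServerIdMap<int>::Create(4);
//   LoadBalancerServerId id({0xed, 0x79, 0x3a, 0x51});
//   pool->AddOrReplace(id, 1);
//   if (const int* value = pool->LookupNoCopy(id)) { /* *value == 1 */ }
//   pool->Erase(id);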
}
#endif | #include "quiche/quic/load_balancer/load_balancer_server_id_map.h"
#include <cstdint>
#include <optional>
#include "absl/types/span.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
constexpr uint8_t kServerId[] = {0xed, 0x79, 0x3a, 0x51};
class LoadBalancerServerIdMapTest : public QuicTest {
public:
const LoadBalancerServerId valid_server_id_ = LoadBalancerServerId(kServerId);
const LoadBalancerServerId invalid_server_id_ =
LoadBalancerServerId(absl::Span<const uint8_t>(kServerId, 3));
};
TEST_F(LoadBalancerServerIdMapTest, CreateWithBadServerIdLength) {
EXPECT_QUIC_BUG(EXPECT_EQ(LoadBalancerServerIdMap<int>::Create(0), nullptr),
"Tried to configure map with server ID length 0");
EXPECT_QUIC_BUG(EXPECT_EQ(LoadBalancerServerIdMap<int>::Create(16), nullptr),
"Tried to configure map with server ID length 16");
}
TEST_F(LoadBalancerServerIdMapTest, AddOrReplaceWithBadServerIdLength) {
int record = 1;
auto pool = LoadBalancerServerIdMap<int>::Create(4);
EXPECT_NE(pool, nullptr);
EXPECT_QUIC_BUG(pool->AddOrReplace(invalid_server_id_, record),
"Server ID of 3 bytes; this map requires 4");
}
TEST_F(LoadBalancerServerIdMapTest, LookupWithBadServerIdLength) {
int record = 1;
auto pool = LoadBalancerServerIdMap<int>::Create(4);
EXPECT_NE(pool, nullptr);
pool->AddOrReplace(valid_server_id_, record);
EXPECT_QUIC_BUG(EXPECT_FALSE(pool->Lookup(invalid_server_id_).has_value()),
"Lookup with a 3 byte server ID, map requires 4");
EXPECT_QUIC_BUG(EXPECT_EQ(pool->LookupNoCopy(invalid_server_id_), nullptr),
"Lookup with a 3 byte server ID, map requires 4");
}
TEST_F(LoadBalancerServerIdMapTest, LookupWhenEmpty) {
auto pool = LoadBalancerServerIdMap<int>::Create(4);
EXPECT_NE(pool, nullptr);
EXPECT_EQ(pool->LookupNoCopy(valid_server_id_), nullptr);
std::optional<int> result = pool->Lookup(valid_server_id_);
EXPECT_FALSE(result.has_value());
}
TEST_F(LoadBalancerServerIdMapTest, AddLookup) {
int record1 = 1, record2 = 2;
auto pool = LoadBalancerServerIdMap<int>::Create(4);
EXPECT_NE(pool, nullptr);
LoadBalancerServerId other_server_id({0x01, 0x02, 0x03, 0x04});
EXPECT_TRUE(other_server_id.IsValid());
pool->AddOrReplace(valid_server_id_, record1);
pool->AddOrReplace(other_server_id, record2);
std::optional<int> result = pool->Lookup(valid_server_id_);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, record1);
auto result_ptr = pool->LookupNoCopy(valid_server_id_);
EXPECT_NE(result_ptr, nullptr);
EXPECT_EQ(*result_ptr, record1);
result = pool->Lookup(other_server_id);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(*result, record2);
}
TEST_F(LoadBalancerServerIdMapTest, AddErase) {
int record = 1;
auto pool = LoadBalancerServerIdMap<int>::Create(4);
EXPECT_NE(pool, nullptr);
pool->AddOrReplace(valid_server_id_, record);
EXPECT_EQ(*pool->LookupNoCopy(valid_server_id_), record);
pool->Erase(valid_server_id_);
EXPECT_EQ(pool->LookupNoCopy(valid_server_id_), nullptr);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id_map.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/load_balancer/load_balancer_server_id_map_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
4ff22835-9a9a-423f-8510-b19d25979eff | cpp | google/cel-cpp | logical_functions | runtime/standard/logical_functions.cc | runtime/standard/logical_functions_test.cc | #include "runtime/standard/logical_functions.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "base/builtins.h"
#include "base/function_adapter.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
#include "runtime/internal/errors.h"
#include "runtime/register_function_helper.h"
namespace cel {
namespace {
using ::cel::runtime_internal::CreateNoMatchingOverloadError;
Value NotStrictlyFalseImpl(ValueManager& value_factory, const Value& value) {
if (InstanceOf<BoolValue>(value)) {
return value;
}
if (InstanceOf<ErrorValue>(value) || InstanceOf<UnknownValue>(value)) {
return value_factory.CreateBoolValue(true);
}
return value_factory.CreateErrorValue(
CreateNoMatchingOverloadError(builtin::kNotStrictlyFalse));
}
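// Note: treating errors and unknowns as `true` means only a definite `false`
// can short-circuit the enclosing comprehension; any non-bool, non-error,
// non-unknown operand yields a no_matching_overload error instead.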
}
absl::Status RegisterLogicalFunctions(FunctionRegistry& registry,
const RuntimeOptions& options) {
CEL_RETURN_IF_ERROR(
(RegisterHelper<UnaryFunctionAdapter<bool, bool>>::RegisterGlobalOverload(
builtin::kNot,
[](ValueManager&, bool value) -> bool { return !value; }, registry)));
using StrictnessHelper = RegisterHelper<UnaryFunctionAdapter<Value, Value>>;
CEL_RETURN_IF_ERROR(StrictnessHelper::RegisterNonStrictOverload(
builtin::kNotStrictlyFalse, &NotStrictlyFalseImpl, registry));
CEL_RETURN_IF_ERROR(StrictnessHelper::RegisterNonStrictOverload(
builtin::kNotStrictlyFalseDeprecated, &NotStrictlyFalseImpl, registry));
return absl::OkStatus();
}
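// Illustrative registration sketch (mirrors the companion test):
//
//   FunctionRegistry registry;
//   RuntimeOptions options;
//   CEL_RETURN_IF_ERROR(RegisterLogicalFunctions(registry, options));
//   // `registry` now resolves `!_` and the @not_strictly_false overloads.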
} | #include "runtime/standard/logical_functions.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "base/builtins.h"
#include "base/function.h"
#include "base/function_descriptor.h"
#include "base/kind.h"
#include "base/type_provider.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "internal/testing.h"
#include "runtime/function_overload_reference.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Matcher;
using ::testing::Truly;
MATCHER_P3(DescriptorIs, name, arg_kinds, is_receiver, "") {
const FunctionOverloadReference& ref = arg;
const FunctionDescriptor& descriptor = ref.descriptor;
return descriptor.name() == name &&
descriptor.ShapeMatches(is_receiver, arg_kinds);
}
MATCHER_P(IsBool, expected, "") {
const Value& value = arg;
return value->Is<BoolValue>() && value.GetBool().NativeValue() == expected;
}
absl::StatusOr<Value> TestDispatchToFunction(const FunctionRegistry& registry,
absl::string_view simple_name,
absl::Span<const Value> args,
ValueManager& value_factory) {
  std::vector<Kind> arg_matcher;
  arg_matcher.reserve(args.size());
  for (const auto& value : args) {
    arg_matcher.push_back(ValueKindToKind(value->kind()));
  }
  std::vector<FunctionOverloadReference> refs = registry.FindStaticOverloads(
      simple_name, false, arg_matcher);
if (refs.size() != 1) {
return absl::InvalidArgumentError("ambiguous overloads");
}
Function::InvokeContext ctx(value_factory);
return refs[0].implementation.Invoke(ctx, args);
}
TEST(RegisterLogicalFunctions, NotStrictlyFalseRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterLogicalFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kNotStrictlyFalse,
false, {Kind::kAny}),
ElementsAre(DescriptorIs(builtin::kNotStrictlyFalse,
std::vector<Kind>{Kind::kBool}, false)));
}
TEST(RegisterLogicalFunctions, LogicalNotRegistered) {
FunctionRegistry registry;
RuntimeOptions options;
ASSERT_OK(RegisterLogicalFunctions(registry, options));
EXPECT_THAT(
registry.FindStaticOverloads(builtin::kNot,
false, {Kind::kAny}),
ElementsAre(
DescriptorIs(builtin::kNot, std::vector<Kind>{Kind::kBool}, false)));
}
struct TestCase {
using ArgumentFactory = std::function<std::vector<Value>(ValueManager&)>;
std::string function;
ArgumentFactory arguments;
absl::StatusOr<Matcher<Value>> result_matcher;
};
class LogicalFunctionsTest : public testing::TestWithParam<TestCase> {
public:
LogicalFunctionsTest()
: value_factory_(MemoryManagerRef::ReferenceCounting(),
TypeProvider::Builtin()) {}
protected:
common_internal::LegacyValueManager value_factory_;
};
TEST_P(LogicalFunctionsTest, Runner) {
const TestCase& test_case = GetParam();
cel::FunctionRegistry registry;
ASSERT_OK(RegisterLogicalFunctions(registry, RuntimeOptions()));
std::vector<Value> args = test_case.arguments(value_factory_);
absl::StatusOr<Value> result = TestDispatchToFunction(
registry, test_case.function, args, value_factory_);
EXPECT_EQ(result.ok(), test_case.result_matcher.ok());
if (!test_case.result_matcher.ok()) {
EXPECT_EQ(result.status().code(), test_case.result_matcher.status().code());
EXPECT_THAT(result.status().message(),
HasSubstr(test_case.result_matcher.status().message()));
} else {
    ASSERT_TRUE(result.ok()) << "unexpected error: " << result.status();
EXPECT_THAT(*result, *test_case.result_matcher);
}
}
INSTANTIATE_TEST_SUITE_P(
Cases, LogicalFunctionsTest,
testing::ValuesIn(std::vector<TestCase>{
TestCase{builtin::kNot,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateBoolValue(true)};
},
IsBool(false)},
TestCase{builtin::kNot,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateBoolValue(false)};
},
IsBool(true)},
TestCase{builtin::kNot,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateBoolValue(true),
value_factory.CreateBoolValue(false)};
},
absl::InvalidArgumentError("")},
TestCase{builtin::kNotStrictlyFalse,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateBoolValue(true)};
},
IsBool(true)},
TestCase{builtin::kNotStrictlyFalse,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateBoolValue(false)};
},
IsBool(false)},
TestCase{builtin::kNotStrictlyFalse,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateErrorValue(
absl::InternalError("test"))};
},
IsBool(true)},
TestCase{builtin::kNotStrictlyFalse,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateUnknownValue()};
},
IsBool(true)},
TestCase{builtin::kNotStrictlyFalse,
[](ValueManager& value_factory) -> std::vector<Value> {
return {value_factory.CreateIntValue(42)};
},
Truly([](const Value& v) {
return v->Is<ErrorValue>() &&
absl::StrContains(
v.GetError().NativeValue().message(),
"No matching overloads");
})},
}));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/logical_functions.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/standard/logical_functions_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
752315be-50a6-4e68-908c-f2f56b92567d | cpp | google/tensorstore | box | tensorstore/box.cc | tensorstore/box_test.cc | #include "tensorstore/box.h"
#include <algorithm>
#include <ostream>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_box {
std::string DescribeForCast(DimensionIndex rank) {
return tensorstore::StrCat("box with ",
StaticCastTraits<DimensionIndex>::Describe(rank));
}
std::ostream& PrintToOstream(std::ostream& os, const BoxView<>& view) {
return os << "{origin=" << view.origin() << ", shape=" << view.shape() << "}";
}
bool AreEqual(const BoxView<>& box_a, const BoxView<>& box_b) {
return box_a.rank() == box_b.rank() &&
std::equal(box_a.shape().begin(), box_a.shape().end(),
box_b.shape().begin()) &&
std::equal(box_a.origin().begin(), box_a.origin().end(),
box_b.origin().begin());
}
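// A box is finite iff the index interval of every dimension is finite.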
bool IsFinite(BoxView<> box) {
for (DimensionIndex i = 0; i < box.rank(); ++i) {
if (!IsFinite(box[i])) return false;
}
return true;
}
}
namespace serialization {
namespace internal_serialization {
bool EncodeBoxView(EncodeSink& sink, BoxView<> box) {
return serialization::EncodeTuple(sink, box.origin(), box.shape());
}
bool DecodeBoxView(DecodeSource& source, MutableBoxView<> box) {
return serialization::DecodeTuple(source, box.origin(), box.shape());
}
}
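// Rank is serialized as a single byte; valid ranks are small enough to fit,
// and Decode rejects any value above kMaxRank.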
bool RankSerializer::Encode(EncodeSink& sink, DimensionIndex rank) {
assert(IsValidRank(rank));
return sink.writer().WriteByte(static_cast<uint8_t>(rank));
}
bool RankSerializer::Decode(DecodeSource& source, DimensionIndex& rank) {
uint8_t v;
if (!source.reader().ReadByte(v)) return false;
  if (v > kMaxRank) {
    source.Fail(DecodeError(
        tensorstore::StrCat("Invalid rank value: ", static_cast<size_t>(v))));
    return false;
  }
rank = static_cast<DimensionIndex>(v);
return true;
}
}
} | #include "tensorstore/box.h"
#include <array>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::dynamic_rank;
using ::tensorstore::HasBoxDomain;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IsStaticCastConstructible;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MutableBoxView;
using ::tensorstore::StaticRankCast;
using ::tensorstore::SubBoxView;
using ::tensorstore::unchecked;
using ::tensorstore::serialization::TestSerializationRoundTrip;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
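// Compile-time checks of the conversion, construction, and static-cast
// relationships between Box, BoxView, and MutableBoxView.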
static_assert(std::is_convertible_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_constructible_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_assignable_v<BoxView<3>, BoxView<>>);
static_assert(!std::is_assignable_v<Box<3>, Box<>>);
static_assert(!std::is_constructible_v<Box<3>, Box<>>);
static_assert(!std::is_constructible_v<BoxView<3>, Box<>>);
static_assert(!std::is_constructible_v<MutableBoxView<3>, MutableBoxView<>>);
static_assert(!std::is_constructible_v<MutableBoxView<3>, Box<>>);
static_assert(std::is_constructible_v<MutableBoxView<3>, Box<3>&>);
static_assert(IsStaticCastConstructible<BoxView<3>, BoxView<>>);
static_assert(IsStaticCastConstructible<Box<3>, BoxView<>>);
static_assert(IsStaticCastConstructible<Box<3>, Box<>>);
static_assert(IsStaticCastConstructible<BoxView<>, BoxView<3>>);
static_assert(IsStaticCastConstructible<MutableBoxView<3>, Box<3>&>);
static_assert(!IsStaticCastConstructible<MutableBoxView<>, const Box<3>&>);
static_assert(!IsStaticCastConstructible<BoxView<2>, BoxView<3>>);
static_assert(!IsStaticCastConstructible<BoxView<2>, Box<3>>);
static_assert(!IsStaticCastConstructible<Box<3>, Box<2>>);
TEST(BoxTest, DefaultConstructDynamic) {
Box<> box;
EXPECT_EQ(0, box.rank());
}
TEST(BoxTest, DefaultConstructStatic) {
Box<3> box;
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, RankPointersConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> box(3, origin, shape);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(4, 5, 6));
}
TEST(BoxTest, SizeConstruct) {
Box<> box(3);
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, ShapeArrayConstruct) {
std::array<Index, 3> shape{{1, 2, 3}};
Box<> box(shape);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(1, 2, 3));
}
TEST(BoxTest, DynamicRankSpanConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<> box{tensorstore::span(origin), tensorstore::span(shape)};
EXPECT_EQ(3, box.rank());
  EXPECT_THAT(box.origin(), ElementsAreArray(origin));
  EXPECT_THAT(box.shape(), ElementsAreArray(shape));
}
TEST(BoxTest, ConstructFromArrays) {
Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(4, 5, 6));
}
TEST(BoxTest, ConstructFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
BoxView<> view(origin, shape);
Box<> box(view);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, DeduceFromShapeArray) {
const Index shape[] = {3, 4, 5};
auto box = Box(shape);
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromShapeSpanStatic) {
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromShapeSpanDynamic) {
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), Box<>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeArrays) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(origin, shape);
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeSpansStatic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span(origin), tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), Box<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromOriginAndShapeDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = Box(tensorstore::span<const Index>(origin),
tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), Box<>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxTest, DeduceFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
auto box2 = Box(box);
static_assert(std::is_same_v<decltype(box2), Box<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<3> box(origin, shape);
auto box2 = Box(box);
static_assert(std::is_same_v<decltype(box2), Box<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
BoxView<> view(origin, shape);
Box<> box;
box = view;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> other(origin, shape);
Box<> box;
box = other;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignDynamicBoxFromStaticBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<3> other(origin, shape);
Box<> box;
box = other;
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
box.Fill();
box = BoxView<3>(other);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, AssignStaticBoxFromDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {4, 5, 6};
Box<> other(origin, shape);
Box<3> box;
box = StaticRankCast<3, unchecked>(other);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.shape(), ElementsAreArray(shape));
EXPECT_THAT(box.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, SetRank) {
Box<> box;
box.set_rank(3);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxTest, Accessors) {
Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_EQ(4 * 5 * 6, box.num_elements());
EXPECT_EQ(IndexInterval::UncheckedSized(1, 4), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 5), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 6), box[2]);
}
TEST(BoxTest, ConstAccessors) {
const Box<> box({1, 2, 3}, {4, 5, 6});
EXPECT_EQ(4 * 5 * 6, box.num_elements());
EXPECT_EQ(IndexInterval::UncheckedSized(1, 4), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 5), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 6), box[2]);
}
TEST(BoxTest, SubscriptAssignment) {
Box<> box(2);
box[1] = IndexInterval::UncheckedSized(1, 5);
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, 1));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, 5));
}
TEST(BoxTest, Fill) {
Box<> box(2);
box.Fill(IndexInterval::UncheckedSized(1, 5));
EXPECT_THAT(box.origin(), ElementsAre(1, 1));
EXPECT_THAT(box.shape(), ElementsAre(5, 5));
}
TEST(BoxTest, IsEmpty) {
Box<> box(3);
EXPECT_FALSE(box.is_empty());
box.Fill(IndexInterval::UncheckedSized(0, 0));
EXPECT_TRUE(box.is_empty());
}
TEST(BoxViewTest, StaticRankDefaultConstruct) {
BoxView<3> box;
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxViewTest, DynamicRankDefaultConstruct) {
BoxView<> box;
EXPECT_EQ(0, box.rank());
}
TEST(BoxViewTest, DynamicRankSizeConstruct) {
BoxView<> box(3);
EXPECT_EQ(3, box.rank());
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(BoxViewTest, DynamicRankSpanConstruct) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box{tensorstore::span(origin), tensorstore::span(shape)};
EXPECT_EQ(3, box.rank());
EXPECT_EQ(&origin[0], box.origin().data());
EXPECT_EQ(&shape[0], box.shape().data());
}
TEST(BoxViewTest, DeduceFromShapeArray) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(shape);
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromShapeSpanStatic) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromShapeSpanDynamic) {
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), BoxView<>>);
EXPECT_THAT(box.origin(), ElementsAre(0, 0, 0));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeArrays) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(origin, shape);
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeSpansStatic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span(origin), tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), BoxView<3>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromOriginAndShapeDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span<const Index>(origin),
tensorstore::span<const Index>(shape));
static_assert(std::is_same_v<decltype(box), BoxView<>>);
EXPECT_THAT(box.origin(), ElementsAre(1, 2, 3));
EXPECT_THAT(box.shape(), ElementsAre(3, 4, 5));
}
TEST(BoxViewTest, DeduceFromBoxView) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
const Box<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, Subscript) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
EXPECT_EQ(IndexInterval::UncheckedSized(1, 3), box[0]);
EXPECT_EQ(IndexInterval::UncheckedSized(2, 4), box[1]);
EXPECT_EQ(IndexInterval::UncheckedSized(3, 5), box[2]);
}
TEST(BoxViewTest, NumElements) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
EXPECT_EQ(3 * 4 * 5, box.num_elements());
}
TEST(BoxViewTest, StaticToDynamicConversion) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<> dynamic_box = box;
EXPECT_EQ(3, dynamic_box.rank());
EXPECT_THAT(dynamic_box.shape(), ElementsAreArray(shape));
EXPECT_THAT(dynamic_box.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DefaultAssignment) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<3> box2;
box2 = box;
EXPECT_EQ(3, box2.rank());
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, DefaultAssignmentStaticToDynamic) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<3> box(origin, shape);
BoxView<> box2;
box2 = box;
EXPECT_EQ(3, box2.rank());
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, StaticRankCast) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
BoxView<> box(origin, shape);
auto box2 = StaticRankCast<3, unchecked>(box);
EXPECT_THAT(
StaticRankCast<2>(box),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast box with rank of 3 to box with rank of 2"));
static_assert(std::is_same_v<decltype(box2), BoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxViewTest, ConstructFromDynamicBox) {
Box<> box({1, 2}, {3, 4});
BoxView<> box_view = box;
EXPECT_EQ(2, box_view.rank());
EXPECT_EQ(box.shape().data(), box_view.shape().data());
EXPECT_EQ(box.origin().data(), box_view.origin().data());
}
TEST(BoxViewTest, ConstructFromStaticBox) {
Box<2> box({1, 2}, {3, 4});
BoxView<> box_view = box;
EXPECT_EQ(2, box_view.rank());
EXPECT_EQ(box.shape().data(), box_view.shape().data());
EXPECT_EQ(box.origin().data(), box_view.origin().data());
}
TEST(MutableBoxViewTest, RankPointersConstruct) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(3, origin, shape);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DynamicRankSpanConstruct) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<> box{tensorstore::span(origin), tensorstore::span(shape)};
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeArrays) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(origin, shape);
static_assert(std::is_same_v<decltype(box), MutableBoxView<3>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeSpansStatic) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span(origin), tensorstore::span(shape));
static_assert(std::is_same_v<decltype(box), MutableBoxView<3>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromOriginAndShapeDynamic) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
auto box = BoxView(tensorstore::span<Index>(origin),
tensorstore::span<Index>(shape));
static_assert(std::is_same_v<decltype(box), MutableBoxView<>>);
EXPECT_EQ(3, box.rank());
EXPECT_EQ(box.origin().data(), origin);
EXPECT_EQ(box.shape().data(), shape);
}
TEST(MutableBoxViewTest, DeduceFromBox) {
const Index origin[] = {1, 2, 3};
const Index shape[] = {3, 4, 5};
Box<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_EQ(box2.shape().data(), box.shape().data());
EXPECT_EQ(box2.origin().data(), box.origin().data());
}
TEST(MutableBoxViewTest, DeduceFromMutableBoxView) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<3> box(origin, shape);
auto box2 = BoxView(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_EQ(box2.shape().data(), box.shape().data());
EXPECT_EQ(box2.origin().data(), box.origin().data());
}
TEST(MutableBoxViewTest, AssignFromBoxView) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
const Index origin2[] = {10, 20, 30};
const Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(BoxView(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, AssignFromBox) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
const Index origin2[] = {10, 20, 30};
const Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(Box(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, CopyAssign) {
Index origin1[] = {1, 2, 3};
Index shape1[] = {4, 5, 6};
Index origin2[] = {10, 20, 30};
Index shape2[] = {40, 50, 60};
MutableBoxView<> box(origin1, shape1);
box.DeepAssign(MutableBoxView<>(origin2, shape2));
EXPECT_EQ(3, box.rank());
EXPECT_THAT(origin1, ElementsAreArray(origin2));
EXPECT_THAT(shape1, ElementsAreArray(shape2));
}
TEST(MutableBoxViewTest, SubscriptAssignment) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(origin, shape);
box[1] = IndexInterval::UncheckedSized(1, 7);
EXPECT_THAT(origin, ElementsAre(1, 1, 3));
EXPECT_THAT(shape, ElementsAre(4, 7, 6));
}
TEST(MutableBoxViewTest, Fill) {
Index origin[] = {1, 2, 3};
Index shape[] = {4, 5, 6};
MutableBoxView<> box(origin, shape);
box.Fill(IndexInterval::UncheckedSized(1, 5));
EXPECT_THAT(box.origin(), ElementsAre(1, 1, 1));
EXPECT_THAT(box.shape(), ElementsAre(5, 5, 5));
box.Fill();
EXPECT_THAT(box.origin(), ElementsAre(-kInfIndex, -kInfIndex, -kInfIndex));
EXPECT_THAT(box.shape(), ElementsAre(kInfSize, kInfSize, kInfSize));
}
TEST(MutableBoxViewTest, StaticRankCast) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
MutableBoxView<> box(origin, shape);
auto box2 = StaticRankCast<3, unchecked>(box);
static_assert(std::is_same_v<decltype(box2), MutableBoxView<3>>);
EXPECT_THAT(box2.shape(), ElementsAreArray(shape));
EXPECT_THAT(box2.origin(), ElementsAreArray(origin));
}
TEST(BoxTest, Comparison) {
const Index origin1[] = {1, 2, 3};
const Index shape1[] = {4, 5, 6};
const Index origin2[] = {1, 2, 3};
const Index shape2[] = {4, 5, 6};
const Index origin3[] = {1, 2, 4};
const Index shape3[] = {4, 5, 7};
const Index origin4[] = {1, 2};
const Index shape4[] = {4, 5};
BoxView<> view1(origin1, shape1);
Box<> box1(view1);
BoxView<> view2(origin2, shape2);
Box<> box2(view2);
BoxView<> view3(origin3, shape3);
Box<> box3(view3);
BoxView<> view4(origin4, shape4);
Box<> box4(view4);
EXPECT_EQ(box1, view1);
EXPECT_EQ(box2, view2);
EXPECT_EQ(box3, view3);
EXPECT_EQ(box4, view4);
EXPECT_EQ(view1, view2);
EXPECT_EQ(view1, box2);
EXPECT_EQ(box1, view2);
EXPECT_EQ(box1, box2);
EXPECT_NE(view1, view3);
EXPECT_NE(view1, box3);
EXPECT_NE(box1, view3);
EXPECT_NE(box1, box3);
EXPECT_NE(view1, view4);
EXPECT_NE(view1, box4);
EXPECT_NE(box1, view4);
EXPECT_NE(box1, box4);
}
TEST(BoxTest, Print) {
Index origin[] = {1, 2, 3};
Index shape[] = {3, 4, 5};
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(BoxView<>(origin, shape)));
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(Box<>(origin, shape)));
EXPECT_EQ("{origin={1, 2, 3}, shape={3, 4, 5}}",
tensorstore::StrCat(MutableBoxView<>(origin, shape)));
}
TEST(BoxTest, Contains) {
const Index origin1[] = {1, 2};
const Index shape1[] = {4, 5};
const Index origin2[] = {2, 2};
const Index shape2[] = {3, 5};
const Index origin3[] = {1, 2};
const Index shape3[] = {4, 6};
const Index origin4[] = {1};
const Index shape4[] = {4};
const Index indices1[] = {2, 3};
const Index indices2[] = {0, 3};
const Index indices3[] = {0};
Index indices4[] = {2};
auto span1 = tensorstore::span(indices1);
auto span2 = tensorstore::span(indices2);
auto span3 = tensorstore::span(indices3);
auto span4 = tensorstore::span(indices4);
BoxView<> view1(origin1, shape1);
BoxView<> view2(origin2, shape2);
BoxView<> view3(origin3, shape3);
BoxView<> view4(origin4, shape4);
Box<> box1(origin1, shape1);
Box<> box2(origin2, shape2);
Box<> box3(origin3, shape3);
Box<> box4(origin4, shape4);
EXPECT_TRUE(Contains(view1, indices1));
EXPECT_TRUE(ContainsPartial(view1, indices1));
EXPECT_TRUE(ContainsPartial(view1, indices4));
EXPECT_FALSE(Contains(view1, indices2));
EXPECT_FALSE(Contains(view1, indices3));
EXPECT_FALSE(ContainsPartial(view1, indices2));
EXPECT_FALSE(ContainsPartial(view1, indices3));
EXPECT_TRUE(Contains(view1, span1));
EXPECT_TRUE(ContainsPartial(view1, span1));
EXPECT_FALSE(Contains(view1, span2));
EXPECT_FALSE(ContainsPartial(view1, span2));
EXPECT_FALSE(Contains(view1, span3));
EXPECT_FALSE(ContainsPartial(view1, span3));
EXPECT_TRUE(ContainsPartial(view1, span4));
EXPECT_TRUE(Contains(box1, indices1));
EXPECT_TRUE(ContainsPartial(box1, indices1));
EXPECT_FALSE(Contains(box1, indices2));
EXPECT_FALSE(Contains(box1, indices3));
EXPECT_TRUE(Contains(box1, span1));
EXPECT_FALSE(Contains(box1, span2));
EXPECT_FALSE(Contains(box1, span3));
EXPECT_TRUE(Contains(view1, view2));
EXPECT_FALSE(Contains(view1, view3));
EXPECT_FALSE(Contains(view1, view4));
EXPECT_TRUE(Contains(view1, box2));
EXPECT_FALSE(Contains(view1, box3));
EXPECT_FALSE(Contains(view1, box4));
EXPECT_TRUE(Contains(box1, view2));
EXPECT_FALSE(Contains(box1, view3));
EXPECT_FALSE(Contains(box1, view4));
EXPECT_TRUE(Contains(box1, box2));
EXPECT_FALSE(Contains(box1, box3));
EXPECT_FALSE(Contains(box1, box4));
}
TEST(BoxTest, GetBoxDomainOf) {
static_assert(!HasBoxDomain<int>);
static_assert(HasBoxDomain<BoxView<>>);
static_assert(HasBoxDomain<Box<>>);
static_assert(HasBoxDomain<MutableBoxView<>>);
Box<> box({1, 2}, {3, 4});
BoxView<> view = box;
EXPECT_EQ(box, GetBoxDomainOf(box));
EXPECT_EQ(box, GetBoxDomainOf(view));
}
TEST(BoxTest, InlineSize) {
Box<dynamic_rank(2)> box({1, 2}, {3, 4});
BoxView<dynamic_rank> v = box;
EXPECT_EQ(v, box);
MutableBoxView<dynamic_rank> v2 = box;
EXPECT_EQ(v2, box);
}
TEST(BoxTest, DeductionGuides) {
auto box = Box({1, 2}, {3, 4});
static_assert(std::is_same_v<decltype(box), Box<2>>);
static_assert(std::is_same_v<decltype(BoxView({1, 2}, {3, 4})), BoxView<2>>);
static_assert(decltype(box)::static_rank == 2);
auto box_view = BoxView(box);
static_assert(std::is_same_v<decltype(box_view), MutableBoxView<2>>);
}
TEST(BoxTest, IsFinite) {
EXPECT_TRUE(IsFinite(Box<>()));
EXPECT_TRUE(IsFinite(BoxView<>()));
EXPECT_FALSE(IsFinite(Box<>(1)));
EXPECT_FALSE(IsFinite(Box<1>()));
EXPECT_FALSE(IsFinite(BoxView<>(1)));
EXPECT_FALSE(IsFinite(BoxView<>(2)));
EXPECT_FALSE(IsFinite(BoxView<2>()));
EXPECT_TRUE(IsFinite(Box<3>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(BoxView<3>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(Box<>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(BoxView<>({1, 2, 3}, {4, 5, 6})));
EXPECT_TRUE(IsFinite(Box<1>({1}, {4})));
EXPECT_FALSE(IsFinite(Box<3>({1, -kInfIndex, 3}, {4, 5, 6})));
EXPECT_FALSE(IsFinite(Box<3>({1, kInfIndex - 5, 3}, {4, 6, 6})));
}
TEST(BoxSerializationTest, StaticRank) {
TestSerializationRoundTrip(Box<0>());
TestSerializationRoundTrip(Box<3>({1, 2, 3}, {4, 5, 6}));
}
TEST(BoxSerializationTest, DynamicRank) {
TestSerializationRoundTrip(Box<>());
TestSerializationRoundTrip(Box({1, 2, 3}, {4, 5, 6}));
}
TEST(BoxTest, SubBoxView) {
Box<> b({1, 2, 3}, {4, 5, 6});
const Box<>& b_const = b;
BoxView<> b_view = b;
MutableBoxView<> b_mut_view = b;
EXPECT_EQ(Box<>({2, 3}, {5, 6}), SubBoxView(b, 1));
EXPECT_EQ(Box<>({2}, {5}), SubBoxView(b, 1, 2));
static_assert(std::is_same_v<decltype(SubBoxView(b, 1)), MutableBoxView<>>);
static_assert(std::is_same_v<decltype(SubBoxView(b_const, 1)), BoxView<>>);
static_assert(std::is_same_v<decltype(SubBoxView(b_view, 1)), BoxView<>>);
static_assert(
std::is_same_v<decltype(SubBoxView(b_mut_view, 1)), MutableBoxView<>>);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/box.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/box_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6a00ebe9-a063-4714-bc93-24ea5bf8ed70 | cpp | tensorflow/tensorflow | fusion_constant_sinking | third_party/xla/xla/service/fusion_constant_sinking.cc | third_party/xla/xla/service/fusion_constant_sinking_test.cc | #include "xla/service/fusion_constant_sinking.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
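// Returns true if the scalar constant `operand` can be sunk into `fusion`:
// the fusion must be a loop or output fusion with more than one operand, and
// the corresponding fused parameter must not feed a single-operand nested
// fusion.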
bool CanSink(HloInstruction* fusion, const HloInstruction* operand) {
if (!fusion->IsLoopFusion() && !fusion->IsOutputFusion()) {
return false;
}
if (fusion->operand_count() == 1) {
return false;
}
if (!ShapeUtil::IsScalar(operand->shape()) || !operand->IsConstant()) {
return false;
}
int64_t operand_idx = fusion->operand_index(operand);
HloInstruction* fused_param = fusion->fused_parameter(operand_idx);
for (HloInstruction* user : fused_param->users()) {
if (user->opcode() == HloOpcode::kFusion && user->operand_count() == 1) {
return false;
}
}
return true;
}
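// Fuses the scalar constant `scalar` into every user where CanSink holds,
// then recurses on the clone created inside each fusion computation so that
// nested fusions are handled as well.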
bool ProcessScalar(HloInstruction* scalar) {
if (!ShapeUtil::IsScalar(scalar->shape()) || !scalar->IsConstant()) {
return false;
}
bool processed = false;
std::vector<HloInstruction*> sinkable_users;
for (HloInstruction* use : scalar->users()) {
if (CanSink(use, scalar)) {
sinkable_users.push_back(use);
}
}
for (HloInstruction* use : sinkable_users) {
HloInstruction* fused_scalar = use->FuseInstruction(scalar);
processed = true;
ProcessScalar(fused_scalar);
}
return processed;
}
absl::StatusOr<bool> FusionConstantSinking::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(3) << "HLO module before FusionConstantSinking:";
XLA_VLOG_LINES(3, module->ToString());
bool changed = false;
for (HloComputation* c : module->MakeNonfusionComputations()) {
for (HloInstruction* i : c->MakeInstructionPostOrder()) {
changed |= ProcessScalar(i);
}
}
if (changed) {
TF_ASSIGN_OR_RETURN(bool dce, HloDCE{}.Run(module, execution_threads));
changed |= dce;
}
VLOG(3) << "HLO module after FusionConstantSinking:";
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
} | #include "xla/service/fusion_constant_sinking.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using FusionConstantSinkingTest = HloTestBase;
TEST_F(FusionConstantSinkingTest, SinkConstant) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.slice (param_0.51117: s8[56,4096,4096], param_1: s32[]) -> s8[1,4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%constant.85694 = s32[]{:T(128)} constant(0)
ROOT %dynamic-slice.22040 = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} dynamic-slice(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117, s32[]{:T(128)} p1, s32[]{:T(128)} %constant.85694, s32[]{:T(128)} %constant.85694), dynamic_slice_sizes={1,4096,4096}
}
ENTRY main {
p0 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c), kind=kLoop, calls=%fused_computation.slice
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(
module->GetComputationWithName("fused_computation.slice")
->root_instruction(),
GmockMatch(match::DynamicSlice(match::Parameter(0), match::Constant(),
match::Constant(), match::Constant())));
}
TEST_F(FusionConstantSinkingTest, SingleOperandFusionNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation (param_1: s8[]) -> s8[1,4096,4096] {
param0 = s8[] parameter(0)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} broadcast(param0), dimensions={}
}
ENTRY main {
c = s8[]{:T(128)} constant(10)
ROOT out = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[]{:T(128)} c), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, SingleOperandUserNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.inner (param_1: s32[]) -> s32[] {
p1 = s32[]{:T(128)} parameter(0)
%constant.85694 = s32[]{:T(128)} constant(10)
ROOT out = s32[] add(p1, %constant.85694)
}
%fused_computation (param_0.51117: s32[4096,4096], param_1:
s32[]) -> s32[4096,4096] {
%param_0.51117 = s32[4096,4096]{1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%inner.fusion = s32[] fusion(s32[]{:T(128)} p1), kind=kLoop, calls=%fused_computation.inner
%broadcast = s32[4096,4096]{1,0:T(8,128)(4,1)} broadcast(%inner.fusion), dimensions={}
ROOT out = s32[4096,4096] add(%broadcast, %param_0.51117)
}
ENTRY main {
p0 = s32[4096,4096]{1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s32[4096,4096]{1,0:T(8,128)(4,1)}
fusion(s32[4096,4096]{1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, NonScalarNoSink) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation (param_1: s8[2], p1: s8[2,4096,4096]) -> s8[2,4096,4096] {
param0 = s8[2] parameter(0)
param1 = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(1)
bcast = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} broadcast(param0), dimensions={0}
ROOT out = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} add(param1, bcast)
}
ENTRY main {
p = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s8[2]{0:T(128)} constant({10,20})
ROOT out = s8[2,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[2]{0:T(128)} c, p), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_FALSE(result);
}
TEST_F(FusionConstantSinkingTest, SinkConstantNested) {
std::string hlo_string = R"(
HloModule SimpleLoop
%fused_computation.inner (param_0.51117: s8[56,4096,4096], param_1:
s32[]) -> s8[1,4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%constant.85694 = s32[]{:T(128)} constant(0)
ROOT %dynamic-slice.22040 = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)}
dynamic-slice(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117,
s32[]{:T(128)} p1, s32[]{:T(128)} %constant.85694, s32[]{:T(128)}
%constant.85694), dynamic_slice_sizes={1,4096,4096}
}
%fused_computation (param_0.51117: s8[56,4096,4096], param_1:
s32[]) -> s8[4096,4096] {
%param_0.51117 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
p1 = s32[]{:T(128)} parameter(1)
%inner.fusion = s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} %param_0.51117, s32[]{:T(128)} p1), kind=kLoop, calls=%fused_computation.inner
ROOT %bitcast = s8[4096,4096]{1,0:T(8,128)(4,1)} bitcast(s8[1,4096,4096]{2,1,0:T(8,128)(4,1)} %inner.fusion)
}
ENTRY main {
p0 = s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} parameter(0)
c = s32[]{:T(128)} constant(10)
ROOT out = s8[4096,4096]{1,0:T(8,128)(4,1)}
fusion(s8[56,4096,4096]{2,1,0:T(8,128)(4,1)} p0, s32[]{:T(128)} c),
kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
FusionConstantSinking constant_sinking;
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&constant_sinking, module.get()));
EXPECT_TRUE(result);
EXPECT_THAT(
module->GetComputationWithName("fused_computation")->num_parameters(), 1);
EXPECT_THAT(module->GetComputationWithName("fused_computation.inner")
->num_parameters(),
1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_constant_sinking.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_constant_sinking_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
912ad902-faee-4d7f-9487-783e291566d2 | cpp | tensorflow/tensorflow | const_analysis | tensorflow/compiler/tf2xla/const_analysis.cc | tensorflow/compiler/tf2xla/const_analysis_test.cc | #include "tensorflow/compiler/tf2xla/const_analysis.h"
#include <unordered_map>
#include <unordered_set>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
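// Instantiates the function named by the `func_attr_name` attribute of `node`
// and returns its FunctionBody via `fbody`.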
Status GetFunctionBody(FunctionLibraryRuntime* flib_runtime,
const NodeDef& node, StringPiece func_attr_name,
const FunctionBody** fbody) {
NameAttrList name_attr_list;
TF_RETURN_IF_ERROR(GetNodeAttr(node, func_attr_name, &name_attr_list));
FunctionLibraryRuntime::Handle func_handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
name_attr_list.name(), AttrSlice(&name_attr_list.attr()), &func_handle));
*fbody = flib_runtime->GetFunctionBody(func_handle);
return absl::OkStatus();
}
Status GetFunctionBodies(FunctionLibraryRuntime* flib_runtime,
const NodeDef& node, StringPiece func_list_attr_name,
std::vector<const FunctionBody*>* fbodies) {
std::vector<NameAttrList> name_attr_lists;
TF_RETURN_IF_ERROR(GetNodeAttr(node, func_list_attr_name, &name_attr_lists));
for (const NameAttrList& name_attr_list : name_attr_lists) {
FunctionLibraryRuntime::Handle func_handle;
TF_RETURN_IF_ERROR(flib_runtime->Instantiate(
name_attr_list.name(), AttrSlice(&name_attr_list.attr()),
&func_handle));
fbodies->push_back(flib_runtime->GetFunctionBody(func_handle));
}
return absl::OkStatus();
}
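// Given the branch bodies of a conditional (If/Case), records which node
// inputs must be compile-time constants. Branch argument i maps to node
// input i + 1, since input 0 is the predicate/branch index.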
Status CondConstInputIndices(
absl::Span<const FunctionBody* const> branch_bodies,
std::vector<int>* const_input_idxs, FunctionLibraryRuntime* flib_runtime) {
TF_RET_CHECK(!branch_bodies.empty());
TF_RET_CHECK(branch_bodies[0] != nullptr);
int num_inputs =
branch_bodies[0]->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
for (auto fbody : branch_bodies) {
TF_RET_CHECK(fbody != nullptr);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
}
for (int i = 0, end = compile_time_const_arg_indices.size(); i < end; i++) {
if (compile_time_const_arg_indices[i]) {
const_input_idxs->push_back(i + 1);
}
}
return absl::OkStatus();
}
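// Computes the indices of `node`'s inputs that must be compile-time
// constants, recursing into the bodies of functional ops (While, If, Case,
// PartitionedCall) and otherwise consulting the XLA op registry.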
Status GetCompileTimeConstInputs(const NodeDef& node, const OpKernel* op_kernel,
const OpDef* op_def,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
DCHECK(op_def != nullptr || op_kernel != nullptr);
if (node.op() == "While" || node.op() == "StatelessWhile") {
const FunctionBody* fcond = nullptr;
const FunctionBody* fbody = nullptr;
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "cond", &fcond));
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "body", &fbody));
TF_RET_CHECK(fcond);
TF_RET_CHECK(fbody);
int num_inputs = fbody->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fcond->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
for (int i = 0; i < num_inputs; i++) {
if (compile_time_const_arg_indices[i]) {
TF_ASSIGN_OR_RETURN(
bool is_loop_invariant,
IsLoopInvariant(fbody, i,
flib_runtime->GetFunctionLibraryDefinition()));
if (is_loop_invariant) {
const_input_idxs->push_back(i);
} else {
Node* arg_i = fbody->arg_nodes[i];
Node* ret_i = fbody->ret_nodes[i];
VLOG(1) << "Argument " << i << " to while-loop " << node.name()
<< " has to be constant, but it's not a loop invariant, "
"cluster compilation likely to fail at compile time: "
<< arg_i->DebugString() << " vs. " << ret_i->DebugString();
VLOG(1) << node.ShortDebugString();
}
}
}
return absl::OkStatus();
} else if (node.op() == "If" || node.op() == "StatelessIf") {
const FunctionBody* fthen = nullptr;
const FunctionBody* felse = nullptr;
TF_RETURN_IF_ERROR(
GetFunctionBody(flib_runtime, node, "then_branch", &fthen));
TF_RETURN_IF_ERROR(
GetFunctionBody(flib_runtime, node, "else_branch", &felse));
return CondConstInputIndices({fthen, felse}, const_input_idxs,
flib_runtime);
} else if (node.op() == "Case" || node.op() == "StatelessCase") {
std::vector<const FunctionBody*> branch_bodies;
TF_RETURN_IF_ERROR(
GetFunctionBodies(flib_runtime, node, "branches", &branch_bodies));
return CondConstInputIndices(branch_bodies, const_input_idxs, flib_runtime);
} else if (node.op() == "PartitionedCall" ||
node.op() == "StatefulPartitionedCall") {
const FunctionBody* fbody;
TF_RETURN_IF_ERROR(GetFunctionBody(flib_runtime, node, "f", &fbody));
int num_inputs = fbody->record->fdef().signature().input_arg_size();
std::vector<bool> compile_time_const_arg_indices(num_inputs);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*(fbody->graph), &compile_time_const_arg_indices,
nullptr, flib_runtime));
for (int i = 0; i < num_inputs; i++) {
if (compile_time_const_arg_indices[i]) {
const_input_idxs->push_back(i);
}
}
return absl::OkStatus();
} else if (op_def != nullptr) {
return XlaOpRegistry::CompileTimeConstantInputs(node, *op_def,
const_input_idxs);
} else {
return XlaOpRegistry::CompileTimeConstantInputs(*op_kernel,
const_input_idxs);
}
}
Status GetCompileTimeConstInputs(const Node* node,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
return GetCompileTimeConstInputs(node->def(), nullptr,
&node->op_def(), const_input_idxs,
flib_runtime);
}
}
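// Backwards dataflow over `g`: starting from inputs that ops require to be
// compile-time constants, marks the producing nodes (skipping over
// const-traversable ops) and records which _Arg indices must be constant.
// Results are cached on the graph when no edge filter is supplied.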
Status BackwardsConstAnalysis(
const Graph& g, std::vector<bool>* compile_time_const_arg_indices,
std::vector<bool>* compile_time_const_nodes,
FunctionLibraryRuntime* flib_runtime,
std::function<bool(const Edge&)> edge_filter_input) {
if (!compile_time_const_nodes && g.GetConstArgIndicesCache().has_value() &&
!edge_filter_input) {
VLOG(5) << "Using cached argument indices on graph " << &g;
*compile_time_const_arg_indices = g.GetConstArgIndicesCache().value();
return absl::OkStatus();
}
auto edge_filter = [&](const Edge& e) {
return edge_filter_input ? edge_filter_input(e) : true;
};
std::vector<bool> compile_time_const_nodes_impl;
if (compile_time_const_nodes) {
CHECK_EQ(compile_time_const_nodes->size(), g.num_node_ids());
} else {
compile_time_const_nodes_impl.resize(g.num_node_ids());
compile_time_const_nodes = &compile_time_const_nodes_impl;
}
Status status;
auto visit = [&](Node* node) {
if (!status.ok()) return;
if (XlaOpRegistry::IsMetadataOp(node->type_string())) {
VLOG(3) << "must-be-const node is metadata op: " << node->name();
return;
}
if ((*compile_time_const_nodes)[node->id()]) {
VLOG(3) << "marking consts for must-be-const node " << node->name();
if (node->type_string() == "_Arg") {
int index;
status = GetNodeAttr(node->attrs(), "index", &index);
if (!status.ok()) return;
if (compile_time_const_arg_indices) {
(*compile_time_const_arg_indices)[index] = true;
}
VLOG(3) << " const _Arg " << index << ": " << node->name();
return;
}
for (const Edge* pred : node->in_edges()) {
if (!pred->IsControlEdge() && edge_filter(*pred)) {
while (edge_filter(*pred) && IsConstTraversableOpType(pred->src())) {
status = pred->src()->input_edge(pred->src_output(), &pred);
if (!status.ok()) return;
}
if (edge_filter(*pred)) {
VLOG(4) << " " << pred->src()->name() << " must be const (is "
<< pred->src()->type_string() << ")";
(*compile_time_const_nodes)[pred->src()->id()] = true;
}
}
}
return;
}
std::vector<int> const_input_idxs;
status = GetCompileTimeConstInputs(node, &const_input_idxs, flib_runtime);
if (!status.ok() || const_input_idxs.empty()) {
return;
}
VLOG(3) << "marking consts for must-be-const inputs of " << node->name();
for (Edge const* edge : node->in_edges()) {
if (!edge->IsControlEdge() &&
absl::c_binary_search(const_input_idxs, edge->dst_input()) &&
edge_filter(*edge)) {
while (edge_filter(*edge) && IsConstTraversableOpType(edge->src())) {
status = edge->src()->input_edge(edge->src_output(), &edge);
if (!status.ok()) return;
}
if (edge_filter(*edge)) {
VLOG(4) << " input " << edge->dst_input() << ": "
<< edge->src()->name() << " must be const (is "
<< edge->src()->type_string() << ")";
(*compile_time_const_nodes)[edge->src()->id()] = true;
}
}
}
};
DFS(g, {}, visit, NodeComparatorName{},
[](const Edge& edge) { return !edge.src()->IsNextIteration(); });
if (compile_time_const_arg_indices && !edge_filter_input) {
VLOG(5) << "Setting the cache on the graph: " << &g;
g.GetConstArgIndicesCache() = *compile_time_const_arg_indices;
}
return status;
}
Status GetCompileTimeConstInputs(const OpKernel* op_kernel,
std::vector<int>* const_input_idxs,
FunctionLibraryRuntime* flib_runtime) {
return GetCompileTimeConstInputs(op_kernel->def(), op_kernel,
nullptr, const_input_idxs,
flib_runtime);
}
} | #include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
TEST(ConstAnalysisTest, Basics) {
Scope root = Scope::NewRootScope();
auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
auto arg3 = ops::_Arg(root.WithOpName("Arg3"), DT_INT32, 3);
auto a = ops::Shape(root, arg0);
auto b = ops::Add(root, a, arg1);
auto c = ops::Reshape(root, arg2, b);
auto d = ops::Mul(root, c, ops::Sum(root, arg3, arg3));
FixupSourceAndSinkEdges(root.graph());
std::vector<bool> const_args(4, false);
std::vector<bool> const_nodes(root.graph()->num_node_ids(), false);
TF_ASSERT_OK(BackwardsConstAnalysis(*root.graph(), &const_args, &const_nodes,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, true, false, true}));
EXPECT_FALSE(const_nodes[arg0.node()->id()]);
EXPECT_TRUE(const_nodes[arg1.node()->id()]);
EXPECT_FALSE(const_nodes[arg2.node()->id()]);
EXPECT_TRUE(const_nodes[arg3.node()->id()]);
}
TEST(ConstAnalysisTest, TopologicalOrder) {
for (bool order : {false, true}) {
Scope root = Scope::NewRootScope();
auto arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
auto arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
auto arg2 = ops::_Arg(root.WithOpName("Arg2"), DT_INT32, 2);
auto a = ops::Reshape(root, arg0, arg1);
auto b = ops::Reshape(root, arg2, a);
if (order) {
std::swap(a, b);
}
auto c = ops::Add(root, a, b);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(3, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({true, true, false}));
}
}
void TestFunctionCall(bool is_stateful_partitioned_call) {
FunctionDef callee = FunctionDefHelper::Define(
"Callee", {"t:float", "shape:int32"}, {"result:float"}, {},
{{{"result"}, "Reshape", {"t", "shape"}, {{"T", DT_FLOAT}}}});
FunctionDefLibrary flib;
*flib.add_function() = callee;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Scope root = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(root.WithOpName("tensor"), DT_FLOAT, 0);
auto arg1 = ops::_Arg(root.WithOpName("shape"), DT_INT32, 1);
NameAttrList call_attrs;
call_attrs.set_name("Callee");
if (is_stateful_partitioned_call) {
ops::StatefulPartitionedCall b(root.WithOpName("Call"),
{Output(arg0), Output(arg1)}, {DT_FLOAT},
call_attrs);
} else {
ops::PartitionedCall b(root.WithOpName("Call"),
{Output(arg0), Output(arg1)}, {DT_FLOAT},
call_attrs);
}
Graph graph(&flib_def);
TF_ASSERT_OK(root.ToGraph(&graph));
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(nullptr, Env::Default(),
nullptr,
TF_GRAPH_DEF_VERSION, &flib_def, opts));
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
lib_runtime));
EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
TEST(ConstAnalysisTest, PartitionedCall) {
TestFunctionCall(false);
}
TEST(ConstAnalysisTest, StatefulPartitionedCall) {
TestFunctionCall(true);
}
TEST(ConstAnalysisTest, DontFollowControlDependencies) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg1, c1);
Output reshape = ops::Reshape(root, arg1, add);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, true}));
}
TEST(ConstAnalysisTest, RespectExplicitAttr_0) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output arg1 = ops::_Arg(root.WithOpName("Arg1"), DT_INT32, 1);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg1, c1);
Output reshape = ops::Reshape(root, arg1, add);
reshape.node()->AddAttr(kXlaCompileTimeConstantInputsAttr,
std::vector<string>());
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(2, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({false, false}));
}
TEST(ConstAnalysisTest, RespectExplicitAttr_1) {
Scope root = Scope::NewRootScope();
Output arg0 = ops::_Arg(root.WithOpName("Arg0"), DT_INT32, 0);
Output c1 =
ops::Const(root.WithOpName("c1").WithControlDependencies(arg0), 1, {1});
Output add = ops::Add(root, arg0, c1);
std::vector<string> add_constant_inputs;
add_constant_inputs.push_back("x");
add.node()->AddAttr(kXlaCompileTimeConstantInputsAttr, add_constant_inputs);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph));
std::vector<bool> const_args(1, false);
TF_ASSERT_OK(BackwardsConstAnalysis(graph, &const_args,
nullptr,
nullptr));
EXPECT_EQ(const_args, std::vector<bool>({true}));
}
static bool Initialized = [] {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
return true;
}();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/const_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/const_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
29b5e76f-177d-4976-9b2a-49a08a665656 | cpp | google/tensorstore | byte_range | tensorstore/kvstore/byte_range.cc | tensorstore/kvstore/byte_range_test.cc | #include "tensorstore/kvstore/byte_range.h"
#include <cassert>
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_optional.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
std::ostream& operator<<(std::ostream& os, const OptionalByteRangeRequest& r) {
os << "[" << r.inclusive_min << ", ";
if (r.exclusive_max != -1) {
os << r.exclusive_max;
} else {
os << "?";
}
os << ")";
return os;
}
std::ostream& operator<<(std::ostream& os, const ByteRange& r) {
return os << "[" << r.inclusive_min << ", " << r.exclusive_max << ")";
}
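// Resolves the request against a value of `size` bytes: an `exclusive_max` of
// -1 means "to the end", and a negative `inclusive_min` counts from the end.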
Result<ByteRange> OptionalByteRangeRequest::Validate(int64_t size) const {
assert(SatisfiesInvariants());
int64_t inclusive_min = this->inclusive_min;
int64_t exclusive_max = this->exclusive_max;
if (exclusive_max == -1) exclusive_max = size;
if (inclusive_min < 0) {
inclusive_min += size;
}
if (inclusive_min < 0 || exclusive_max > size ||
inclusive_min > exclusive_max) {
return absl::OutOfRangeError(
tensorstore::StrCat("Requested byte range ", *this,
" is not valid for value of size ", size));
}
return ByteRange{inclusive_min, exclusive_max};
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::ByteRange, tensorstore::serialization::ApplyMembersSerializer<
tensorstore::ByteRange>())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::OptionalByteRangeRequest,
tensorstore::serialization::ApplyMembersSerializer<
tensorstore::OptionalByteRangeRequest>()) | #include "tensorstore/kvstore/byte_range.h"
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::ByteRange;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::StrCat;
using ::tensorstore::internal::GetSubCord;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(ByteRangeTest, SatisfiesInvariants) {
EXPECT_TRUE((ByteRange{0, 0}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{0, 1}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{0, 100}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{10, 100}).SatisfiesInvariants());
EXPECT_TRUE((ByteRange{100, 100}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{100, 99}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{100, 0}).SatisfiesInvariants());
EXPECT_FALSE((ByteRange{-100, 0}).SatisfiesInvariants());
}
TEST(ByteRangeTest, Size) {
EXPECT_EQ(5, (ByteRange{2, 7}.size()));
EXPECT_EQ(0, (ByteRange{2, 2}.size()));
}
TEST(ByteRangeTest, Comparison) {
ByteRange a{1, 2};
ByteRange b{1, 3};
ByteRange c{2, 3};
EXPECT_TRUE(a == a);
EXPECT_TRUE(b == b);
EXPECT_TRUE(c == c);
EXPECT_FALSE(a != a);
EXPECT_FALSE(b != b);
EXPECT_FALSE(c != c);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_NE(a, c);
EXPECT_NE(b, c);
}
TEST(ByteRangeTest, Ostream) {
EXPECT_EQ("[1, 10)", tensorstore::StrCat(ByteRange{1, 10}));
}
TEST(OptionalByteRangeRequestTest, DefaultConstruct) {
OptionalByteRangeRequest r;
EXPECT_EQ(0, r.inclusive_min);
EXPECT_EQ(-1, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructInclusiveMin) {
OptionalByteRangeRequest r(5);
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(-1, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructInclusiveMinExclusiveMax) {
OptionalByteRangeRequest r(5, 10);
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(10, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, ConstructByteRange) {
OptionalByteRangeRequest r(ByteRange{5, 10});
EXPECT_EQ(5, r.inclusive_min);
EXPECT_EQ(10, r.exclusive_max);
}
TEST(OptionalByteRangeRequestTest, Comparison) {
OptionalByteRangeRequest a{1, 2};
OptionalByteRangeRequest b{1, 3};
OptionalByteRangeRequest c{2, 3};
OptionalByteRangeRequest d{1, -1};
EXPECT_TRUE(a == a);
EXPECT_TRUE(b == b);
EXPECT_TRUE(c == c);
EXPECT_TRUE(d == d);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a == b);
EXPECT_TRUE(a != b);
EXPECT_TRUE(a != c);
EXPECT_TRUE(a != d);
EXPECT_TRUE(b != d);
EXPECT_TRUE(c != d);
}
TEST(OptionalByteRangeRequestTest, SatisfiesInvariants) {
EXPECT_TRUE(OptionalByteRangeRequest().SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(10).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 1).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 0).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(0, 100).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(10, 100).SatisfiesInvariants());
EXPECT_TRUE(OptionalByteRangeRequest(100, 100).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(100, 99).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(100, 0).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(-5, 0).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(-5, 3).SatisfiesInvariants());
EXPECT_FALSE(OptionalByteRangeRequest(3, -2).SatisfiesInvariants());
}
TEST(OptionalByteRangeRequestTest, Ostream) {
EXPECT_EQ("[5, 10)", StrCat(OptionalByteRangeRequest(5, 10)));
EXPECT_EQ("[5, ?)", StrCat(OptionalByteRangeRequest(5)));
}
TEST(OptionalByteRangeRequestTest, Validate) {
EXPECT_THAT(OptionalByteRangeRequest().Validate(0),
::testing::Optional(ByteRange{0, 0}));
EXPECT_THAT(OptionalByteRangeRequest().Validate(1),
::testing::Optional(ByteRange{0, 1}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(20),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(10),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5).Validate(10),
::testing::Optional(ByteRange{5, 10}));
EXPECT_THAT(OptionalByteRangeRequest(-3).Validate(10),
::testing::Optional(ByteRange{7, 10}));
EXPECT_THAT(OptionalByteRangeRequest(-10).Validate(10),
::testing::Optional(ByteRange{0, 10}));
EXPECT_THAT(OptionalByteRangeRequest(5, 10).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[5, 10\\) is not valid for "
"value of size 9"));
EXPECT_THAT(
OptionalByteRangeRequest(10, 15).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[10, 15\\) is not valid for "
"value of size 9"));
EXPECT_THAT(
OptionalByteRangeRequest(-10).Validate(9),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Requested byte range \\[-10, \\?\\) is not valid for "
"value of size 9"));
}
TEST(GetSubStringTest, Basic) {
EXPECT_EQ("bcd", GetSubCord(absl::Cord("abcde"), {1, 4}));
EXPECT_EQ("bcd", GetSubCord(absl::Cord("abcde"), {1, 4}));
EXPECT_EQ("abcde", GetSubCord(absl::Cord("abcde"), {0, 5}));
}
TEST(ByteRangeSerializationTest, Basic) {
TestSerializationRoundTrip(ByteRange{1, 5});
}
TEST(OptionalByteRangeRequestSerializationTest, Basic) {
TestSerializationRoundTrip(OptionalByteRangeRequest{1, 5});
TestSerializationRoundTrip(OptionalByteRangeRequest{1});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/byte_range.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/byte_range_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
74347d5a-f256-4f23-8442-5f6ae96ccda2 | cpp | tensorflow/tensorflow | rank | tensorflow/lite/kernels/rank.cc | tensorflow/lite/kernels/rank_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace rank {
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteInt32;
SetTensorToPersistentRo(output);
TfLiteIntArray* output_size = TfLiteIntArrayCreate(0);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0);
if (output->type == kTfLiteInt32) {
int32_t* output_data = GetTensorData<int32_t>(output);
*output_data = NumDimensions(input);
} else {
return kTfLiteError;
}
return kTfLiteOk;
}
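// The rank is computed and written into the persistent read-only output
// during Prepare above, so Eval below intentionally has no work left to do.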
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
}
TfLiteRegistration* Register_RANK() {
static TfLiteRegistration r = {nullptr, nullptr, rank::Prepare, rank::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class RankOpModel : public SingleOpModel {
public:
RankOpModel(std::initializer_list<int> input_shape, TensorType input_type) {
TensorType output_type = TensorType_INT32;
input_ = AddInput(input_type);
output_ = AddOutput(output_type);
SetBuiltinOp(BuiltinOperator_RANK, BuiltinOptions_RankOptions,
CreateRankOptions(builder_).Union());
BuildInterpreter({input_shape});
}
TfLiteStatus InvokeWithResult() { return interpreter_->Invoke(); }
int input() { return input_; }
std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
TfLiteAllocationType GetOutputAllocationType() const {
return interpreter_->tensor(interpreter_->outputs()[0])->allocation_type;
}
private:
int input_;
int output_;
};
TEST(RankOpTest, InputTypeFloat) {
RankOpModel model({1, 3, 1, 3, 5}, TensorType_FLOAT32);
ASSERT_EQ(model.GetOutputAllocationType(), kTfLitePersistentRo);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_TRUE(model.GetOutputShape().empty());
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
TEST(RankOpTest, InputTypeInt) {
RankOpModel model({1, 3, 1, 3, 5}, TensorType_INT32);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({5}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
TEST(RankOpTest, ScalarTensor) {
RankOpModel model({}, TensorType_FLOAT32);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
TEST(RankOpTest, EmptyTensor) {
RankOpModel model({1, 0}, TensorType_FLOAT32);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({2}));
EXPECT_TRUE(model.GetOutputShape().empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rank.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rank_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8e2996a-a6e1-486c-8094-9fb6020f1f32 | cpp | abseil/abseil-cpp | pcg_engine | absl/random/internal/pcg_engine.h | absl/random/internal/pcg_engine_test.cc | #ifndef ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_
#define ABSL_RANDOM_INTERNAL_PCG_ENGINE_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/iostream_state_saver.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
template <typename Params, typename Mix>
class pcg_engine {
static_assert(std::is_same<typename Params::state_type,
typename Mix::state_type>::value,
"Class-template absl::pcg_engine must be parameterized by "
"Params and Mix with identical state_type");
static_assert(std::is_unsigned<typename Mix::result_type>::value,
"Class-template absl::pcg_engine must be parameterized by "
"an unsigned Mix::result_type");
using params_type = Params;
using mix_type = Mix;
using state_type = typename Mix::state_type;
public:
using result_type = typename Mix::result_type;
static constexpr result_type(min)() {
return (std::numeric_limits<result_type>::min)();
}
static constexpr result_type(max)() {
return (std::numeric_limits<result_type>::max)();
}
explicit pcg_engine(uint64_t seed_value = 0) { seed(seed_value); }
template <class SeedSequence,
typename = typename absl::enable_if_t<
!std::is_same<SeedSequence, pcg_engine>::value>>
explicit pcg_engine(SeedSequence&& seq) {
seed(seq);
}
pcg_engine(const pcg_engine&) = default;
pcg_engine& operator=(const pcg_engine&) = default;
pcg_engine(pcg_engine&&) = default;
pcg_engine& operator=(pcg_engine&&) = default;
result_type operator()() {
state_ = lcg(state_);
return Mix{}(state_);
}
void seed(uint64_t seed_value = 0) {
state_type tmp = seed_value;
state_ = lcg(tmp + Params::increment());
}
template <class SeedSequence>
typename absl::enable_if_t<
!std::is_convertible<SeedSequence, uint64_t>::value, void>
seed(SeedSequence&& seq) {
reseed(seq);
}
void discard(uint64_t count) { state_ = advance(state_, count); }
bool operator==(const pcg_engine& other) const {
return state_ == other.state_;
}
bool operator!=(const pcg_engine& other) const { return !(*this == other); }
template <class CharT, class Traits>
friend typename absl::enable_if_t<(sizeof(state_type) == 16),
std::basic_ostream<CharT, Traits>&>
operator<<(
std::basic_ostream<CharT, Traits>& os,
const pcg_engine& engine) {
auto saver = random_internal::make_ostream_state_saver(os);
random_internal::stream_u128_helper<state_type> helper;
helper.write(pcg_engine::params_type::multiplier(), os);
os << os.fill();
helper.write(pcg_engine::params_type::increment(), os);
os << os.fill();
helper.write(engine.state_, os);
return os;
}
template <class CharT, class Traits>
friend typename absl::enable_if_t<(sizeof(state_type) <= 8),
std::basic_ostream<CharT, Traits>&>
operator<<(
std::basic_ostream<CharT, Traits>& os,
const pcg_engine& engine) {
auto saver = random_internal::make_ostream_state_saver(os);
os << pcg_engine::params_type::multiplier() << os.fill();
os << pcg_engine::params_type::increment() << os.fill();
os << engine.state_;
return os;
}
template <class CharT, class Traits>
friend typename absl::enable_if_t<(sizeof(state_type) == 16),
std::basic_istream<CharT, Traits>&>
operator>>(
std::basic_istream<CharT, Traits>& is,
pcg_engine& engine) {
random_internal::stream_u128_helper<state_type> helper;
auto mult = helper.read(is);
auto inc = helper.read(is);
auto tmp = helper.read(is);
if (mult != pcg_engine::params_type::multiplier() ||
inc != pcg_engine::params_type::increment()) {
is.setstate(is.rdstate() | std::ios_base::failbit);
}
if (!is.fail()) {
engine.state_ = tmp;
}
return is;
}
template <class CharT, class Traits>
friend typename absl::enable_if_t<(sizeof(state_type) <= 8),
std::basic_istream<CharT, Traits>&>
operator>>(
std::basic_istream<CharT, Traits>& is,
pcg_engine& engine) {
state_type mult{}, inc{}, tmp{};
is >> mult >> inc >> tmp;
if (mult != pcg_engine::params_type::multiplier() ||
inc != pcg_engine::params_type::increment()) {
is.setstate(is.rdstate() | std::ios_base::failbit);
}
if (!is.fail()) {
engine.state_ = tmp;
}
return is;
}
private:
state_type state_;
static inline constexpr state_type lcg(state_type s) {
return s * Params::multiplier() + Params::increment();
}
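// Advances the LCG by n steps in O(log n) time: each iteration squares the
// multiplier and folds the increment, consuming one bit of n.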
inline state_type advance(state_type s, uint64_t n) const {
state_type mult = Params::multiplier();
state_type inc = Params::increment();
state_type m = 1;
state_type i = 0;
while (n > 0) {
if (n & 1) {
m *= mult;
i = i * mult + inc;
}
inc = (mult + 1) * inc;
mult *= mult;
n >>= 1;
}
return m * s + i;
}
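// Packs enough seed-sequence outputs to fill state_type, then applies the
// same lcg() entry step used by seed(uint64_t) above.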
template <class SeedSequence>
void reseed(SeedSequence& seq) {
using sequence_result_type = typename SeedSequence::result_type;
constexpr size_t kBufferSize =
sizeof(state_type) / sizeof(sequence_result_type);
sequence_result_type buffer[kBufferSize];
seq.generate(std::begin(buffer), std::end(buffer));
state_type tmp = buffer[0];
for (size_t i = 1; i < kBufferSize; i++) {
tmp <<= (sizeof(sequence_result_type) * 8);
tmp |= buffer[i];
}
state_ = lcg(tmp + params_type::increment());
}
};
template <uint64_t kMultA, uint64_t kMultB, uint64_t kIncA, uint64_t kIncB>
class pcg128_params {
public:
using state_type = absl::uint128;
static inline constexpr state_type multiplier() {
return absl::MakeUint128(kMultA, kMultB);
}
static inline constexpr state_type increment() {
return absl::MakeUint128(kIncA, kIncB);
}
};
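// The PCG "XSL RR 128/64" output function: xors the high half of the state
// into the low half, then rotates right by the top 6 bits of the state.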
struct pcg_xsl_rr_128_64 {
using state_type = absl::uint128;
using result_type = uint64_t;
inline uint64_t operator()(state_type state) {
uint64_t rotate = static_cast<uint64_t>(state >> 122u);
state ^= state >> 64;
uint64_t s = static_cast<uint64_t>(state);
return rotr(s, static_cast<int>(rotate));
}
};
template <uint64_t kMult, uint64_t kInc>
class pcg64_params {
public:
using state_type = uint64_t;
static inline constexpr state_type multiplier() { return kMult; }
static inline constexpr state_type increment() { return kInc; }
};
struct pcg_xsh_rr_64_32 {
using state_type = uint64_t;
using result_type = uint32_t;
inline uint32_t operator()(uint64_t state) {
return rotr(static_cast<uint32_t>(((state >> 18) ^ state) >> 27),
state >> 59);
}
};
using pcg64_2018_engine = pcg_engine<
random_internal::pcg128_params<0x2360ed051fc65da4ull, 0x4385df649fccf645ull,
0x5851f42d4c957f2d, 0x14057b7ef767814f>,
random_internal::pcg_xsl_rr_128_64>;
using pcg32_2018_engine = pcg_engine<
random_internal::pcg64_params<0x5851f42d4c957f2dull, 0x14057b7ef767814full>,
random_internal::pcg_xsh_rr_64_32>;
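// Illustrative usage sketch (not part of the original header): both aliases
// model the standard random-number-engine interface defined above.
//
//   absl::random_internal::pcg64_2018_engine engine(42);
//   uint64_t sample = engine();  // next 64-bit output
//   engine.discard(10);          // jump ahead 10 states in O(log n)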
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/pcg_engine.h"
#include <algorithm>
#include <bitset>
#include <cmath>
#include <functional>
#include <random>
#include <sstream>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/internal/explicit_seed_seq.h"
#include "absl/time/clock.h"
#define UPDATE_GOLDEN 0
namespace {
using absl::random_internal::ExplicitSeedSeq;
using absl::random_internal::pcg32_2018_engine;
using absl::random_internal::pcg64_2018_engine;
template <typename EngineType>
class PCGEngineTest : public ::testing::Test {};
using EngineTypes = ::testing::Types<pcg64_2018_engine, pcg32_2018_engine>;
TYPED_TEST_SUITE(PCGEngineTest, EngineTypes);
TYPED_TEST(PCGEngineTest, VerifyReseedChangesAllValues) {
using engine_type = TypeParam;
using result_type = typename engine_type::result_type;
const size_t kNumOutputs = 16;
engine_type engine;
{
std::seed_seq seq1{1, 2, 3, 4, 5, 6, 7};
engine.seed(seq1);
}
result_type a[kNumOutputs];
std::generate(std::begin(a), std::end(a), std::ref(engine));
{
std::random_device rd;
std::seed_seq seq2{rd(), rd(), rd()};
engine.seed(seq2);
}
result_type b[kNumOutputs];
std::generate(std::begin(b), std::end(b), std::ref(engine));
size_t changed_bits = 0;
size_t unchanged_bits = 0;
size_t total_set = 0;
size_t total_bits = 0;
size_t equal_count = 0;
for (size_t i = 0; i < kNumOutputs; ++i) {
equal_count += (a[i] == b[i]) ? 1 : 0;
std::bitset<sizeof(result_type) * 8> bitset(a[i] ^ b[i]);
changed_bits += bitset.count();
unchanged_bits += bitset.size() - bitset.count();
std::bitset<sizeof(result_type) * 8> a_set(a[i]);
std::bitset<sizeof(result_type) * 8> b_set(b[i]);
total_set += a_set.count() + b_set.count();
total_bits += 2 * 8 * sizeof(result_type);
}
EXPECT_LE(changed_bits, 0.60 * (changed_bits + unchanged_bits));
EXPECT_GE(changed_bits, 0.40 * (changed_bits + unchanged_bits));
EXPECT_NEAR(total_set, total_bits * 0.5, 4 * std::sqrt(total_bits))
<< "@" << total_set / static_cast<double>(total_bits);
const double kExpected = kNumOutputs / (1.0 * sizeof(result_type) * 8);
EXPECT_LE(equal_count, 1.0 + kExpected);
}
constexpr size_t kTwoBufferValues = 16;
TYPED_TEST(PCGEngineTest, VerifyDiscard) {
using engine_type = TypeParam;
for (size_t num_used = 0; num_used < kTwoBufferValues; ++num_used) {
engine_type engine_used;
for (size_t i = 0; i < num_used; ++i) {
engine_used();
}
for (size_t num_discard = 0; num_discard < kTwoBufferValues;
++num_discard) {
engine_type engine1 = engine_used;
engine_type engine2 = engine_used;
for (size_t i = 0; i < num_discard; ++i) {
engine1();
}
engine2.discard(num_discard);
for (size_t i = 0; i < kTwoBufferValues; ++i) {
const auto r1 = engine1();
const auto r2 = engine2();
ASSERT_EQ(r1, r2) << "used=" << num_used << " discard=" << num_discard;
}
}
}
}
TYPED_TEST(PCGEngineTest, StreamOperatorsResult) {
using engine_type = TypeParam;
std::wostringstream os;
std::wistringstream is;
engine_type engine;
EXPECT_EQ(&(os << engine), &os);
EXPECT_EQ(&(is >> engine), &is);
}
TYPED_TEST(PCGEngineTest, StreamSerialization) {
using engine_type = TypeParam;
for (size_t discard = 0; discard < kTwoBufferValues; ++discard) {
ExplicitSeedSeq seed_sequence{12, 34, 56};
engine_type engine(seed_sequence);
engine.discard(discard);
std::stringstream stream;
stream << engine;
engine_type new_engine;
stream >> new_engine;
for (size_t i = 0; i < 64; ++i) {
EXPECT_EQ(engine(), new_engine()) << " " << i;
}
}
}
constexpr size_t kNumGoldenOutputs = 127;
TYPED_TEST(PCGEngineTest, RandomNumberEngineInterface) {
using engine_type = TypeParam;
using E = engine_type;
using T = typename E::result_type;
static_assert(std::is_copy_constructible<E>::value,
"engine_type must be copy constructible");
static_assert(absl::is_copy_assignable<E>::value,
"engine_type must be copy assignable");
static_assert(std::is_move_constructible<E>::value,
"engine_type must be move constructible");
static_assert(absl::is_move_assignable<E>::value,
"engine_type must be move assignable");
static_assert(std::is_same<decltype(std::declval<E>()()), T>::value,
"return type of operator() must be result_type");
E e, v;
const E x, y;
T s = 1;
std::seed_seq q{1, 2, 3};
unsigned long long z = 1;
std::wostringstream os;
std::wistringstream is;
E{};
E{x};
E{s};
E{q};
e.seed();
EXPECT_TRUE(e == x);
e.seed(q);
{
E tmp(q);
EXPECT_TRUE(e == tmp);
}
e();
{
E tmp(q);
EXPECT_TRUE(e != tmp);
}
e.discard(z);
static_assert(std::is_same<decltype(x == y), bool>::value,
"return type of operator== must be bool");
static_assert(std::is_same<decltype(x != y), bool>::value,
"return type of operator== must be bool");
}
TYPED_TEST(PCGEngineTest, RandenEngineSFINAETest) {
using engine_type = TypeParam;
using result_type = typename engine_type::result_type;
{
engine_type engine(result_type(1));
engine.seed(result_type(1));
}
{
result_type n = 1;
engine_type engine(n);
engine.seed(n);
}
{
engine_type engine(1);
engine.seed(1);
}
{
int n = 1;
engine_type engine(n);
engine.seed(n);
}
{
std::seed_seq seed_seq;
engine_type engine(seed_seq);
engine.seed(seed_seq);
}
{
engine_type engine{std::seed_seq()};
engine.seed(std::seed_seq());
}
}
TEST(PCG642018EngineTest, VerifyGolden) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0x01070196e695f8f1, 0x703ec840c59f4493, 0xe54954914b3a44fa,
0x96130ff204b9285e, 0x7d9fdef535ceb21a, 0x666feed42e1219a0,
0x981f685721c8326f, 0xad80710d6eab4dda, 0xe202c480b037a029,
0x5d3390eaedd907e2, 0x0756befb39c6b8aa, 0x1fb44ba6634d62a3,
0x8d20423662426642, 0x34ea910167a39fb4, 0x93010b43a80d0ab6,
0x663db08a98fc568a, 0x720b0a1335956fae, 0x2c35483e31e1d3ba,
0x429f39776337409d, 0xb46d99e638687344, 0x105370b96aedcaee,
0x3999e92f811cff71, 0xd230f8bcb591cfc9, 0x0dce3db2ba7bdea5,
0xcf2f52c91eec99af, 0x2bc7c24a8b998a39, 0xbd8af1b0d599a19c,
0x56bc45abc66059f5, 0x170a46dc170f7f1e, 0xc25daf5277b85fad,
0xe629c2e0c948eadb, 0x1720a796915542ed, 0x22fb0caa4f909951,
0x7e0c0f4175acd83d, 0xd9fcab37ff2a860c, 0xab2280fb2054bad1,
0x58e8a06f37fa9e99, 0xc3a52a30b06528c7, 0x0175f773a13fc1bd,
0x731cfc584b00e840, 0x404cc7b2648069cb, 0x5bc29153b0b7f783,
0x771310a38cc999d1, 0x766a572f0a71a916, 0x90f450fb4fc48348,
0xf080ea3e1c7b1a0d, 0x15471a4507d66a44, 0x7d58e55a78f3df69,
0x0130a094576ac99c, 0x46669cb2d04b1d87, 0x17ab5bed20191840,
0x95b177d260adff3e, 0x025fb624b6ee4c07, 0xb35de4330154a95f,
0xe8510fff67e24c79, 0x132c3cbcd76ed2d3, 0x35e7cc145a093904,
0x9f5b5b5f81583b79, 0x3ee749a533966233, 0x4af85886cdeda8cd,
0x0ca5380ecb3ef3aa, 0x4f674eb7661d3192, 0x88a29aad00cd7733,
0x70b627ca045ffac6, 0x5912b43ea887623d, 0x95dc9fc6f62cf221,
0x926081a12a5c905b, 0x9c57d4cd7dfce651, 0x85ab2cbf23e3bb5d,
0xc5cd669f63023152, 0x3067be0fad5d898e, 0x12b56f444cb53d05,
0xbc2e5a640c3434fc, 0x9280bff0e4613fe1, 0x98819094c528743e,
0x999d1c98d829df33, 0x9ff82a012dc89242, 0xf99183ed39c8be94,
0xf0f59161cd421c55, 0x3c705730c2f6c48d, 0x66ad85c6e9278a61,
0x2a3428e4a428d5d0, 0x79207d68fd04940d, 0xea7f2b402edc8430,
0xa06b419ac857f63b, 0xcb1dd0e6fbc47e1c, 0x4f55229200ada6a4,
0x9647b5e6359c927f, 0x30bf8f9197c7efe5, 0xa79519529cc384d0,
0xbb22c4f339ad6497, 0xd7b9782f59d14175, 0x0dff12fff2ec0118,
0xa331ad8305343a7c, 0x48dad7e3f17e0862, 0x324c6fb3fd3c9665,
0xf0e4350e7933dfc4, 0x7ccda2f30b8b03b6, 0xa0afc6179005de40,
0xee65da6d063b3a30, 0xb9506f42f2bfe87a, 0xc9a2e26b0ef5baa0,
0x39fa9d4f495011d6, 0xbecc21a45d023948, 0x6bf484c6593f737f,
0x8065e0070cadc3b7, 0x9ef617ed8d419799, 0xac692cf8c233dd15,
0xd2ed87583c4ebb98, 0xad95ba1bebfedc62, 0x9b60b160a8264e43,
0x0bc8c45f71fcf25b, 0x4a78035cdf1c9931, 0x4602dc106667e029,
0xb335a3c250498ac8, 0x0256ebc4df20cab8, 0x0c61efd153f0c8d9,
0xe5d0150a4f806f88, 0x99d6521d351e7d87, 0x8d4888c9f80f4325,
0x106c5735c1ba868d, 0x73414881b880a878, 0x808a9a58a3064751,
0x339a29f3746de3d5, 0x5410d7fa4f873896, 0xd84623c81d7b8a03,
0x1f7c7e7a7f47f462,
};
pcg64_2018_engine engine(0);
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%016lx, ", engine());
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed();
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(PCG642018EngineTest, VerifyGoldenSeeded) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0xb03988f1e39691ee, 0xbd2a1eb5ac31e97a, 0x8f00d6d433634d02,
0x1823c28d483d5776, 0x000c3ee3e1aeb74a, 0xfa82ef27a4f3df9c,
0xc6f382308654e454, 0x414afb1a238996c2, 0x4703a4bc252eb411,
0x99d64f62c8f7f654, 0xbb07ebe11a34fa44, 0x79eb06a363c06131,
0xf66ad3756f1c6b21, 0x130c01d5e869f457, 0x5ca2b9963aecbc81,
0xfef7bebc1de27e6c, 0x1d174faa5ed2cdbf, 0xd75b7a773f2bb889,
0xc35c872327a170a5, 0x46da6d88646a42fe, 0x4622985e0442dae2,
0xbe3cbd67297f1f9b, 0xe7c37b4a4798bfd1, 0x173d5dfad15a25c3,
0x0eb6849ba2961522, 0xb0ff7246e6700d73, 0x88cb9c42d3afa577,
0xb609731dbd94d917, 0xd3941cda04b40081, 0x28d140f7409bea3a,
0x3c96699a920a124a, 0xdb28be521958b2fd, 0x0a3f44db3d4c5124,
0x7ac8e60ba13b70d2, 0x75f03a41ded5195a, 0xaed10ac7c4e4825d,
0xb92a3b18aadb7adc, 0xda45e0081f2bca46, 0x74d39ab3753143fc,
0xb686038018fac9ca, 0x4cc309fe99542dbb, 0xf3e1a4fcb311097c,
0x58763d6fa698d69d, 0xd11c365dbecd8d60, 0x2c15d55725b1dee7,
0x89805f254d85658c, 0x2374c44dfc62158b, 0x9a8350fa7995328d,
0x198f838970cf91da, 0x96aff569562c0e53, 0xd76c8c52b7ec6e3f,
0x23a01cd9ae4baa81, 0x3adb366b6d02a893, 0xb3313e2a4c5b333f,
0x04c11230b96a5425, 0x1f7f7af04787d571, 0xaddb019365275ec7,
0x5c960468ccb09f42, 0x8438db698c69a44a, 0x492be1e46111637e,
0x9c6c01e18100c610, 0xbfe48e75b7d0aceb, 0xb5e0b89ec1ce6a00,
0x9d280ecbc2fe8997, 0x290d9e991ba5fcab, 0xeec5bec7d9d2a4f0,
0x726e81488f19150e, 0x1a6df7955a7e462c, 0x37a12d174ba46bb5,
0x3cdcdffd96b1b5c5, 0x2c5d5ac10661a26e, 0xa742ed18f22e50c4,
0x00e0ed88ff0d8a35, 0x3d3c1718cb1efc0b, 0x1d70c51ffbccbf11,
0xfbbb895132a4092f, 0x619d27f2fb095f24, 0x69af68200985e5c4,
0xbee4885f57373f8d, 0x10b7a6bfe0587e40, 0xa885e6cf2f7e5f0a,
0x59f879464f767550, 0x24e805d69056990d, 0x860970b911095891,
0xca3189954f84170d, 0x6652a5edd4590134, 0x5e1008cef76174bf,
0xcbd417881f2bcfe5, 0xfd49fc9d706ecd17, 0xeebf540221ebd066,
0x46af7679464504cb, 0xd4028486946956f1, 0xd4f41864b86c2103,
0x7af090e751583372, 0x98cdaa09278cb642, 0xffd42b921215602f,
0x1d05bec8466b1740, 0xf036fa78a0132044, 0x787880589d1ecc78,
0x5644552cfef33230, 0x0a97e275fe06884b, 0x96d1b13333d470b5,
0xc8b3cdad52d3b034, 0x091357b9db7376fd, 0xa5fe4232555edf8c,
0x3371bc3b6ada76b5, 0x7deeb2300477c995, 0x6fc6d4244f2849c1,
0x750e8cc797ca340a, 0x81728613cd79899f, 0x3467f4ee6f9aeb93,
0x5ef0a905f58c640f, 0x432db85e5101c98a, 0x6488e96f46ac80c2,
0x22fddb282625048c, 0x15b287a0bc2d4c5d, 0xa7e2343ef1f28bce,
0xc87ee1aa89bed09e, 0x220610107812c5e9, 0xcbdab6fcd640f586,
0x8d41047970928784, 0x1aa431509ec1ade0, 0xac3f0be53f518ddc,
0x16f4428ad81d0cbb, 0x675b13c2736fc4bb, 0x6db073afdd87e32d,
0x572f3ca2f1a078c6,
};
ExplicitSeedSeq seed_sequence{12, 34, 56};
pcg64_2018_engine engine(seed_sequence);
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%016lx, ", engine());
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed(seed_sequence);
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(PCG642018EngineTest, VerifyGoldenFromDeserializedEngine) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0xdd425b47b4113dea, 0x1b07176479d444b0, 0x6b391027586f2e42,
0xa166f2b15f4a2143, 0xffb6dbd7a179ee97, 0xb2c00035365bf0b1,
0x8fbb518b45855521, 0xfc789a55ddf87c3b, 0x429531f0f17ff355,
0xbe708560d603d283, 0x5bff415175c5cb6b, 0xe813491f4ad45394,
0xa853f4506d55880d, 0x7e538453e568172e, 0xe101f1e098ddd0ec,
0x6ee31266ee4c766d, 0xa8786d92d66b39d7, 0xfee622a2acf5e5b0,
0x5fe8e82c102fa7b3, 0x01f10be4cdb53c9d, 0xbe0545366f857022,
0x12e74f010a339bca, 0xb10d85ca40d5ce34, 0xe80d6feba5054875,
0x2b7c1ee6d567d4ee, 0x2a9cd043bfd03b66, 0x5cfc531bd239f3f1,
0x1c4734e4647d70f5, 0x85a8f60f006b5760, 0x6a4239ce76dca387,
0x8da0f86d7339335c, 0xf055b0468551374d, 0x486e8567e9bea9a0,
0x4cb531b8405192dd, 0xf813b1ee3157110b, 0x214c2a664a875d8e,
0x74531237b29b35f7, 0xa6f0267bb77a771e, 0x64b552bff54184a4,
0xa2d6f7af2d75b6fc, 0x460a10018e03b5ab, 0x76fd1fdcb81d0800,
0x76f5f81805070d9d, 0x1fb75cb1a70b289a, 0x9dfd25a022c4b27f,
0x9a31a14a80528e9e, 0x910dc565ddc25820, 0xd6aef8e2b0936c10,
0xe1773c507fe70225, 0xe027fd7aadd632bc, 0xc1fecb427089c8b8,
0xb5c74c69fa9dbf26, 0x71bf9b0e4670227d, 0x25f48fad205dcfdd,
0x905248ec4d689c56, 0x5c2b7631b0de5c9d, 0x9f2ee0f8f485036c,
0xfd6ce4ebb90bf7ea, 0xd435d20046085574, 0x6b7eadcb0625f986,
0x679d7d44b48be89e, 0x49683b8e1cdc49de, 0x4366cf76e9a2f4ca,
0x54026ec1cdad7bed, 0xa9a04385207f28d3, 0xc8e66de4eba074b2,
0x40b08c42de0f4cc0, 0x1d4c5e0e93c5bbc0, 0x19b80792e470ae2d,
0x6fcaaeaa4c2a5bd9, 0xa92cb07c4238438e, 0x8bb5c918a007e298,
0x7cd671e944874cf4, 0x88166470b1ba3cac, 0xd013d476eaeeade6,
0xcee416947189b3c3, 0x5d7c16ab0dce6088, 0xd3578a5c32b13d27,
0x3875db5adc9cc973, 0xfbdaba01c5b5dc56, 0xffc4fdd391b231c3,
0x2334520ecb164fec, 0x361c115e7b6de1fa, 0xeee58106cc3563d7,
0x8b7f35a8db25ebb8, 0xb29d00211e2cafa6, 0x22a39fe4614b646b,
0x92ca6de8b998506d, 0x40922fe3d388d1db, 0x9da47f1e540f802a,
0x811dceebf16a25db, 0xf6524ae22e0e53a9, 0x52d9e780a16eb99d,
0x4f504286bb830207, 0xf6654d4786bd5cc3, 0x00bd98316003a7e1,
0xefda054a6ab8f5f3, 0x46cfb0f4c1872827, 0xc22b316965c0f3b2,
0xd1a28087c7e7562a, 0xaa4f6a094b7f5cff, 0xfe2bc853a041f7da,
0xe9d531402a83c3ba, 0xe545d8663d3ce4dd, 0xfa2dcd7d91a13fa8,
0xda1a080e52a127b8, 0x19c98f1f809c3d84, 0x2cef109af4678c88,
0x53462accab3b9132, 0x176b13a80415394e, 0xea70047ef6bc178b,
0x57bca80506d6dcdf, 0xd853ba09ff09f5c4, 0x75f4df3a7ddd4775,
0x209c367ade62f4fe, 0xa9a0bbc74d5f4682, 0x5dfe34bada86c21a,
0xc2c05bbcd38566d1, 0x6de8088e348c916a, 0x6a7001c6000c2196,
0xd9fb51865fc4a367, 0x12f320e444ece8ff, 0x6d56f7f793d65035,
0x138f31b7a865f8aa, 0x58fc68b4026b9adf, 0xcd48954b79fb6436,
0x27dfce4a0232af87,
};
#if UPDATE_GOLDEN
(void)kGolden;
std::seed_seq seed_sequence{1, 2, 3};
pcg64_2018_engine engine(seed_sequence);
std::ostringstream stream;
stream << engine;
auto str = stream.str();
printf("%s\n\n", str.c_str());
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%016lx, ", engine());
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
#else
pcg64_2018_engine engine;
std::istringstream stream(
"2549297995355413924 4865540595714422341 6364136223846793005 "
"1442695040888963407 18088519957565336995 4845369368158826708");
stream >> engine;
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(PCG322018EngineTest, VerifyGolden) {
constexpr uint32_t kGolden[kNumGoldenOutputs] = {
0x7a7ecbd9, 0x89fd6c06, 0xae646aa8, 0xcd3cf945, 0x6204b303, 0x198c8585,
0x49fce611, 0xd1e9297a, 0x142d9440, 0xee75f56b, 0x473a9117, 0xe3a45903,
0xbce807a1, 0xe54e5f4d, 0x497d6c51, 0x61829166, 0xa740474b, 0x031912a8,
0x9de3defa, 0xd266dbf1, 0x0f38bebb, 0xec3c4f65, 0x07c5057d, 0xbbce03c8,
0xfd2ac7a8, 0xffcf4773, 0x5b10affb, 0xede1c842, 0xe22b01b7, 0xda133c8c,
0xaf89b0f4, 0x25d1b8bc, 0x9f625482, 0x7bfd6882, 0x2e2210c0, 0x2c8fb9a6,
0x42cb3b83, 0x40ce0dab, 0x644a3510, 0x36230ef2, 0xe2cb6d43, 0x1012b343,
0x746c6c9f, 0x36714cf8, 0xed1f5026, 0x8bbbf83e, 0xe98710f4, 0x8a2afa36,
0x09035349, 0x6dc1a487, 0x682b634b, 0xc106794f, 0x7dd78beb, 0x628c262b,
0x852fb232, 0xb153ac4c, 0x4f169d1b, 0xa69ab774, 0x4bd4b6f2, 0xdc351dd3,
0x93ff3c8c, 0xa30819ab, 0xff07758c, 0x5ab13c62, 0xd16d7fb5, 0xc4950ffa,
0xd309ae49, 0xb9677a87, 0x4464e317, 0x90dc44f1, 0xc694c1d4, 0x1d5e1168,
0xadf37a2d, 0xda38990d, 0x1ec4bd33, 0x36ca25ce, 0xfa0dc76a, 0x968a9d43,
0x6950ac39, 0xdd3276bc, 0x06d5a71e, 0x1f6f282d, 0x5c626c62, 0xdde3fc31,
0x152194ce, 0xc35ed14c, 0xb1f7224e, 0x47f76bb8, 0xb34fdd08, 0x7011395e,
0x162d2a49, 0x0d1bf09f, 0x9428a952, 0x03c5c344, 0xd3525616, 0x7816fff3,
0x6bceb8a8, 0x8345a081, 0x366420fd, 0x182abeda, 0x70f82745, 0xaf15ded8,
0xc7f52ca2, 0xa98db9c5, 0x919d99ba, 0x9c376c1c, 0xed8d34c2, 0x716ae9f5,
0xef062fa5, 0xee3b6c56, 0x52325658, 0x61afa9c3, 0xfdaf02f0, 0x961cf3ab,
0x9f291565, 0x4fbf3045, 0x0590c899, 0xde901385, 0x45005ffb, 0x509db162,
0x262fa941, 0x4c421653, 0x4b17c21e, 0xea0d1530, 0xde803845, 0x61bfd515,
0x438523ef,
};
pcg32_2018_engine engine(0);
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%08x, ", engine());
if (i % 6 == 5) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed();
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(PCG322018EngineTest, VerifyGoldenSeeded) {
constexpr uint32_t kGolden[kNumGoldenOutputs] = {
0x60b5a64c, 0x978502f9, 0x80a75f60, 0x241f1158, 0xa4cd1dbb, 0xe7284017,
0x3b678da5, 0x5223ec99, 0xe4bdd5d9, 0x72190e6d, 0xe6e702c9, 0xff80c768,
0xcf126ed3, 0x1fbd20ab, 0x60980489, 0xbc72bf89, 0x407ac6c0, 0x00bf3c51,
0xf9087897, 0x172e4eb6, 0xe9e4f443, 0x1a6098bf, 0xbf44f8c2, 0xdd84a0e5,
0xd9a52364, 0xc0e2e786, 0x061ae2ba, 0x9facb8e3, 0x6109432d, 0xd4e0a013,
0xbd8eb9a6, 0x7e86c3b6, 0x629c0e68, 0x05337430, 0xb495b9f4, 0x11ccd65d,
0xb578db25, 0x66f1246d, 0x6ef20a7f, 0x5e429812, 0x11772130, 0xb944b5c2,
0x01624128, 0xa2385ab7, 0xd3e10d35, 0xbe570ec3, 0xc951656f, 0xbe8944a0,
0x7be41062, 0x5709f919, 0xd745feda, 0x9870b9ae, 0xb44b8168, 0x19e7683b,
0xded8017f, 0xc6e4d544, 0x91ae4225, 0xd6745fba, 0xb992f284, 0x65b12b33,
0xa9d5fdb4, 0xf105ce1a, 0x35ca1a6e, 0x2ff70dd0, 0xd8335e49, 0xfb71ddf2,
0xcaeabb89, 0x5c6f5f84, 0x9a811a7d, 0xbcecbbd1, 0x0f661ba0, 0x9ad93b9d,
0xedd23e0b, 0x42062f48, 0xd38dd7e4, 0x6cd63c9c, 0x640b98ae, 0x4bff5653,
0x12626371, 0x13266017, 0xe7a698d8, 0x39c74667, 0xe8fdf2e3, 0x52803bf8,
0x2af6895b, 0x91335b7b, 0x699e4961, 0x00a40fff, 0x253ff2b6, 0x4a6cf672,
0x9584e85f, 0xf2a5000c, 0x4d58aba8, 0xb8513e6a, 0x767fad65, 0x8e326f9e,
0x182f15a1, 0x163dab52, 0xdf99c780, 0x047282a1, 0xee4f90dd, 0xd50394ae,
0x6c9fd5f0, 0xb06a9194, 0x387e3840, 0x04a9487b, 0xf678a4c2, 0xd0a78810,
0xd502c97e, 0xd6a9b12a, 0x4accc5dc, 0x416ed53e, 0x50411536, 0xeeb89c24,
0x813a7902, 0x034ebca6, 0xffa52e7c, 0x7ecd3d0e, 0xfa37a0d2, 0xb1fbe2c1,
0xb7efc6d1, 0xefa4ccee, 0xf6f80424, 0x2283f3d9, 0x68732284, 0x94f3b5c8,
0xbbdeceb9,
};
ExplicitSeedSeq seed_sequence{12, 34, 56};
pcg32_2018_engine engine(seed_sequence);
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%08x, ", engine());
if (i % 6 == 5) {
printf("\n");
}
}
printf("\n\n\n");
#else
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
engine.seed(seed_sequence);
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
TEST(PCG322018EngineTest, VerifyGoldenFromDeserializedEngine) {
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0x780f7042, 0xba137215, 0x43ab6f22, 0x0cb55f46, 0x44b2627d, 0x835597af,
0xea973ea1, 0x0d2abd35, 0x4fdd601c, 0xac4342fe, 0x7db7e93c, 0xe56ebcaf,
0x3596470a, 0x7770a9ad, 0x9b893320, 0x57db3415, 0xb432de54, 0xa02baf71,
0xa256aadb, 0x88921fc7, 0xa35fa6b3, 0xde3eca46, 0x605739a7, 0xa890b82b,
0xe457b7ad, 0x335fb903, 0xeb06790c, 0xb3c54bf6, 0x6141e442, 0xa599a482,
0xb78987cc, 0xc61dfe9d, 0x0f1d6ace, 0x17460594, 0x8f6a5061, 0x083dc354,
0xe9c337fb, 0xcfd105f7, 0x926764b6, 0x638d24dc, 0xeaac650a, 0x67d2cb9c,
0xd807733c, 0x205fc52e, 0xf5399e2e, 0x6c46ddcc, 0xb603e875, 0xce113a25,
0x3c8d4813, 0xfb584db8, 0xf6d255ff, 0xea80954f, 0x42e8be85, 0xb2feee72,
0x62bd8d16, 0x1be4a142, 0x97dca1a4, 0xdd6e7333, 0xb2caa20e, 0xa12b1588,
0xeb3a5a1a, 0x6fa5ba89, 0x077ea931, 0x8ddb1713, 0x0dd03079, 0x2c2ba965,
0xa77fac17, 0xc8325742, 0x8bb893bf, 0xc2315741, 0xeaceee92, 0x81dd2ee2,
0xe5214216, 0x1b9b8fb2, 0x01646d03, 0x24facc25, 0xd8c0e0bb, 0xa33fe106,
0xf34fe976, 0xb3b4b44e, 0x65618fed, 0x032c6192, 0xa9dd72ce, 0xf391887b,
0xf41c6a6e, 0x05c4bd6d, 0x37fa260e, 0x46b05659, 0xb5f6348a, 0x62d26d89,
0x39f6452d, 0xb17b30a2, 0xbdd82743, 0x38ecae3b, 0xfe90f0a2, 0xcb2d226d,
0xcf8a0b1c, 0x0eed3d4d, 0xa1f69cfc, 0xd7ac3ba5, 0xce9d9a6b, 0x121deb4c,
0x4a0d03f3, 0xc1821ed1, 0x59c249ac, 0xc0abb474, 0x28149985, 0xfd9a82ba,
0x5960c3b2, 0xeff00cba, 0x6073aa17, 0x25dc0919, 0x9976626e, 0xdd2ccc33,
0x39ecb6ec, 0xc6e15d13, 0xfac94cfd, 0x28cfd34f, 0xf2d2c32d, 0x51c23d08,
0x4fdb2f48, 0x97baa807, 0xf2c1004c, 0xc4ae8136, 0x71f31c94, 0x8c92d601,
0x36caf5cd,
};
#if UPDATE_GOLDEN
(void)kGolden;
std::seed_seq seed_sequence{1, 2, 3};
pcg32_2018_engine engine(seed_sequence);
std::ostringstream stream;
stream << engine;
auto str = stream.str();
printf("%s\n\n", str.c_str());
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
printf("0x%08x, ", engine());
if (i % 6 == 5) {
printf("\n");
}
}
printf("\n\n\n");
EXPECT_FALSE(true);
#else
pcg32_2018_engine engine;
std::istringstream stream(
"6364136223846793005 1442695040888963407 6537028157270659894");
stream >> engine;
for (const auto& elem : kGolden) {
EXPECT_EQ(elem, engine());
}
#endif
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/pcg_engine.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/pcg_engine_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
885df65a-587e-4769-8329-7922bd1595c9 | cpp | tensorflow/tensorflow | verify_no_outside_compilation_markers_pass | tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass.cc | tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass_test.cc | #include <memory>
#include <string>
#include "mlir/Pass/Pass.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
namespace mlir {
namespace TFDevice {
namespace {
constexpr char kXlaOutsideCompilationAttr[] = "_xla_outside_compilation";
#define GEN_PASS_DEF_VERIFYNOOUTSIDECOMPILATIONMARKERSPASS
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.h.inc"
class VerifyNoOutsideCompilationMarkersPass
: public impl::VerifyNoOutsideCompilationMarkersPassBase<
VerifyNoOutsideCompilationMarkersPass> {
public:
void runOnOperation() override;
};
bool IsLaunchOp(Operation& op) {
return dyn_cast<tf_device::LaunchOp>(op) != nullptr;
}
bool IsDeviceClusterOp(Operation& op) {
return dyn_cast<tf_device::ClusterOp>(op) != nullptr;
}
bool HasChildLaunchDeviceOp(Operation& op) {
auto cluster_op = dyn_cast<tf_device::ClusterOp>(op);
if (cluster_op == nullptr) return false;
auto walk_result = cluster_op->walk([&](Operation* op) {
if (IsLaunchOp(*op)) return WalkResult::interrupt();
return WalkResult::advance();
});
return walk_result.wasInterrupted();
}
bool HasXlaOutsideCompilationMarker(Operation& op) {
return op.getAttrOfType<StringAttr>(kXlaOutsideCompilationAttr) != nullptr;
}
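// Fails the pass if outside compilation left residue behind: either a
// launch op still nested inside a device cluster, or an op still carrying
// the _xla_outside_compilation attribute.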
void VerifyNoOutsideCompilationMarkersPass::runOnOperation() {
Operation* func_op = getOperation();
auto walk_result = func_op->walk([&](Operation* op) {
if (IsDeviceClusterOp(*op) && HasChildLaunchDeviceOp(*op)) {
std::string launch_error =
absl::StrCat("Node `", op->getName().getStringRef().str(), "` ",
"is a launch op which should have been removed by "
"outside compilation");
op->emitError() << launch_error;
LOG(ERROR) << launch_error;
return WalkResult::interrupt();
}
if (HasXlaOutsideCompilationMarker(*op)) {
std::string outside_compilation_error = absl::StrCat(
"Node `", op->getName().getStringRef().str(), "` ",
"has _xla_outside_compilation set which should have been removed by "
"outside compilation");
op->emitError() << outside_compilation_error;
LOG(ERROR) << outside_compilation_error;
return WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
signalPassFailure();
}
}
}
std::unique_ptr<mlir::OperationPass<func::FuncOp>>
CreateVerifyNoOutsideCompilationMarkersPass() {
return std::make_unique<VerifyNoOutsideCompilationMarkersPass>();
}
}
} | #include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFDevice {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::mhlo::test::GetMlirModuleFromString;
class VerifyNoOutsideCompilationMarkersPassTest : public ::testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<func::FuncOp>(
CreateVerifyNoOutsideCompilationMarkersPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(VerifyNoOutsideCompilationMarkersPassTest, PassesValidOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
}
TEST_F(VerifyNoOutsideCompilationMarkersPassTest,
FailsXlaOutsideCompilationMarkers) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
"tf.B"() {_xla_outside_compilation = "cluster1"} : () -> ()
func.return
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
}
TEST_F(VerifyNoOutsideCompilationMarkersPassTest,
FailsWithLaunchOpsInsideCluster) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = "tf_device.cluster"() ({
"tf_device.launch"() ({
"tf.B"() : () -> ()
tf_device.return
}) {device = "/job:worker/replica:0/task:0/device:CPU:0"} : () -> ()
tf_device.return
}) {cluster_attr = "cluster_attr"} : () -> tensor<*xi32>
func.return
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
}
TEST_F(VerifyNoOutsideCompilationMarkersPassTest,
PassesWithLaunchOpsOutsideCluster) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
"tf_device.launch"() ({
"tf.B"() : () -> ()
tf_device.return
}) {device = "/job:worker/replica:0/task:0/device:CPU:0"} : () -> ()
func.return
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
220965a0-a8d8-4037-8c3b-77ddd8fe7f52 | cpp | tensorflow/tensorflow | image_ops | tensorflow/compiler/tf2xla/kernels/image_ops.cc | tensorflow/core/ops/image_ops_test.cc | #include <array>
#include <numeric>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/kernels/gather_op_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/dynamic_shaped_ops.h"
#include "xla/hlo/builder/lib/loops.h"
#include "xla/hlo/builder/lib/sorting.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
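// Converts planar RGB channels to HSV: value is max(r, g, b), saturation is
// (value - min) / value (zero for black), and hue is derived from whichever
// channel attains the maximum, normalized into [0, 1).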
std::array<xla::XlaOp, 3> RGBToHSV(XlaOpKernelContext* ctx, xla::XlaBuilder* b,
const std::array<xla::XlaOp, 3>& rgb,
DataType dtype, const TensorShape& shape) {
auto zero = XlaHelpers::Zero(b, dtype);
auto one = XlaHelpers::One(b, dtype);
auto red = rgb[0];
auto green = rgb[1];
auto blue = rgb[2];
auto value = xla::Max(xla::Max(red, green), blue);
auto minimum = xla::Min(xla::Min(red, green), blue);
auto range = xla::Sub(value, minimum);
auto zeros = xla::Broadcast(zero, shape.dim_sizes());
auto saturation =
xla::Select(xla::Gt(value, zero), xla::Div(range, value), zeros);
auto norm = xla::Div(XlaHelpers::FloatLiteral(b, dtype, 1.0 / 6.0), range);
auto hue =
xla::Select(xla::Eq(green, value),
xla::Add(xla::Mul(norm, xla::Sub(blue, red)),
XlaHelpers::FloatLiteral(b, dtype, 2.0 / 6.0)),
xla::Add(xla::Mul(norm, xla::Sub(red, green)),
XlaHelpers::FloatLiteral(b, dtype, 4.0 / 6.0)));
hue = xla::Select(xla::Eq(red, value), xla::Mul(norm, xla::Sub(green, blue)),
hue);
hue = xla::Select(xla::Gt(range, zero), hue, zeros);
hue = xla::Select(xla::Lt(hue, zero), xla::Add(hue, one), hue);
return {hue, saturation, value};
}
std::array<xla::XlaOp, 3> HSVToRGB(xla::XlaBuilder* b,
const std::array<xla::XlaOp, 3>& hsv,
DataType dtype) {
xla::XlaOp hue = hsv[0];
xla::XlaOp saturation = hsv[1];
xla::XlaOp value = hsv[2];
auto zero = XlaHelpers::Zero(b, dtype);
auto one = XlaHelpers::FloatLiteral(b, dtype, 1.0);
auto two = XlaHelpers::FloatLiteral(b, dtype, 2.0);
auto three = XlaHelpers::FloatLiteral(b, dtype, 3.0);
auto four = XlaHelpers::FloatLiteral(b, dtype, 4.0);
auto six = XlaHelpers::FloatLiteral(b, dtype, 6.0);
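// Piecewise-linear ramps over the six hue sextants; each channel weight is
// clamped to [0, 1] before being blended with saturation and value.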
auto dh = xla::Mul(hue, six);
auto dr = xla::Clamp(zero, xla::Sub(xla::Abs(xla::Sub(dh, three)), one), one);
auto dg = xla::Clamp(zero, xla::Sub(two, xla::Abs(xla::Sub(dh, two))), one);
auto db = xla::Clamp(zero, xla::Sub(two, xla::Abs(xla::Sub(dh, four))), one);
auto one_minus_s = xla::Sub(one, saturation);
auto red = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, dr)), value);
auto green = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, dg)), value);
auto blue = xla::Mul(xla::Add(one_minus_s, xla::Mul(saturation, db)), value);
return {red, green, blue};
}
class RGBToHSVOp : public XlaOpKernel {
public:
explicit RGBToHSVOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape input_shape = context->InputShape(0);
OP_REQUIRES(context, input_shape.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input_shape.DebugString()));
int channel_dim = input_shape.dims() - 1;
int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::FailedPrecondition("input must have 3 channels but input has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input = context->Input(0);
xla::XlaOp red = xla::SliceInDim(input, 0,
1, 1,
channel_dim);
xla::XlaOp green = xla::SliceInDim(input, 1,
2, 1,
channel_dim);
xla::XlaOp blue = xla::SliceInDim(input, 2,
3, 1,
channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv = RGBToHSV(context, b, {red, green, blue}, context->input_type(0),
channel_shape);
context->SetOutput(0, xla::ConcatInDim(b, hsv, channel_dim));
}
};
REGISTER_XLA_OP(Name("RGBToHSV"), RGBToHSVOp);
class HSVToRGBOp : public XlaOpKernel {
public:
explicit HSVToRGBOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape input_shape = context->InputShape(0);
OP_REQUIRES(context, input_shape.dims() >= 1,
errors::InvalidArgument("input must be at least 1D",
input_shape.DebugString()));
int channel_dim = input_shape.dims() - 1;
int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::FailedPrecondition("input must have 3 channels but input has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input = context->Input(0);
xla::XlaOp hue = xla::SliceInDim(input, 0,
1, 1,
channel_dim);
xla::XlaOp saturation = xla::SliceInDim(input, 1,
2, 1,
channel_dim);
xla::XlaOp value = xla::SliceInDim(input, 2,
3, 1,
channel_dim);
auto rgb = HSVToRGB(context->builder(), {hue, saturation, value},
context->input_type(0));
context->SetOutput(0, xla::ConcatInDim(b, rgb, channel_dim));
}
};
REGISTER_XLA_OP(Name("HSVToRGB"), HSVToRGBOp);
class AdjustContrastOpV2 : public XlaOpKernel {
public:
explicit AdjustContrastOpV2(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape& input_shape = context->InputShape(0);
const TensorShape& factor_shape = context->InputShape(1);
OP_REQUIRES(context, input_shape.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input_shape.DebugString()));
int height_dim = input_shape.dims() - 3;
int width_dim = input_shape.dims() - 2;
int channel_dim = input_shape.dims() - 1;
const int64_t height = input_shape.dim_size(height_dim);
const int64_t width = input_shape.dim_size(width_dim);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(factor_shape),
errors::InvalidArgument("contrast_factor must be scalar: ",
factor_shape.DebugString()));
xla::XlaBuilder* b = context->builder();
DataType type = context->input_type(0);
xla::XlaOp input = context->Input(0);
xla::XlaOp factor = XlaHelpers::ConvertElementType(context->Input(1), type);
const DataType accumulation_type = XlaHelpers::SumAccumulationType(type);
auto converted = XlaHelpers::ConvertElementType(input, accumulation_type);
auto reduce = xla::Reduce(converted, XlaHelpers::Zero(b, accumulation_type),
*context->GetOrCreateAdd(accumulation_type),
{height_dim, width_dim});
auto output = xla::Div(
reduce, XlaHelpers::FloatLiteral(b, accumulation_type, height * width));
output = XlaHelpers::ConvertElementType(output, type);
std::vector<int64_t> broadcast_dims(input_shape.dims() - 2);
std::iota(broadcast_dims.begin(), broadcast_dims.end(), 0);
broadcast_dims.back() = channel_dim;
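// Blend each pixel with its per-channel spatial mean:
// out = input * factor + mean * (1 - factor) == (input - mean) * factor + mean.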
output =
xla::Add(xla::Mul(input, factor),
xla::Mul(output, xla::Sub(XlaHelpers::One(b, type), factor)),
broadcast_dims);
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("AdjustContrastv2"), AdjustContrastOpV2);
class AdjustSaturationOp : public XlaOpKernel {
public:
explicit AdjustSaturationOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape& input_shape = context->InputShape(0);
const TensorShape& scale_shape = context->InputShape(1);
OP_REQUIRES(context, input_shape.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input_shape.DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(scale_shape),
errors::InvalidArgument("scale must be scalar: ",
scale_shape.DebugString()));
const int channel_dim = input_shape.dims() - 1;
const int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::InvalidArgument("input must have 3 channels but instead has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input =
XlaHelpers::ConvertElementType(context->Input(0), DT_FLOAT);
xla::XlaOp scale =
XlaHelpers::ConvertElementType(context->Input(1), DT_FLOAT);
DataType type = context->input_type(0);
xla::XlaOp red = xla::SliceInDim(input, 0,
1, 1,
channel_dim);
xla::XlaOp green = xla::SliceInDim(input, 1,
2, 1,
channel_dim);
xla::XlaOp blue = xla::SliceInDim(input, 2,
3, 1,
channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv =
RGBToHSV(context, b, {red, green, blue}, DT_FLOAT, channel_shape);
hsv[1] = xla::Clamp(XlaHelpers::Zero(b, DT_FLOAT), xla::Mul(hsv[1], scale),
XlaHelpers::One(b, DT_FLOAT));
auto rgb = HSVToRGB(context->builder(), hsv, DT_FLOAT);
auto output = XlaHelpers::ConvertElementType(
xla::ConcatInDim(b, rgb, channel_dim), type);
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("AdjustSaturation"), AdjustSaturationOp);
class AdjustHueOp : public XlaOpKernel {
public:
explicit AdjustHueOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
const TensorShape& input_shape = context->InputShape(0);
const TensorShape& delta_shape = context->InputShape(1);
OP_REQUIRES(context, input_shape.dims() >= 3,
errors::InvalidArgument("input must be at least 3-D, got shape",
input_shape.DebugString()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(delta_shape),
errors::InvalidArgument("delta must be scalar: ",
delta_shape.DebugString()));
const int channel_dim = input_shape.dims() - 1;
const int64_t channels = input_shape.dim_size(channel_dim);
OP_REQUIRES(
context, channels == 3,
errors::InvalidArgument("input must have 3 channels but instead has ",
channels, " channels."));
xla::XlaBuilder* b = context->builder();
xla::XlaOp input =
XlaHelpers::ConvertElementType(context->Input(0), DT_FLOAT);
xla::XlaOp delta =
XlaHelpers::ConvertElementType(context->Input(1), DT_FLOAT);
DataType type = context->input_type(0);
xla::XlaOp red = xla::SliceInDim(input, 0,
1, 1,
channel_dim);
xla::XlaOp green = xla::SliceInDim(input, 1,
2, 1,
channel_dim);
xla::XlaOp blue = xla::SliceInDim(input, 2,
3, 1,
channel_dim);
TensorShape channel_shape = input_shape;
channel_shape.set_dim(channel_dim, 1);
auto hsv =
RGBToHSV(context, b, {red, green, blue}, DT_FLOAT, channel_shape);
auto zero = XlaHelpers::Zero(b, DT_FLOAT);
auto one = XlaHelpers::One(b, DT_FLOAT);
auto& hue = hsv[0];
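// Rotate hue by delta and wrap into [0, 1); the Select corrects the
// negative remainder produced when hue + delta < 0.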
hue = xla::Rem(xla::Add(hsv[0], delta), one);
hue =
xla::Select(xla::Lt(hue, zero), xla::Rem(xla::Add(one, hue), one), hue);
auto rgb = HSVToRGB(context->builder(), hsv, DT_FLOAT);
auto output = XlaHelpers::ConvertElementType(
xla::ConcatInDim(b, rgb, channel_dim), type);
context->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("AdjustHue"), AdjustHueOp);
struct WhileCondFn {
const int64_t num_boxes;
const int64_t output_size;
explicit WhileCondFn(int64_t num_boxes, int64_t output_size)
: num_boxes(num_boxes), output_size(output_size) {}
absl::StatusOr<xla::XlaOp> operator()(absl::Span<const xla::XlaOp> values,
xla::XlaBuilder* cond_builder) const {
xla::XlaOp row_idx = values[0];
xla::XlaOp row_in_bounds =
xla::Lt(row_idx, xla::ConstantR0<int32>(cond_builder, num_boxes));
xla::XlaOp num_outputs_so_far = values[1];
xla::XlaOp results_not_full = xla::Lt(
num_outputs_so_far, xla::ConstantR0<int32>(cond_builder, output_size));
return xla::And(row_in_bounds, results_not_full);
}
};
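// Loop body of the suppression sweep: if the current score-ordered box is
// still included, count it as an output and clear from included_iou every
// other box whose IoU with it exceeds the threshold.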
struct SuppressBodyFn {
const int64_t num_boxes;
explicit SuppressBodyFn(int64_t num_boxes) : num_boxes(num_boxes) {}
absl::StatusOr<std::vector<xla::XlaOp>> operator()(
absl::Span<const xla::XlaOp> values, xla::XlaBuilder* builder) const {
auto row_idx = values[0];
auto num_outputs_so_far = values[1];
auto iou_mask = values[2];
auto included_iou = values[3];
auto zero = xla::ConstantR0<int32>(builder, 0);
std::vector<xla::XlaOp> row_idx_vector = {row_idx};
auto active_elem = xla::DynamicSlice(included_iou, row_idx_vector, {1});
active_elem = xla::Reshape(active_elem, {});
num_outputs_so_far = xla::Select(
active_elem, num_outputs_so_far + xla::ConstantR0<int32>(builder, 1),
num_outputs_so_far);
auto row_iou = xla::DynamicSlice(iou_mask, {row_idx, zero}, {1, num_boxes});
TF_ASSIGN_OR_RETURN(auto iou_shape, builder->GetShape(iou_mask));
auto boxes_runtime_size = xla::GetDimensionSize(row_iou, 1);
if (iou_shape.is_dynamic_dimension(1)) {
row_iou = xla::SetDimensionSize(row_iou, boxes_runtime_size, 1);
}
row_iou = xla::DynamicUpdateSlice(
row_iou, xla::ConstantR2FromArray2D<bool>(builder, {{false}}),
{zero, row_idx});
row_iou = xla::Reshape(row_iou, {num_boxes});
auto supp_mask = xla::Not(row_iou);
auto cond = xla::Broadcast(active_elem, {num_boxes});
if (iou_shape.is_dynamic_dimension(1)) {
cond = xla::SetDimensionSize(cond, boxes_runtime_size, 0);
}
included_iou =
xla::Select(cond, xla::And(included_iou, supp_mask), included_iou);
row_idx = row_idx + xla::ConstantR0<int32>(builder, 1);
return std::vector<xla::XlaOp>{row_idx, num_outputs_so_far, iou_mask,
included_iou};
}
};
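// The cond/body pair above drives a sequential while loop over boxes in
// descending score order; the pairwise IoU mask is computed once below and
// the loop stops when every row is visited or output_size boxes are kept.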
class NonMaxSuppressionOp : public XlaOpKernel {
public:
explicit NonMaxSuppressionOp(OpKernelConstruction* context)
: XlaOpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("pad_to_max_output_size",
&pad_to_max_output_size_));
}
void Compile(XlaOpKernelContext* context) override {
OP_REQUIRES(context, pad_to_max_output_size_,
errors::Unimplemented(
"XLA compilation requires pad_to_max_output_size == True"));
ComputeResult(context, pad_to_max_output_size_);
}
static void ComputeResult(XlaOpKernelContext* context,
bool pad_to_max_output_size = false) {
const TensorShape& boxes_shape = context->InputShape("boxes");
OP_REQUIRES(
context, TensorShapeUtils::IsMatrix(boxes_shape),
errors::InvalidArgument("boxes must be 2-D, currently: [",
std::to_string(boxes_shape.dim_size(0)), ",",
std::to_string(boxes_shape.dim_size(1)), "]"));
const int64_t num_boxes = boxes_shape.dim_size(0);
OP_REQUIRES(
context, boxes_shape.dim_size(1) == 4,
errors::InvalidArgument("boxes must have 4 columns, currently: ",
std::to_string(boxes_shape.dim_size(1))));
const TensorShape& scores_shape = context->InputShape("scores");
OP_REQUIRES(context, TensorShapeUtils::IsVector(scores_shape),
errors::InvalidArgument("scores must be 1-D, currently: ",
scores_shape.DebugString()));
OP_REQUIRES(context, scores_shape.dim_size(0) == num_boxes,
errors::InvalidArgument(
"scores size ", std::to_string(scores_shape.dim_size(0)),
" must equal number of boxes ", std::to_string(num_boxes)));
OP_REQUIRES(context, num_boxes <= kint32max,
errors::InvalidArgument("XLA compilation requires number of "
"boxes to be <= kint32max, got ",
num_boxes));
xla::PrimitiveType boxes_xla_type = context->InputXlaType("boxes");
xla::PrimitiveType scores_xla_type = context->InputXlaType("scores");
const xla::XlaOp boxes_input = context->Input("boxes");
const xla::XlaOp scores_input = context->Input("scores");
int64_t output_size;
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(context->InputShape("max_output_size")),
errors::InvalidArgument("Max Output Size isn't a scalar"));
OP_REQUIRES(
context,
TensorShapeUtils::IsScalar(context->InputShape("iou_threshold")),
errors::InvalidArgument("IOU Threshold isn't a scalar"));
OP_REQUIRES_OK(context, context->ConstantInputAsIntScalar(2, &output_size));
OP_REQUIRES(
context, output_size >= 0,
errors::InvalidArgument("Need output_size >= 0, got ", output_size));
OP_REQUIRES(context, output_size <= kint32max,
errors::InvalidArgument("Need output_size <= kint32Max, got ",
output_size));
const xla::XlaOp score_thresh = context->Input("score_threshold");
const xla::XlaOp iou_thresh = context->Input("iou_threshold");
xla::XlaBuilder* const builder = context->builder();
const xla::XlaOp boxes = xla::Transpose(boxes_input, {1, 0});
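    // Sort the box coordinates by decreasing score. The scores are broadcast
    // across the four coordinate rows so each row is permuted by the same
    // order; the box indices are sorted the same way just below.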
const xla::XlaOp boxes_sorted = xla::GetTupleElement(
xla::Sort({xla::Broadcast(scores_input, {4}), boxes},
xla::CreateScalarGtComputation(
{scores_xla_type, boxes_xla_type}, builder),
1),
1);
const xla::XlaOp iota_indices = xla::Iota(builder, xla::S32, num_boxes);
const xla::XlaOp indices_sort = xla::Sort(
{scores_input, iota_indices},
xla::CreateScalarGtComputation({scores_xla_type, xla::S32}, builder));
const xla::XlaOp indices_sorted = xla::GetTupleElement(indices_sort, 1);
const xla::XlaOp scores = xla::GetTupleElement(indices_sort, 0);
const xla::XlaOp c_y0 = xla::Reshape(xla::SliceInDim(boxes_sorted,
0,
1,
1,
0),
{num_boxes});
const xla::XlaOp c_x0 = xla::Reshape(xla::SliceInDim(boxes_sorted,
1,
2,
1,
0),
{num_boxes});
const xla::XlaOp c_y1 = xla::Reshape(xla::SliceInDim(boxes_sorted,
2,
3,
1,
0),
{num_boxes});
const xla::XlaOp c_x1 = xla::Reshape(xla::SliceInDim(boxes_sorted,
3,
4,
1,
0),
{num_boxes});
xla::XlaOp y1 = xla::Select(xla::Le(c_y0, c_y1), c_y0, c_y1);
xla::XlaOp y2 = xla::Select(xla::Le(c_y0, c_y1), c_y1, c_y0);
xla::XlaOp x1 = xla::Select(xla::Le(c_x0, c_x1), c_x0, c_x1);
xla::XlaOp x2 = xla::Select(xla::Le(c_x0, c_x1), c_x1, c_x0);
xla::XlaOp area = (y2 - y1) * (x2 - x1);
y1 = xla::Broadcast(y1, {1});
y2 = xla::Broadcast(y2, {1});
x1 = xla::Broadcast(x1, {1});
x2 = xla::Broadcast(x2, {1});
area = xla::Broadcast(area, {1});
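    // Pairwise [num_boxes, num_boxes] intersection and union areas, formed by
    // comparing each coordinate vector against its transpose.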
xla::XlaOp i_xmin = xla::Max(x1, xla::Transpose(x1, {1, 0}));
xla::XlaOp i_ymin = xla::Max(y1, xla::Transpose(y1, {1, 0}));
xla::XlaOp i_xmax = xla::Min(x2, xla::Transpose(x2, {1, 0}));
xla::XlaOp i_ymax = xla::Min(y2, xla::Transpose(y2, {1, 0}));
auto square_zero = xla::ZerosLike(i_xmin);
xla::XlaOp i_area = xla::Max(i_xmax - i_xmin, square_zero) *
xla::Max(i_ymax - i_ymin, square_zero);
xla::XlaOp u_area = area + xla::Transpose(area, {1, 0}) - i_area;
xla::XlaOp iou = i_area / u_area;
xla::XlaOp iou_thresh_mask = xla::Gt(iou, iou_thresh + square_zero);
xla::XlaOp included_iou =
xla::Broadcast(xla::ConstantR0<bool>(builder, true), {num_boxes});
auto iou_shape_or = builder->GetShape(iou_thresh_mask);
OP_REQUIRES_OK(context, iou_shape_or.status());
auto boxes_runtime_size = xla::GetDimensionSize(iou_thresh_mask, 1);
if (iou_shape_or.value().is_dynamic_dimension(1)) {
included_iou = xla::SetDimensionSize(included_iou, boxes_runtime_size, 0);
}
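    // Run the suppression loop. State: {row index, number of boxes selected
    // so far, IOU-above-threshold mask, per-box active flags}.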
std::vector<xla::XlaOp> init_values;
init_values.reserve(4);
init_values.push_back(xla::ConstantR0<int32>(builder, 0));
init_values.push_back(xla::ConstantR0<int32>(builder, 0));
init_values.push_back(iou_thresh_mask);
init_values.push_back(included_iou);
auto suppress_loop_result =
xla::WhileLoopHelper(WhileCondFn(num_boxes, output_size),
SuppressBodyFn(num_boxes), init_values,
"suppress_loop", builder)
.value();
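    // Keep boxes that survived suppression, pass the score threshold, and
    // were actually visited by the loop; then take the top `output_size`
    // masked scores to obtain the selected indices in sorted order.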
xla::XlaOp included_score =
xla::Gt(scores, xla::Broadcast(score_thresh, {num_boxes}));
xla::XlaOp included = xla::And(included_score, suppress_loop_result[3]);
auto valid_elem = xla::Lt(
iota_indices, xla::Broadcast(suppress_loop_result[0], {num_boxes}));
included = xla::And(included, valid_elem);
xla::XlaOp neg_inf =
xla::Broadcast(xla::MinValue(builder, boxes_xla_type), {num_boxes});
xla::XlaOp scores_included = xla::Select(included, scores, neg_inf);
xla::XlaOp output_tuple = TopK(scores_included, output_size);
xla::XlaOp selected_indices_sorted = xla::GetTupleElement(output_tuple, 1);
xla::XlaOp ones_included = xla::Select(
included,
xla::Broadcast(xla::ConstantR0<int32>(builder, 1), {num_boxes}),
xla::Broadcast(xla::ConstantR0<int32>(builder, 0), {num_boxes}));
xla::XlaOp num_valid_total = xla::Reduce(
ones_included,
xla::ConstantR0<int>(builder, 0),
CreateScalarAddComputation(xla::S32, builder),
{0});
xla::XlaOp num_valid =
xla::Min(num_valid_total, xla::ConstantR0<int32>(builder, output_size));
xla::XlaOp selected_indices;
DataType gather_type = context->expected_output_dtype(0);
OP_REQUIRES_OK(
context,
XlaGather(indices_sorted, scores_shape, selected_indices_sorted,
TensorShape({output_size}),
0,
false,
gather_type, DT_INT32, builder, &selected_indices));
if (!pad_to_max_output_size) {
absl::StatusOr<xla::XlaOp> rebounded_result =
xla::SetDimensionSizeWithRebound(&context->value_inference(),
selected_indices, num_valid, 0);
if (rebounded_result.ok()) {
selected_indices = *rebounded_result;
} else {
selected_indices =
xla::SetDimensionSize(selected_indices, num_valid, 0);
}
}
context->SetOutput(0, selected_indices);
if (pad_to_max_output_size) context->SetOutput(1, num_valid);
}
private:
bool pad_to_max_output_size_;
};
REGISTER_XLA_OP(
Name("NonMaxSuppressionV4").CompileTimeConstantInput("max_output_size"),
NonMaxSuppressionOp);
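// V3 reuses the same lowering but without the padding requirement: with
// pad_to_max_output_size left false, ComputeResult marks the output dimension
// as dynamic instead of emitting a separate valid count.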
class NonMaxSuppressionV3Op : public XlaOpKernel {
public:
explicit NonMaxSuppressionV3Op(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
xla::XlaOp selected_indices, num_valid;
NonMaxSuppressionOp::ComputeResult(context);
}
};
REGISTER_XLA_OP(
Name("NonMaxSuppressionV3").CompileTimeConstantInput("max_output_size"),
NonMaxSuppressionV3Op);
}
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(ImageOpsTest, SampleDistortedBoundingBox_ShapeFn) {
ShapeInferenceTestOp op("SampleDistortedBoundingBox");
INFER_OK(op, "?;?", "[3];[3];[1,1,4]");
}
TEST(ImageOpsTest, Resize_ShapeFn) {
for (const char* op_name : {"ResizeArea", "ResizeBicubic", "ResizeBilinear",
"ResizeNearestNeighbor"}) {
ShapeInferenceTestOp op(op_name);
op.input_tensors.resize(2);
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "?;[3]");
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,20,30,d0_3]");
}
}
TEST(ImageOpsTest, DecodeGif) {
ShapeInferenceTestOp op("DecodeGif");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
INFER_OK(op, "?", "[?,?,?,3]");
INFER_OK(op, "[]", "[?,?,?,3]");
}
TEST(ImageOpTest, DecodeImage) {
ShapeInferenceTestOp op("DecodeImage");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("expand_animations", false)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("expand_animations", true)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "?");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeImage")
.Input({"img", 0, DT_STRING})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[]");
}
TEST(ImageOpsTest, DecodeImage_ShapeFn) {
for (const char* op_name : {"DecodeJpeg", "DecodePng"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Attr("channels", 4)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?,4]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"a", 0, DT_STRING})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[]");
}
}
TEST(ImageOpsTest, DecodeAndCropJpeg_ShapeFn) {
const char* op_name = "DecodeAndCropJpeg";
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Wrong number of inputs passed: 1 while 2 expected", op, "[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,?]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Attr("channels", 4)
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,4]");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Attr("channels", -1)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -1", op, "[];[]");
}
TEST(ImageOpsTest, DecodeAndCropJpeg_InvalidCropWindow) {
const char* op_name = "DecodeAndCropJpeg";
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Wrong number of inputs passed: 1 while 2 expected", op, "[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?");
TF_ASSERT_OK(NodeDefBuilder("test", op_name)
.Input({"img", 0, DT_STRING})
.Input({"crop_window", 1, DT_INT32})
.Finalize(&op.node_def));
INFER_OK(op, "[];[?]", "[?,?,?]");
}
TEST(ImageOpsTest, EncodeImage_ShapeFn) {
for (const char* op_name : {"EncodeJpeg"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[1,?,3]", "[]");
}
}
TEST(ImageOpsTest, BatchedEncodeImage_ShapeFn) {
for (const char* op_name : {"EncodePng"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2]");
INFER_OK(op, "[1,?,3]", "[]");
INFER_OK(op, "[?,1,?,3]", "[d0_0]");
INFER_OK(op, "[4,5,1,?,3]", "[d0_0,d0_1]");
}
}
TEST(ImageOpsTest, ExtractJpegShape_ShapeFn) {
ShapeInferenceTestOp op("ExtractJpegShape");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
INFER_OK(op, "?", "[3]");
}
TEST(ImageOpsTest, Colorspace_ShapeFn) {
for (const char* op_name : {"HSVToRGB", "RGBToHSV"}) {
ShapeInferenceTestOp op(op_name);
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 3 but is 4", op, "[1,2,4]");
INFER_OK(op, "[1,2,3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[1,2,?]", "[d0_0,d0_1,3]");
INFER_OK(op, "?", "?");
}
}
TEST(ImageOpsTest, ExtractGlimpse_ShapeFn) {
ShapeInferenceTestOp op("ExtractGlimpse");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "ExtractGlimpse")
.Input({"input", 0, DT_FLOAT})
.Input({"size", 1, DT_INT32})
.Input({"offsets", 2, DT_FLOAT})
.Attr("uniform_noise", true)
.Attr("noise", "")
.Finalize(&op.node_def));
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[];?");
INFER_ERROR("Dimension must be 2 but is 3", op, "?;[3];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;[1,2,3]");
INFER_OK(op, "[1,?,3,?];[2];?", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2];?", "[d0_0,20,30,d0_3]");
INFER_OK(op, "[?,?,3,?];[2];[1,?]", "[d2_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[2];[1,?]", "[d0_0|d2_0,20,30,d_0|d0_3]");
INFER_ERROR("Dimensions must be equal, but are 10 and 1", op,
"[10,?,?,?];?;[1,2]");
}
TEST(ImageOpsTest, CropAndResize_ShapeFn) {
ShapeInferenceTestOp op("CropAndResize");
op.input_tensors.resize(4);
INFER_ERROR("Shape must be rank 4 but is rank 5", op, "[1,2,3,4,5];?;?;?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3];?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;[1,2];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;?;[1,2]");
INFER_ERROR("Dimension must be 2 but is 1", op, "?;?;?;[1]");
INFER_OK(op, "[1,?,3,?];?;?;[2]", "[?,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[3] = &size_tensor;
INFER_OK(op, "[1,?,3,?];?;?;[2]", "[?,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[2,4];?;[2]", "[d1_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];?;[2];[2]", "[d2_0,20,30,d0_3]");
INFER_OK(op, "[1,?,3,?];[?,4];[?];[2]", "[d1_0|d3_0,20,30,d0_3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 1", op, "?;[2,?];[1];?");
INFER_ERROR("Dimension must be 4 but is 3", op, "?;[?,3];?;?");
}
TEST(ImageOpsTest, ResizeNearestNeighborGrad_ShapeFn) {
ShapeInferenceTestOp op("ResizeNearestNeighborGrad");
op.input_tensors.resize(2);
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]")
INFER_ERROR("Dimension must be 2 but is 1", op, "?;[1]");
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,?,?,d0_3]");
Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors[1] = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2]", "[d0_0,20,30,d0_3]");
}
TEST(ImageOpsTest, CropAndResizeGradImage_ShapeFn) {
ShapeInferenceTestOp op("CropAndResizeGradImage");
op.input_tensors.resize(4);
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;?;?;[1,2]");
INFER_OK(op, "?;?;?;?", "[?,?,?,?]");
Tensor image_size = test::AsTensor<int32>({10, 20, 30, 40});
op.input_tensors[3] = &image_size;
INFER_OK(op, "?;?;?;[1]", "[10, 20, 30, 40]");
}
TEST(ImageOpsTest, RandomCrop_ShapeFn) {
ShapeInferenceTestOp op("RandomCrop");
op.input_tensors.resize(2);
INFER_ERROR("must be rank 3", op, "[1,2];?");
INFER_ERROR("must be equal", op, "?;[3]");
INFER_ERROR("must be equal", op, "?;[1,2]");
INFER_OK(op, "[?,?,?];[2]", "[?,?,d0_2]");
Tensor size = test::AsTensor<int64_t>({10, 20});
op.input_tensors[1] = &size;
INFER_OK(op, "[?,?,?];[2]", "[10,20,d0_2]");
}
TEST(ImageOpsTest, QuantizedResizeBilinear_ShapeFn) {
ShapeInferenceTestOp op("QuantizedResizeBilinear");
op.input_tensors.resize(4);
NodeDefBuilder builder =
NodeDefBuilder("test", "QuantizedResizeBilinear")
.Input(NodeDefBuilder::NodeOut{"images", 0, DT_QINT32})
.Input(NodeDefBuilder::NodeOut{"size", 0, DT_INT32})
.Input(NodeDefBuilder::NodeOut{"min", 0, DT_FLOAT})
.Input(NodeDefBuilder::NodeOut{"max", 0, DT_FLOAT})
.Attr("T", DT_QINT32)
.Attr("Toutput", DT_QINT32);
TF_ASSERT_OK(builder.Finalize(&op.node_def));
INFER_OK(op, "[1,?,3,?];[2];[];[]",
"[d0_0,?,?,d0_3];[];[]");
INFER_ERROR("must be rank 0", op, "[1,?,3,?];[2];[?];[]");
INFER_ERROR("must be rank 0", op, "[1,?,3,?];[2];[];[?]");
const Tensor size_tensor = test::AsTensor<int32>({20, 30});
op.input_tensors.at(1) = &size_tensor;
INFER_OK(op, "[1,?,3,?];[2];[];[]", "[d0_0,20,30,d0_3];[];[]");
}
TEST(ImageOpsTest, DrawBoundingBoxes_ShapeFn) {
ShapeInferenceTestOp op("DrawBoundingBoxes");
op.input_tensors.resize(2);
INFER_ERROR("must be rank 4", op, "[1,?,3];?");
INFER_ERROR("should be either 1 (GRY), 3 (RGB), or 4 (RGBA)", op,
"[1,?,?,5];?");
INFER_ERROR("must be rank 3", op, "[1,?,?,4];[1,4]");
INFER_ERROR("Dimension must be 4", op, "[1,?,?,4];[1,2,2]");
INFER_OK(op, "[4,?,?,4];?", "in0");
INFER_OK(op, "[?,?,?,?];[?,?,?]", "in0");
INFER_OK(op, "[4,?,?,4];[?,?,?]", "in0");
INFER_OK(op, "[4,?,?,4];[?,?,4]", "in0");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/image_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/image_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
850ceba7-2580-4f7f-8fe5-1dbc2dc1582c | cpp | google/tensorstore | util | tensorstore/kvstore/file/util.cc | tensorstore/kvstore/file/util_test.cc | #include "tensorstore/kvstore/file/util.h"
#include <stddef.h>
#include <string_view>
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "tensorstore/kvstore/key_range.h"
namespace tensorstore {
namespace internal_file_util {
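// A key is valid iff it is non-empty, contains no NUL bytes, does not end in
// a path separator, and (after stripping at most one leading separator) has
// no empty, ".", or ".." components and no component ending in `lock_suffix`.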
bool IsKeyValid(std::string_view key, std::string_view lock_suffix) {
if (absl::StrContains(key, '\0')) return false;
if (key.empty()) return false;
if (key.back() == '/' || key.back() == '\\') {
return false;
}
if (key.front() == '/' || key.front() == '\\') {
key = key.substr(1);
}
for (std::string_view component :
absl::StrSplit(key, absl::ByAnyChar("/\\"))) {
if (component.empty()) return false;
if (component == ".") return false;
if (component == "..") return false;
if (!lock_suffix.empty() && component.size() >= lock_suffix.size() &&
absl::EndsWith(component, lock_suffix)) {
return false;
}
}
return true;
}
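// Returns the deepest directory prefix shared by every key in `range`:
// the longest common prefix truncated at its final '/' (exclusive), or the
// empty view if the common prefix contains no '/'.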
std::string_view LongestDirectoryPrefix(const KeyRange& range) {
std::string_view prefix = tensorstore::LongestPrefix(range);
const size_t i = prefix.rfind('/');
if (i == std::string_view::npos) return {};
return prefix.substr(0, i);
}
}
} | #include "tensorstore/kvstore/file/util.h"
#include <string_view>
#include <gtest/gtest.h>
#include "tensorstore/kvstore/key_range.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_file_util::IsKeyValid;
using ::tensorstore::internal_file_util::LongestDirectoryPrefix;
TEST(IsKeyValid, Basic) {
EXPECT_TRUE(IsKeyValid("tmp/root", ""));
EXPECT_TRUE(IsKeyValid("a", ""));
EXPECT_TRUE(IsKeyValid("a/b", ""));
EXPECT_TRUE(IsKeyValid("/tmp/root", ""));
EXPECT_TRUE(IsKeyValid("a\\b", ""));
EXPECT_FALSE(IsKeyValid("", ""));
EXPECT_FALSE(IsKeyValid("/", ""));
EXPECT_FALSE(IsKeyValid("
EXPECT_FALSE(IsKeyValid("
EXPECT_FALSE(IsKeyValid("/tmp/root/", ""));
EXPECT_FALSE(IsKeyValid("tmp
EXPECT_FALSE(IsKeyValid("tmp/./root", ""));
EXPECT_FALSE(IsKeyValid("tmp/../root", ""));
EXPECT_FALSE(IsKeyValid("tmp/root/", ""));
EXPECT_FALSE(IsKeyValid("tmp/.lock/a", ".lock"));
EXPECT_FALSE(IsKeyValid("tmp/foo.lock/a", ".lock"));
EXPECT_FALSE(IsKeyValid("\\", ""));
EXPECT_FALSE(IsKeyValid("tmp\\..\\root", ""));
EXPECT_FALSE(IsKeyValid("tmp\\root\\", ""));
EXPECT_FALSE(IsKeyValid(std::string_view("tmp/\0bar", 8), ""));
}
TEST(LongestDirectoryPrefix, Basic) {
EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"a", "b"}));
EXPECT_EQ("", LongestDirectoryPrefix(KeyRange{"/a", "/b"}));
EXPECT_EQ("/a", LongestDirectoryPrefix(KeyRange{"/a/a", "/a/b"}));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c9708396-efe5-4491-9f3f-40517e42f7ee | cpp | tensorflow/tensorflow | port | tensorflow/core/util/port.cc | third_party/xla/third_party/tsl/tsl/platform/port_test.cc | #include "tensorflow/core/util/port.h"
#include "absl/base/call_once.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
bool IsGoogleCudaEnabled() {
#if GOOGLE_CUDA
return true;
#else
return false;
#endif
}
bool IsBuiltWithROCm() {
#if TENSORFLOW_USE_ROCM
return true;
#else
return false;
#endif
}
bool IsBuiltWithXLA() {
#if TENSORFLOW_USE_XLA
return true;
#else
return false;
#endif
}
bool IsBuiltWithNvcc() {
#if TENSORFLOW_USE_NVCC
return true;
#else
return false;
#endif
}
bool IsAArch32Available() {
#if TF_LLVM_AARCH32_AVAILABLE
return true;
#else
return false;
#endif
}
bool IsAArch64Available() {
#if TF_LLVM_AARCH64_AVAILABLE
return true;
#else
return false;
#endif
}
bool IsPowerPCAvailable() {
#if TF_LLVM_POWERPC_AVAILABLE
return true;
#else
return false;
#endif
}
bool IsSystemZAvailable() {
#if TF_LLVM_S390X_AVAILABLE
return true;
#else
return false;
#endif
}
bool IsX86Available() {
#if TF_LLVM_X86_AVAILABLE
return true;
#else
return false;
#endif
}
bool GpuSupportsHalfMatMulAndConv() {
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
return true;
#else
return false;
#endif
}
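// Default for oneDNN when TF_ENABLE_ONEDNN_OPTS is unset: enabled for
// Google-internal and Windows x86 builds, and on Linux only when the CPU
// reports AVX512-VNNI/BF16, AVX-VNNI, or AMX support, or is an ARM
// Neoverse V1.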
inline bool DefaultOneDnnPolicy() {
#if !defined(INTEL_MKL)
return false;
#elif defined(PLATFORM_GOOGLE)
return true;
#elif defined(PLATFORM_WINDOWS) && defined(PLATFORM_IS_X86)
return true;
#elif defined(__linux__)
return port::TestCPUFeature(port::CPUFeature::AVX512_VNNI) ||
port::TestCPUFeature(port::CPUFeature::AVX512_BF16) ||
port::TestCPUFeature(port::CPUFeature::AVX_VNNI) ||
port::TestCPUFeature(port::CPUFeature::AMX_TILE) ||
port::TestCPUFeature(port::CPUFeature::AMX_INT8) ||
port::TestCPUFeature(port::CPUFeature::AMX_BF16) ||
port::TestAarch64CPU(
port::Aarch64CPU::ARM_NEOVERSE_V1);
#else
return false;
#endif
}
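// In ENABLE_MKL builds oneDNN is on unless TF_DISABLE_MKL turns it off;
// otherwise TF_ENABLE_ONEDNN_OPTS overrides DefaultOneDnnPolicy(). The
// decision is made once and cached.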
bool IsMklEnabled() {
#ifndef INTEL_MKL
return false;
#endif
static absl::once_flag once;
#ifdef ENABLE_MKL
static bool oneDNN_disabled = false;
absl::call_once(once, [&] {
TF_CHECK_OK(ReadBoolFromEnvVar("TF_DISABLE_MKL", false, &oneDNN_disabled));
if (oneDNN_disabled) VLOG(2) << "TF-MKL: Disabling oneDNN";
});
return (!oneDNN_disabled);
#else
static bool oneDNN_enabled = DefaultOneDnnPolicy();
absl::call_once(once, [&] {
auto status = ReadBoolFromEnvVar("TF_ENABLE_ONEDNN_OPTS", oneDNN_enabled,
&oneDNN_enabled);
if (!status.ok()) {
LOG(WARNING) << "TF_ENABLE_ONEDNN_OPTS is not set to either '0', 'false',"
<< " '1', or 'true'. Using the default setting: "
<< oneDNN_enabled;
}
if (oneDNN_enabled) {
LOG(INFO) << "oneDNN custom operations are on. "
<< "You may see slightly different numerical results due to "
<< "floating-point round-off errors from different computation "
<< "orders. To turn them off, set the environment variable "
<< "`TF_ENABLE_ONEDNN_OPTS=0`.";
}
});
return oneDNN_enabled;
#endif
}
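// ZenDNN is only available in AMD_ZENDNN builds and is off by default;
// TF_ENABLE_ZENDNN_OPTS opts in.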
bool IsZenDnnEnabled() {
#ifndef AMD_ZENDNN
return false;
#else
static absl::once_flag once;
static bool ZenDNN_enabled = false;
absl::call_once(once, [&] {
auto status = ReadBoolFromEnvVar("TF_ENABLE_ZENDNN_OPTS", ZenDNN_enabled,
&ZenDNN_enabled);
if (!status.ok()) {
LOG(WARNING) << "TF_ENABLE_ZENDNN_OPTS is not set to either '0', 'false',"
<< " '1', or 'true'. Using the default setting: "
<< ZenDNN_enabled;
}
if (ZenDNN_enabled) {
LOG(INFO) << "ZenDNN custom operations are on. "
<< "You may see slightly different numerical results due to "
<< "floating-point round-off errors from different computation "
<< "orders. To turn them off, set the environment variable "
<< "`TF_ENABLE_ZENDNN_OPTS=0`.";
}
});
return ZenDNN_enabled;
#endif
}
} | #include <condition_variable>
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace port {
TEST(Port, AlignedMalloc) {
for (size_t alignment = 1; alignment <= 1 << 20; alignment <<= 1) {
void* p = AlignedMalloc(1, alignment);
ASSERT_TRUE(p != nullptr) << "AlignedMalloc(1, " << alignment << ")";
uintptr_t pval = reinterpret_cast<uintptr_t>(p);
EXPECT_EQ(pval % alignment, 0);
AlignedFree(p);
}
}
TEST(Port, GetCurrentCPU) {
const int cpu = GetCurrentCPU();
#if !defined(__APPLE__)
EXPECT_GE(cpu, 0);
EXPECT_LT(cpu, NumTotalCPUs());
#endif
}
TEST(ConditionVariable, WaitForMilliseconds_Timeout) {
mutex m;
mutex_lock l(m);
condition_variable cv;
ConditionResult result = tsl::kCond_MaybeNotified;
time_t start = time(nullptr);
while (result == tsl::kCond_MaybeNotified) {
result = WaitForMilliseconds(&l, &cv, 3000);
}
EXPECT_EQ(result, tsl::kCond_Timeout);
time_t finish = time(nullptr);
EXPECT_GE(finish - start, 3);
}
TEST(ConditionVariable, WaitForMilliseconds_Signalled) {
thread::ThreadPool pool(Env::Default(), "test", 1);
mutex m;
mutex_lock l(m);
condition_variable cv;
time_t start = time(nullptr);
pool.Schedule([&m, &cv]() {
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
mutex_lock l(m);
cv.notify_all();
});
EXPECT_EQ(WaitForMilliseconds(&l, &cv, 3000), tsl::kCond_MaybeNotified);
time_t finish = time(nullptr);
EXPECT_LT(finish - start, 3);
}
TEST(ConditionalCriticalSections, AwaitWithDeadline_Timeout) {
bool always_false = false;
mutex m;
m.lock();
time_t start = time(nullptr);
bool result =
m.AwaitWithDeadline(Condition(&always_false),
EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos);
time_t finish = time(nullptr);
m.unlock();
EXPECT_EQ(result, false);
EXPECT_GE(finish - start, 3);
}
TEST(ConditionalCriticalSections, AwaitWithDeadline_Woken) {
thread::ThreadPool pool(Env::Default(), "test", 1);
bool woken = false;
mutex m;
m.lock();
time_t start = time(nullptr);
pool.Schedule([&m, &woken]() {
Env::Default()->SleepForMicroseconds(1 * 1000 * 1000);
m.lock();
woken = true;
m.unlock();
});
bool result = m.AwaitWithDeadline(
Condition(&woken), EnvTime::NowNanos() + 3 * EnvTime::kSecondsToNanos);
time_t finish = time(nullptr);
m.unlock();
EXPECT_EQ(result, true);
EXPECT_LT(finish - start, 3);
}
static bool Invert(bool* b) { return !*b; }
class InvertClass {
public:
explicit InvertClass(bool* value) : value_(value) {}
bool Value() { return !*this->value_; }
private:
InvertClass();
bool* value_;
};
TEST(ConditionalCriticalSections, Await_PingPong) {
thread::ThreadPool pool(Env::Default(), "test", 1);
bool ping_pong = false;
bool done = false;
mutex m;
pool.Schedule([&m, &ping_pong, &done]() {
m.lock();
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&ping_pong));
ping_pong = false;
}
done = true;
m.unlock();
});
m.lock();
InvertClass invert(&ping_pong);
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&Invert, &ping_pong));
ping_pong = true;
}
m.Await(Condition(&done));
m.unlock();
}
TEST(ConditionalCriticalSections, Await_PingPongMethod) {
thread::ThreadPool pool(Env::Default(), "test", 1);
bool ping_pong = false;
bool done = false;
mutex m;
pool.Schedule([&m, &ping_pong, &done]() {
m.lock();
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&ping_pong));
ping_pong = false;
}
done = true;
m.unlock();
});
m.lock();
InvertClass invert(&ping_pong);
for (int i = 0; i != 1000; i++) {
m.Await(Condition(&invert, &InvertClass::Value));
ping_pong = true;
}
m.Await(Condition(&done));
m.unlock();
}
TEST(TestCPUFeature, TestFeature) {
const bool has_avx = TestCPUFeature(CPUFeature::AVX);
LOG(INFO) << "has_avx = " << has_avx;
const bool has_avx2 = TestCPUFeature(CPUFeature::AVX2);
LOG(INFO) << "has_avx2 = " << has_avx2;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/port.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/port_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
65156ca6-0b0f-4ed2-aba1-0ec9220c7da4 | cpp | tensorflow/tensorflow | array_ops | tensorflow/c/experimental/ops/array_ops.cc | tensorflow/core/ops/array_ops_test.cc | #include "tensorflow/c/experimental/ops/array_ops.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/tracing_utils.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
using tensorflow::tracing::MaybeSetOpName;
namespace tensorflow {
namespace ops {
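// Each wrapper below follows the same pattern: create an operation, reset it
// to the named op on the requested device, attach inputs (and any attrs),
// then execute with the expected number of return values.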
Status Identity(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Identity", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status IdentityN(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> input,
absl::Span<AbstractTensorHandle*> output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("IdentityN", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInputList(input));
int num_retvals = output.size();
return op_ptr->Execute(output, &num_retvals);
}
Status ZerosLike(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("ZerosLike", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
Status Shape(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle** output, DataType out_type, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("Shape", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
TF_RETURN_IF_ERROR(op_ptr->SetAttrType("out_type", out_type));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status ExpandDims(AbstractContext* ctx, AbstractTensorHandle* const input,
AbstractTensorHandle* const dim,
AbstractTensorHandle** output, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("ExpandDims", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(input));
TF_RETURN_IF_ERROR(op_ptr->AddInput(dim));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(output, 1), &num_retvals);
}
Status OnesLike(AbstractContext* ctx, AbstractTensorHandle* const x,
AbstractTensorHandle** y, const char* name,
const char* raw_device_name) {
AbstractOperationPtr op_ptr(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op_ptr->Reset("OnesLike", raw_device_name));
TF_RETURN_IF_ERROR(MaybeSetOpName(op_ptr.get(), name));
TF_RETURN_IF_ERROR(op_ptr->AddInput(x));
int num_retvals = 1;
return op_ptr->Execute(absl::MakeSpan(y, 1), &num_retvals);
}
}
} | #include "tensorflow/core/common_runtime/type_inference.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
TEST(ArrayOpsTest, TensorScatterUpdate_ShapeFn) {
ShapeInferenceTestOp op("TensorScatterUpdate");
INFER_OK(op, "[4,3];[8,2];[8]", "in0");
INFER_OK(op, "[?,?];[?,2];[?]", "in0");
INFER_OK(op, "[?];[?];[?]", "in0");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[];[?,2];[?]");
INFER_ERROR("Indices and updates specified for empty input", op,
"[0,2,2];[8,2];[8]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[?,?];[8,2];[9]");
INFER_ERROR(
"Dimensions [2,2) of input[shape=[?,?]] = [] must match "
"dimensions [1,2) of updates[shape=[?,1]] = [1]",
op, "[?,?];[?,2];[?,1]");
}
TEST(ArrayOpsTest, ScatterNd_ShapeFn) {
ShapeInferenceTestOp op("ScatterNd");
INFER_OK(op, "[8,2];[8];[2]", "[?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[?,2];[?];[]");
INFER_ERROR(
"Dimensions [0,1) of indices[shape=[8,2]] = [8] must match "
"dimensions [0,1) of updates[shape=[9]] = [9]",
op, "[8,2];[9];[?]");
}
TEST(ArrayOpsTest, UnravelIndex_ShapeFn) {
ShapeInferenceTestOp op("UnravelIndex");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[?]", "[d1_0]");
INFER_OK(op, "[4,5];[?]", "[d1_0,20]");
INFER_OK(op, "[2,3,4];[?]", "[d1_0,24]");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[?];[?]", "[d1_0,?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,1]");
}
TEST(ArrayOpsTest, Pack_ShapeFn) {
ShapeInferenceTestOp op("Pack");
auto set_axis = [&op](int axis) {
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Pack")
.Input(src_list)
.Attr("N", n)
.Attr("axis", axis)
.Finalize(&op.node_def));
};
set_axis(0);
INFER_OK(op, "?;?;?", "?");
for (int axis : {0, -3}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[3,d0_0|d1_0,d0_1|d1_1]");
INFER_OK(op, "[?,3];[1,3];?", "[3,d1_0,d0_1|d1_1]");
INFER_OK(op, "[?,?];[1,3];?", "[3,d1_0,d1_1]");
}
for (int axis : {1, -2}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[d0_0|d1_0,3,d0_1|d1_1]");
INFER_OK(op, "[?,3];[1,3];?", "[d1_0,3,d0_1|d1_1]");
INFER_OK(op, "[?,?];[1,3];?", "[d1_0,3,d1_1]");
}
for (int axis : {2, -1}) {
set_axis(axis);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,3];[1,3];?", "[d0_0|d1_0,d0_1|d1_1,3]");
INFER_OK(op, "[?,3];[1,3];?", "[d1_0,d0_1|d1_1,3]");
INFER_OK(op, "[?,?];[1,3];?", "[d1_0,d1_1,3]");
}
set_axis(-4);
INFER_ERROR("Invalid axis: -4; must be in [-3,3)", op, "[1,3];[1,3];?");
set_axis(3);
INFER_ERROR("Invalid axis: 3; must be in [-3,3)", op, "[1,3];[1,3];?");
set_axis(0);
INFER_ERROR("Shapes must be equal rank, but are 3 and 2", op,
"[1,2,3];?;[1,4]");
INFER_ERROR("From merging shape 0 with other shapes.", op, "[1,2,3];?;[1,4]");
}
TEST(ArrayOpsTest, UnPack_ShapeFn) {
ShapeInferenceTestOp op("Unpack");
auto set_axis_and_num = [&op](int axis, int num) {
TF_ASSERT_OK(NodeDefBuilder("test", "Unpack")
.Input("a", 0, DT_FLOAT)
.Attr("axis", axis)
.Attr("num", num)
.Finalize(&op.node_def));
};
set_axis_and_num(0, 1);
INFER_OK(op, "?", "?");
for (int axis : {0, -3}) {
set_axis_and_num(axis, 1);
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,2,3]", "[d0_1,d0_2]");
INFER_OK(op, "[?,?,?]", "[d0_1,d0_2]");
}
for (int axis : {1, -2}) {
set_axis_and_num(axis, 2);
INFER_OK(op, "[1,2,3]", "[d0_0,d0_2];[d0_0,d0_2]");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_2];[d0_0,d0_2]");
}
for (int axis : {2, -1}) {
set_axis_and_num(axis, 3);
INFER_OK(op, "[1,2,3]", "[d0_0,d0_1];[d0_0,d0_1];[d0_0,d0_1]");
INFER_OK(op, "[?,?,?]", "[d0_0,d0_1];[d0_0,d0_1];[d0_0,d0_1]");
}
set_axis_and_num(2, 2);
INFER_ERROR("Dimension must be 2 but is 3", op, "[1,2,3]");
set_axis_and_num(-4, 3);
INFER_ERROR("Invalid axis: -4; must be in [-3,3)", op, "[1,2,3]");
set_axis_and_num(3, 3);
INFER_ERROR("Invalid axis: 3; must be in [-3,3)", op, "[1,2,3]");
}
TEST(ArrayOpsTest, Const_ShapeFn) {
ShapeInferenceTestOp op("Const");
TensorProto tensor_proto;
auto* shape_proto = tensor_proto.mutable_tensor_shape();
auto rebuild_node_def = [&op, &tensor_proto]() {
TF_ASSERT_OK(NodeDefBuilder("test", "Const")
.Attr("value", tensor_proto)
.Finalize(&op.node_def));
};
TensorShape{}.AsProto(shape_proto);
rebuild_node_def();
INFER_OK(op, "", "[]");
TensorShape{1, 2, 3, 4}.AsProto(shape_proto);
rebuild_node_def();
INFER_OK(op, "", "[1,2,3,4]");
shape_proto->add_dim()->set_size(-1);
rebuild_node_def();
INFER_ERROR("Shape [1,2,3,4,?] is not fully defined", op, "");
}
TEST(ArrayOpsTest, UnchangedShapes_ShapeFn) {
for (const char* op_name : {
"CheckNumerics",
"Identity",
"RefIdentity",
"QuantizeAndDequantize",
"StopGradient",
"ZerosLike",
"OnesLike",
}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "in0");
INFER_OK(op, "[]", "in0");
INFER_OK(op, "[1,2,?,4,5]", "in0");
}
ShapeInferenceTestOp op("MatrixBandPart");
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[];?;?", "in0");
INFER_OK(op, "[1,2,?,4,5];?;?", "in0");
}
TEST(ArrayOpsTest, GuaranteeConst_ShapeFn) {
ShapeInferenceTestOp op("GuaranteeConst");
INFER_OK(op, "?", "in0");
INFER_OK(op, "[]", "in0");
INFER_OK(op, "[1,2,?,4,5]", "in0");
}
TEST(ArrayOpsTest, Identity_ShapeFnHandles) {
const char* op_name = "Identity";
ShapeInferenceTestOp op(op_name);
const OpRegistrationData* op_reg_data;
TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
std::vector<
std::unique_ptr<std::vector<std::pair<PartialTensorShape, DataType>>>>
handle_data;
handle_data.emplace_back(
new std::vector<std::pair<PartialTensorShape, DataType>>(
{{PartialTensorShape(), DT_BOOL}}));
shape_inference::InferenceContext c(
TF_GRAPH_DEF_VERSION, op.node_def, op_reg_data->op_def,
{PartialTensorShape()}, {}, {}, handle_data);
TF_ASSERT_OK(c.construction_status());
ASSERT_TRUE(op_reg_data->shape_inference_fn != nullptr);
TF_ASSERT_OK(c.Run(op_reg_data->shape_inference_fn));
const auto* shapes_and_types = c.output_handle_shapes_and_types(0);
ASSERT_TRUE(shapes_and_types != nullptr);
ASSERT_EQ(1, shapes_and_types->size());
EXPECT_EQ((*shapes_and_types)[0].dtype, DT_BOOL);
}
TEST(ArrayOpsTest, Diag_ShapeFn) {
ShapeInferenceTestOp op("Diag");
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,?,3]", "[d0_0,d0_1,d0_2,d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,1,2,3]", "[d0_0,d0_1,d0_2,d0_3,d0_0,d0_1,d0_2,d0_3]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
}
TEST(ArrayOpsTest, DiagPart_ShapeFn) {
ShapeInferenceTestOp op("DiagPart");
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,?,?,4]", "[d0_0,d0_3]");
INFER_OK(op, "[1,?,3,?,4,3]", "[d0_0,d0_4,d0_2|d0_5]");
INFER_OK(op, "[1,2,3,?,?,?,?,4]", "[d0_0,d0_1,d0_2,d0_7]");
INFER_ERROR("Input must have even and non-zero rank", op, "[]");
INFER_ERROR("Input must have even and non-zero rank", op, "[?]");
INFER_ERROR("Input must have even and non-zero rank", op, "[1,2,3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 10", op, "[1,2,?,10]");
}
TEST(ArrayOpsTest, MatrixDiag_ShapeFn) {
ShapeInferenceTestOp op("MatrixDiag");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]");
INFER_OK(op, "[?]", "[d0_0,d0_0]");
INFER_OK(op, "[1,?,?,4]", "[d0_0,d0_1,d0_2,d0_3,d0_3]");
}
TEST(ArrayOpsTest, MatrixDiagPart_ShapeFn) {
ShapeInferenceTestOp op("MatrixDiagPart");
INFER_OK(op, "?", "?");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[?]");
INFER_OK(op, "[?,1,2,2]", "[d0_0,d0_1,d0_2|d0_3]");
INFER_OK(op, "[?,1,2,3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[?,1,3,2]", "[d0_0,d0_1,d0_3]");
}
TEST(ArrayOpsTest, Reverse_ShapeFn) {
ShapeInferenceTestOp op("Reverse");
INFER_OK(op, "?;?", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,2]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4]");
INFER_ERROR("reverse does not work on tensors with more than 8 dimensions",
op, "[1,2,3,4,5,6,7,8,9];[9]");
INFER_OK(op, "[1,2,3,?];[4]", "in0");
INFER_OK(op, "[1,2,3,?,5,6,7,8];[8]", "in0");
}
TEST(ArrayOpsTest, ReverseV2_ShapeFn) {
ShapeInferenceTestOp op("ReverseV2");
INFER_OK(op, "?;?", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,2]");
INFER_OK(op, "[1,2,3];[2]", "in0");
INFER_ERROR("reverse does not work on tensors with more than 8 dimensions",
op, "[1,2,3,4,5,6,7,8,9];[9]");
INFER_OK(op, "[1,2,3,?];[4]", "in0");
INFER_OK(op, "[1,2,3,?,5,6,7,8];[8]", "in0");
}
TEST(ArrayOpsTest, Fill_ShapeFn) {
ShapeInferenceTestOp op("Fill");
AddNodeAttr("index_type", DT_INT32, &op.node_def);
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?];?", "?");
INFER_OK(op, "[4];?", "[?,?,?,?]");
Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
op.input_tensors[0] = &in_t;
INFER_OK(op, "[4];?", "[1,2,3,4]");
}
TEST(ArrayOpsTest, Gather_ShapeFn) {
ShapeInferenceTestOp op("Gather");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,?,2];[3]", "[d1_0,d0_1,d0_2]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1,2,3]");
}
TEST(ArrayOpsTest, GatherV2_ShapeFn) {
ShapeInferenceTestOp op("GatherV2");
AddNodeAttr("batch_dims", 0, &op.node_def);
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[1,2,3];[3];[]", "[?,?,?]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op,
"[];[1,2,3];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];[1,2,3];[1]");
Tensor axis_dim_t;
op.input_tensors.resize(3);
op.input_tensors[2] = &axis_dim_t;
axis_dim_t = test::AsScalar(1);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[1];[1,2];[]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[];[]", "[d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[];[]", "[d0_0,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[];[]", "[d0_0,d0_1]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[5];[]", "[d1_0,d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[5];[]", "[d0_0,d1_0,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[5];[]", "[d0_0,d0_1,d1_0]");
axis_dim_t = test::AsScalar(0);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d1_0,d1_1,d0_1,d0_2]");
axis_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d1_0,d1_1,d0_2]");
axis_dim_t = test::AsScalar(2);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d0_1,d1_0,d1_1]");
axis_dim_t = test::AsScalar(-3);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d1_0,d1_1,d0_1,d0_2]");
axis_dim_t = test::AsScalar(-2);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d1_0,d1_1,d0_2]");
axis_dim_t = test::AsScalar(-1);
INFER_OK(op, "[1,2,3];[5,6];[]", "[d0_0,d0_1,d1_0,d1_1]");
ShapeInferenceTestOp batch_op("GatherV2");
AddNodeAttr("batch_dims", 1, &batch_op.node_def);
INFER_OK(batch_op, "[1,4800,8];[1,28400];[]", "[?,?,?]");
ShapeInferenceTestOp batch_op_2("GatherV2");
AddNodeAttr("batch_dims", 2, &batch_op_2.node_def);
INFER_OK(batch_op_2, "[1,2,3,4,5];[1,2,3];[]", "[?,?,?,?,?]");
}
TEST(ArrayOpsTest, GatherNd_ShapeFn) {
ShapeInferenceTestOp op("GatherNd");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[1,?,3,?];[?,0]", "[d1_0,d0_0,d0_1,d0_2,d0_3]");
INFER_OK(op, "[1,?,3,?];[?,4]", "[d1_0]");
INFER_ERROR("indices.shape[-1] must be <= params.rank", op, "[1,2,3];[4]");
}
TEST(ArrayOpsTest, Shape_ShapeFn) {
ShapeInferenceTestOp op("Shape");
AddNodeAttr("out_type", DT_INT32, &op.node_def);
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[?]", "[1]");
INFER_OK(op, "[?,2,3,4,5]", "[5]");
}
static Status type_inference(Graph& graph) {
GraphOptimizationPassOptions opt_options;
std::unique_ptr<Graph> graph_ptr(new Graph(OpRegistry::Global()));
graph_ptr->Copy(graph);
opt_options.graph = &graph_ptr;
opt_options.flib_def = graph.mutable_flib_def();
TypeInferencePass pass;
return pass.Run(opt_options);
}
REGISTER_OP("ArrayOpsTest>ConstTypeCtor")
.Output("output: dtype")
.Attr("value: tensor")
.Attr("dtype: type")
.SetTypeConstructor(full_type::Unary(TFT_TENSOR, "dtype"))
.SetShapeFn(shape_inference::UnknownShape);
TEST(ArrayOpsTest, Shape_TypeCtor) {
Graph graph(OpRegistry::Global());
Node* input_tensor_op;
TensorProto tensor_proto;
TF_EXPECT_OK(NodeBuilder("input_tensor_op", "ArrayOpsTest>ConstTypeCtor")
.Attr("value", tensor_proto)
.Attr("dtype", DT_FLOAT)
.Finalize(&graph, &input_tensor_op));
Node* shape_op;
TF_EXPECT_OK(NodeBuilder("shape_op", "Shape")
.Input(input_tensor_op)
.Attr("T", DT_FLOAT)
.Attr("out_type", DT_INT32)
.Finalize(&graph, &shape_op));
TF_EXPECT_OK(type_inference(graph));
FullTypeDef expected_shape_op_t;
protobuf::TextFormat::Parser parser;
CHECK(parser.ParseFromString(
R"pb(type_id: TFT_PRODUCT
args {
type_id: TFT_SHAPE_TENSOR
args { type_id: TFT_INT32 }
})pb",
&expected_shape_op_t));
EXPECT_TRUE(full_type::IsEqual(shape_op->def().experimental_type(),
expected_shape_op_t))
<< "fulltype is\n"
<< shape_op->def().experimental_type().DebugString() << "\nexpected\n"
<< expected_shape_op_t.DebugString();
}
TEST(ArrayOpsTest, ShapeN_ShapeFn) {
ShapeInferenceTestOp op("ShapeN");
int n = 3;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "ShapeN")
.Input(src_list)
.Attr("N", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "[?];[?];[?]");
INFER_OK(op, "[?];[?];[?]", "[1];[1];[1]");
INFER_OK(op, "[?,2,3,4,5];?;[1,?,3]", "[5];[?];[3]");
}
TEST(ArrayOpsTest, Unique_ShapeFn) {
ShapeInferenceTestOp op("Unique");
INFER_OK(op, "?", "[?];in0");
INFER_OK(op, "[5]", "[?];in0");
INFER_ERROR("Shape must be rank 1 but is rank 5", op, "[1,2,3,?,5]");
}
TEST(ArrayOpsTest, UniqueWithCounts_ShapeFn) {
ShapeInferenceTestOp op("UniqueWithCounts");
INFER_OK(op, "?", "[?];in0;[?]");
INFER_OK(op, "[1,2,3,?,5]", "[?];in0;[?]");
}
TEST(ArrayOpsTest, InvertPermutation_ShapeFn) {
ShapeInferenceTestOp op("InvertPermutation");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[1]", "in0");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(ArrayOpsTest, PadD_ShapeFn) {
for (const char* op_name : {"Pad", "MirrorPad"}) {
ShapeInferenceTestOp op(op_name);
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3]");
INFER_ERROR("Dimension must be 2 but is 4", op, "?;[1,4]");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4,2]");
INFER_OK(op, "[1,2,3];?", "[?,?,?]");
INFER_OK(op, "?;[3,2]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[100,200,300];[3,2]", "[111,222,333]");
INFER_OK(op, "[100,?,300];[3,2]", "[111,?,333]");
INFER_OK(op, "?;[3,2]", "[?,?,?]");
INFER_OK(op, "?;?", "[?,?,?]");
}
}
TEST(ArrayOpsTest, PadV2_ShapeFn) {
ShapeInferenceTestOp op("PadV2");
op.input_tensors.resize(3);
INFER_OK(op, "?;?;?", "?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;[1,2,3];?");
INFER_ERROR("Dimension must be 2 but is 4", op, "?;[1,4];?");
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];[4,2];[]");
INFER_OK(op, "[1,2,3];?;[]", "[?,?,?]");
INFER_OK(op, "?;[3,2];[]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[100,200,300];[3,2];[]", "[111,222,333]");
INFER_OK(op, "[100,?,300];[3,2];[]", "[111,?,333]");
INFER_OK(op, "?;[3,2];[]", "[?,?,?]");
INFER_OK(op, "?;?;[]", "[?,?,?]");
}
TEST(ArrayOpsTest, MirrorPadGrad_ShapeFn) {
ShapeInferenceTestOp op("MirrorPadGrad");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[?,4]", "?");
INFER_ERROR("must be rank 3 but is rank 2", op, "[?,?];[3,2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 3 and 2", op,
"[?,?,?];[3,3]");
INFER_OK(op, "[?,?,?];[3,2]", "[?,?,?]");
Tensor paddings_t(DT_INT64, TensorShape{3, 2});
test::FillValues<int64_t>(&paddings_t, {1, 10, 2, 20, 3, 30});
op.input_tensors[1] = &paddings_t;
INFER_OK(op, "[111,222,333];[3,2]", "[100,200,300]");
INFER_OK(op, "[111,?,333];[3,2]", "[100,?,300]");
}
TEST(ArrayOpsTest, BroadcastArgs_ShapeFn) {
ShapeInferenceTestOp op("BroadcastArgs");
INFER_OK(op, "?;?", "[?]");
INFER_OK(op, "[123];[1]", "[123]");
INFER_OK(op, "[1];[123]", "[123]");
INFER_OK(op, "[123];[121]", "[123]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, BroadcastTo_ShapeFn) {
ShapeInferenceTestOp op("BroadcastTo");
op.input_tensors.resize(2);
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[];[1]", "[?]");
INFER_OK(op, "[1];[1]", "[?]");
INFER_OK(op, "[1];[2]", "[?,?]");
INFER_OK(op, "[2,2];[3]", "[?,d0_0,d0_1]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[2,2];[1]");
Tensor shape_t(DT_INT64, TensorShape{3});
test::FillValues<int64_t>(&shape_t, {2, 10, 3});
op.input_tensors[1] = &shape_t;
INFER_OK(op, "[1,?,1];[3]", "[2,10,3]");
INFER_OK(op, "[1,1,1];[3]", "[2,10,3]");
INFER_OK(op, "[10,1];[3]", "[2,d0_0,3]");
INFER_ERROR("Dimensions must be equal, but are 3 and 2 for", op,
"[3,1,1];[3]");
INFER_ERROR("Dimensions must be equal, but are 2 and 10 for", op,
"[2,2,1];[3]");
}
TEST(ArrayOpsTest, BroadcastGradientArgs_ShapeFn) {
ShapeInferenceTestOp op("BroadcastGradientArgs");
INFER_OK(op, "?;?", "[?];[?]");
INFER_OK(op, "[123];[456]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, ListDiff_ShapeFn) {
ShapeInferenceTestOp op("BroadcastGradientArgs");
INFER_OK(op, "?;?", "[?];[?]");
INFER_OK(op, "[123];[456]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]");
}
TEST(ArrayOpsTest, MatrixSetDiag_ShapeFn) {
ShapeInferenceTestOp op("MatrixSetDiag");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op, "[1];?");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "?;[]");
INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[2,2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,2];[2,2]");
INFER_ERROR("Dimensions must be equal, but are 2 and 3", op, "[2,3];[3]");
INFER_OK(op, "?;?", "in0");
INFER_OK(op, "[1,2,2];[1,2]", "in0");
INFER_OK(op, "[1,2,3];?", "in0");
INFER_OK(op, "[1,3,2];?", "in0");
INFER_OK(op, "[1,?,2];[?,?]", "in0");
INFER_OK(op, "[1,?,?];[?,2]", "in0");
INFER_OK(op, "?;[1,2]", "[d1_0,?,?]");
INFER_OK(op, "[?,?,3];[1,2]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,?];[1,2]", "[d1_0,d0_1,d0_2]");
INFER_OK(op, "[?,3,2];[1,2]", "[d1_0,d0_1,d0_2]");
}
TEST(ArrayOpsTest, ExpandDims_ShapeFn) {
ShapeInferenceTestOp op("ExpandDims");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
Tensor dim_t;
op.input_tensors[1] = &dim_t;
for (int32_t idx : {0, -4}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[1,d0_0,d0_1,d0_2]");
}
for (int32_t idx : {1, -3}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,1,d0_1,d0_2]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,1,d0_1,d0_2]");
}
for (int32_t idx : {2, -2}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,1,d0_2]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,1,d0_2]");
}
for (int32_t idx : {3, -1}) {
dim_t = test::AsScalar<int32>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,d0_2,1]");
dim_t = test::AsScalar<int64_t>(idx);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[d0_0,d0_1,d0_2,1]");
}
for (int32_t idx : {4, -5}) {
dim_t = test::AsScalar<int32>(idx);
INFER_ERROR("not in the interval [-4, 3]", op, "[5,?,7];?");
dim_t = test::AsScalar<int64_t>(idx);
INFER_ERROR("not in the interval [-4, 3]", op, "[5,?,7];?");
}
std::vector<int32> dims;
dims.push_back(0);
dim_t = test::AsTensor<int32>(dims);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[5,?,7];?", "[1,d0_0,d0_1,d0_2]");
dims.push_back(1);
dim_t = test::AsTensor<int32>(dims);
INFER_ERROR("'dim' input must be a tensor with a single", op, "?;?");
INFER_ERROR("'dim' input must be a tensor with a single", op, "[5,6,7];?");
dim_t = test::AsScalar<int32>(0);
INFER_OK(op, "[2];[]", "[1,d0_0]");
dim_t = test::AsScalar<int32>(1);
INFER_OK(op, "[2];[]", "[d0_0,1]");
dim_t = test::AsScalar<int32>(-1);
INFER_OK(op, "[2];[]", "[d0_0,1]");
}
TEST(ArrayOpsTest, ImmutableConst_ShapeFn) {
ShapeInferenceTestOp op("ImmutableConst");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({1, 2, 3}))
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", TensorShape({}))
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_OK(op, "", "[]");
TF_ASSERT_OK(NodeDefBuilder("test", "ImmutableConst")
.Attr("dtype", DT_FLOAT)
.Attr("shape", "invalid")
.Attr("memory_region_name", "test_region")
.Finalize(&op.node_def));
INFER_ERROR("AttrValue had value with type 'string' when 'shape' expected",
op, "");
}
TEST(ArrayOpsTest, Concat_ShapeFn) {
ShapeInferenceTestOp op("Concat");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "Concat")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Attr("n", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?;?");
set_n(7);
INFER_OK(op, "?;?;?;?;[1,2,3];?;[3,2,1];?", "[?,?,?]");
set_n(4);
INFER_OK(op, "?;?;?;[1,2,3,4];[4,3,2,1]", "[?,?,?,?]");
INFER_OK(op, "?;?;?;?;?", "?");
INFER_ERROR("Can't concatenate scalars (use tf.stack instead)", op,
"?;?;?;[];[]");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;?;[1,2];[1,2,3]");
Tensor concat_dim_t;
op.input_tensors.push_back(&concat_dim_t);
set_n(2);
for (int concat_dim : {0, -3}) {
concat_dim_t = test::AsScalar(concat_dim);
INFER_OK(op, "[];[100,2,?];[10,?,3]", "[110,d1_1,d2_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[];[100,2,5];[10,?,3]");
INFER_OK(op, "[];[100,2,?];[?,?,3]", "[?,d1_1,d2_2]");
INFER_OK(op, "[];[?,2,?];[10,?,3]", "[?,d1_1,d2_2]");
}
for (bool use_negative : {false, true}) {
concat_dim_t = test::AsScalar(use_negative ? -2 : 1);
INFER_OK(op, "[];[1,100,?];[?,10,3]", "[d1_0,110,d2_2]");
concat_dim_t = test::AsScalar(use_negative ? -1 : 1);
INFER_OK(op, "[];[1,100];[?,10]", "[d1_0,110]");
INFER_OK(op, "[];[?,100];[1,10]", "[d2_0,110]");
concat_dim_t = test::AsScalar(use_negative ? -2 : 1);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100];[10,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100,5];[10]");
}
concat_dim_t = test::AsScalar(-2);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100];[10,?]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[];[100,5];[10]");
set_n(5);
concat_dim_t = test::AsScalar(1);
INFER_OK(op, "[];?;[1,100,?];[?,?,?];[?,10,3];?", "[d2_0,?,d4_2]");
}
TEST(ArrayOpsTest, ConcatV2_ShapeFn) {
ShapeInferenceTestOp op("ConcatV2");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
TF_ASSERT_OK(NodeDefBuilder("test", "ConcatV2")
.Input(src_list)
.Input({"axis", 0, DT_INT32})
.Attr("n", n)
.Finalize(&op.node_def));
};
set_n(2);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;?;[1]");
set_n(7);
INFER_OK(op, "?;?;?;?;[1,2,3];?;[3,2,1];?", "[?,?,?]");
set_n(4);
INFER_OK(op, "?;?;[1,2,3,4];[4,3,2,1];?", "[?,?,?,?]");
INFER_OK(op, "?;?;?;?;?", "?");
INFER_ERROR("Can't concatenate scalars (use tf.stack instead)", op,
"?;?;[];[];?");
INFER_ERROR("Shape must be rank 2 but is rank 3", op, "?;?;[1,2];[1,2,3];?");
Tensor concat_dim_t;
op.input_tensors.resize(3);
op.input_tensors[2] = &concat_dim_t;
set_n(2);
concat_dim_t = test::AsScalar(0);
INFER_OK(op, "[100,2,?];[10,?,3];[]", "[110,d0_1,d1_2]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[100,2,5];[10,?,3];[]");
INFER_OK(op, "[100,2,?];[?,?,3];[]", "[?,d0_1,d1_2]");
INFER_OK(op, "[?,2,?];[10,?,3];[]", "[?,d0_1,d1_2]");
concat_dim_t = test::AsScalar(1);
INFER_OK(op, "[1,100,?];[?,10,3];[]", "[d0_0,110,d1_2]");
INFER_OK(op, "[1,100];[?,10];[]", "[d0_0,110]");
INFER_OK(op, "[?,100];[1,10];[]", "[d1_0,110]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100];[10,?];[]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100,5];[10];[]");
concat_dim_t = test::AsScalar(-2);
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100];[10,?];[]");
INFER_ERROR("Shape must be at least rank 2 but is rank 1", op,
"[100,5];[10];[]");
op.input_tensors.resize(6);
op.input_tensors[3] = nullptr;
op.input_tensors[5] = &concat_dim_t;
concat_dim_t = test::AsScalar(1);
set_n(5);
INFER_OK(op, "?;[1,100,?];[?,?,?];[?,10,3];?;[]", "[d1_0,?,d3_2]");
}
TEST(ArrayOpsTest, ConcatOffset_ShapeFn) {
ShapeInferenceTestOp op("ConcatOffset");
const int n = 4;
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.reserve(n);
for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_INT32);
TF_ASSERT_OK(NodeDefBuilder("test", "ConcatOffset")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Attr("n", n)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?;?", "in1;in2;in3;in4");
}
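// The Reshape tests below exercise the -1 wildcard (at most one dimension may
// be inferred from the remaining element count) and the check that input and
// output element counts agree.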
TEST(ArrayOpsTest, Reshape_ShapeFn) {
ShapeInferenceTestOp op("Reshape");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[?];?", "?");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "[?];[?]", "?");
INFER_OK(op, "[4];[?]", "?");
Tensor new_shape = test::AsTensor<int32>({1, 2, 3});
op.input_tensors[1] = &new_shape;
INFER_OK(op, "?;[3]", "[1,2,3]");
INFER_OK(op, "[?];[3]", "[1,2,3]");
INFER_OK(op, "[6];[3]", "[1,2,3]");
INFER_ERROR(
"Cannot reshape a tensor with 12 elements to shape [1,2,3] (6 elements)",
op, "[3,4];[3]");
new_shape = test::AsTensor<int32>({-1});
INFER_OK(op, "?;[1]", "[?]");
INFER_OK(op, "[?];[1]", "[d0_0]");
INFER_OK(op, "[2,2];[1]", "[4]");
new_shape = test::AsTensor<int32>({2, -1});
INFER_OK(op, "[3,4];[2]", "[2,6]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 7", op,
"[7];[2]");
new_shape = test::AsTensor<int32>({-1, -1, 2});
INFER_OK(op, "[8];[3]", "[?,?,2]");
INFER_OK(op, "?;[3]", "[?,?,2]");
new_shape = test::AsTensor<int32>({-1, 2, 3});
INFER_OK(op, "[?,2,3];[3]", "[d0_0,2,3]");
new_shape = test::AsTensor<int32>({});
INFER_OK(op, "[1];[0]", "[]");
INFER_ERROR(
"Cannot reshape a tensor with 2 elements to shape [] (1 elements)", op,
"[1,2];[0]");
new_shape = test::AsTensor<int32>({-1});
INFER_OK(op, "[0];[1]", "[0]");
new_shape = test::AsTensor<int32>({-1, 6});
INFER_OK(op, "[0,2];[1]", "[0,6]");
new_shape = test::AsTensor<int32>({0, -1});
INFER_OK(op, "[0,2];[1]", "[0,?]");
}
TEST(ArrayOpsTest, QuantizedReshape_ShapeFn) {
ShapeInferenceTestOp op("QuantizedReshape");
op.input_tensors.resize(2);
INFER_OK(op, "?;?;?;?", "?;[];[]");
INFER_OK(op, "[?];?;?;?", "?;[];[]");
INFER_OK(op, "[?];[?];?;?", "?;[];[]");
INFER_OK(op, "[4];[?];?;?", "?;[];[]");
Tensor new_shape = test::AsTensor<int32>({1, 2, 3});
op.input_tensors[1] = &new_shape;
INFER_OK(op, "[?];[3];?;?", "[1,2,3];[];[]");
INFER_OK(op, "[6];[3];?;?", "[1,2,3];[];[]");
INFER_ERROR(
"Cannot reshape a tensor with 12 elements to shape [1,2,3] (6 elements)",
op, "[3,4];[3];?;?");
INFER_ERROR("must be rank 0", op, "?;?;[1];?");
INFER_ERROR("must be rank 0", op, "?;?;?;[1]");
}
TEST(ArrayOpsTest, Placeholder_ShapeFn) {
{
ShapeInferenceTestOp op("Placeholder");
TensorShape shape({1, 2});
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2]");
}
{
ShapeInferenceTestOp op("Placeholder");
TensorShape shape({});
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[]");
}
{
ShapeInferenceTestOp op("Placeholder");
const int64_t dims[2] = {1, -1};
PartialTensorShape shape;
TF_ASSERT_OK(PartialTensorShape::MakePartialShape(dims, 2, &shape));
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,?]");
}
{
ShapeInferenceTestOp op("Placeholder");
PartialTensorShape shape;
TF_ASSERT_OK(NodeDefBuilder("test", "Placeholder")
.Attr("shape", shape)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "?");
}
}
TEST(ArrayOpsTest, Transpose_ShapeFn) {
ShapeInferenceTestOp op("Transpose");
op.input_tensors.resize(2);
INFER_OK(op, "?;?", "?");
INFER_OK(op, "?;[?]", "?");
INFER_OK(op, "?;[2]", "[?,?]");
INFER_OK(op, "[?];?", "[?]");
INFER_OK(op, "[?,?];[2]", "[?,?]");
INFER_ERROR("Dimension must be 3 but is 2", op, "[1,2,3];[2]");
Tensor perm = test::AsTensor<int32>({0});
op.input_tensors[1] = &perm;
INFER_OK(op, "[?];[?]", "[d0_0]");
perm = test::AsTensor<int32>({1, 0});
INFER_OK(op, "?;[2]", "[?,?]");
INFER_OK(op, "[?,?];[2]", "[d0_1,d0_0]");
INFER_OK(op, "[1,?];[2]", "[d0_1,d0_0]");
INFER_OK(op, "?;[0]", "in0");
perm = test::AsTensor<int32>({1, 2});
INFER_ERROR("perm dim 2 is out of range of input rank 2", op, "[1,2];[2]");
perm = test::AsTensor<int32>({0});
INFER_ERROR("Dimension must be 2 but is 1", op, "[1,2];[1]");
perm = test::AsTensor<int32>({1, 0, 3, 4, 2});
INFER_OK(op, "[0,1,2,3,4];[5]", "[d0_1,d0_0,d0_3,d0_4,d0_2]");
INFER_OK(op, "[0,?,2,3,4];[5]", "[d0_1,d0_0,d0_3,d0_4,d0_2]");
}
TEST(ArrayOpsTest, Bitcast_ShapeFn) {
ShapeInferenceTestOp op("Bitcast");
auto rebuild_node_def = [&op](DataType input_type, DataType output_type) {
TF_ASSERT_OK(NodeDefBuilder("test", "Bitcast")
.Input("input", 0, input_type)
.Attr("type", output_type)
.Finalize(&op.node_def));
};
rebuild_node_def(DT_FLOAT, DT_INT32);
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,2]", "in0");
rebuild_node_def(DT_INT32, DT_INT64);
INFER_OK(op, "[1,2]", "[d0_0]");
INFER_OK(op, "[1,?]", "[d0_0]");
INFER_ERROR("does not match", op, "[1,4]");
INFER_ERROR("does not match", op, "[1,3]");
rebuild_node_def(DT_INT64, DT_INT32);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,2]");
rebuild_node_def(DT_COMPLEX128, DT_INT32);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,4]");
rebuild_node_def(DT_COMPLEX128, DT_HALF);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,8]");
rebuild_node_def(DT_COMPLEX128, DT_INT8);
INFER_OK(op, "[4,5]", "[d0_0,d0_1,16]");
rebuild_node_def(DT_STRING, DT_INT32);
INFER_ERROR("one of the type sizes is zero", op, "[1,2,3]");
rebuild_node_def(DT_INT32, DT_STRING);
INFER_ERROR("one of the type sizes is zero", op, "[1,2,3]");
}
TEST(ArrayOpsTest, Squeeze_ShapeFn) {
ShapeInferenceTestOp op("Squeeze");
auto rebuild_node_def = [&op](const std::vector<int32>& squeeze_dims) {
TF_ASSERT_OK(NodeDefBuilder("test", "Squeeze")
.Input("input", 0, DT_FLOAT)
.Attr("squeeze_dims", squeeze_dims)
.Finalize(&op.node_def));
};
rebuild_node_def({});
INFER_OK(op, "?", "?");
INFER_OK(op, "[1,4,1,5,1]", "[d0_1,d0_3]");
INFER_OK(op, "[1,?,1,?,1]", "?");
rebuild_node_def({1});
INFER_OK(op, "[4,1,5]", "[d0_0,d0_2]");
INFER_OK(op, "[4,?,5]", "[d0_0,d0_2]");
INFER_ERROR("Can not squeeze dim[1]", op, "[4,6,5]");
rebuild_node_def({1, 2});
INFER_OK(op, "[4,1,1,5]", "[d0_0,d0_3]");
rebuild_node_def({1, -2});
INFER_OK(op, "[4,1,1,5]", "[d0_0,d0_3]");
rebuild_node_def({-2});
INFER_OK(op, "[4,1,5]", "[d0_0,d0_2]");
rebuild_node_def({-4});
INFER_ERROR("not in [-3,3)", op, "[1,2,3]");
rebuild_node_def({3});
INFER_ERROR("not in [-3,3)", op, "[1,2,3]");
}
TEST(ArrayOpsTest, ReverseSequence_ShapeFn) {
ShapeInferenceTestOp op("ReverseSequence");
auto rebuild_node_def = [&op](const int32_t seq_dim,
const int32_t batch_dim) {
TF_ASSERT_OK(NodeDefBuilder("test", "ReverseSequence")
.Input("input", 0, DT_FLOAT)
.Input("seq_lengths", 1, DT_INT64)
.Attr("seq_dim", seq_dim)
.Attr("batch_dim", batch_dim)
.Finalize(&op.node_def));
};
rebuild_node_def(1, 2);
INFER_OK(op, "?;[10]", "?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[10,10]");
rebuild_node_def(1, 4);
INFER_ERROR("batch_dim must be < input rank", op, "[1,2,3];[3]");
rebuild_node_def(4, 1);
INFER_ERROR("seq_dim must be < input rank", op, "[1,2,3];[3]");
rebuild_node_def(1, 2);
INFER_OK(op, "[1,2,3];[3]", "[d0_0,d0_1,d0_2]");
INFER_OK(op, "[1,2,?];[3]", "[d0_0,d0_1,d1_0]");
INFER_OK(op, "[1,2,3];[?]", "[d0_0,d0_1,d0_2]");
}
TEST(ArrayOpsTest, Split_ShapeFn) {
ShapeInferenceTestOp op("Split");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "Split")
.Input("split_dim", 0, DT_INT32)
.Input("value", 1, DT_FLOAT)
.Attr("num_split", 2)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?;?");
INFER_OK(op, "?;[?,?]", "[?,?];[?,?]");
INFER_OK(op, "?;[1,4]", "[?,?];[?,?]");
Tensor split_dim = test::AsTensor<int32>({1, 2});
op.input_tensors[0] = &split_dim;
INFER_ERROR("Input must be scalar but has rank 1", op, "[?];[?,?]");
split_dim = test::AsScalar<int32>(1);
INFER_OK(op, "?;?", "?;?");
INFER_OK(op, "?;[?,?]", "[d1_0,?];[d1_0,?]");
INFER_OK(op, "?;[1,4]", "[d1_0,2];[d1_0,2]");
INFER_OK(op, "?;[1,?]", "[d1_0,?];[d1_0,?]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 5", op,
"?;[1,5]");
split_dim = test::AsScalar<int32>(3);
INFER_ERROR(
"Dimension size, given by scalar input 3 must be in range [-3, 3)", op,
"?;[1,4,8]");
split_dim = test::AsScalar<int32>(-1);
INFER_OK(op, "?;?", "?;?");
INFER_OK(op, "?;[?,?]", "[d1_0,?];[d1_0,?]");
INFER_OK(op, "?;[1,?]", "[d1_0,?];[d1_0,?]");
INFER_OK(op, "?;[1,4]", "[d1_0,2];[d1_0,2]");
INFER_OK(op, "?;[1,4,8]", "[d1_0,d1_1,4];[d1_0,d1_1,4]");
split_dim = test::AsScalar<int32>(-2);
INFER_OK(op, "?;[1,4,8]", "[d1_0,2,d1_2];[d1_0,2,d1_2]");
split_dim = test::AsScalar<int32>(-4);
INFER_ERROR(
"Dimension size, given by scalar input -4 must be in range [-3, 3)", op,
"?;[1,4,8]");
}
TEST(ArrayOpsTest, Tile_ShapeFn) {
ShapeInferenceTestOp op("Tile");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "Tile")
.Input("input", 0, DT_FLOAT)
.Input("multiples", 1, DT_INT32)
.Finalize(&op.node_def));
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[2,3,1,4];?", "[?,?,?,?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,3,1,4];[4,1]");
INFER_OK(op, "?;[4]", "[?,?,?,?]");
Tensor multiples = test::AsTensor<int32>({2, 3, 4, 5});
op.input_tensors[1] = &multiples;
INFER_OK(op, "[2,3,1,4];[4]", "[4,9,4,20]");
multiples = test::AsTensor<int64_t>({2, 3, 4, 5});
INFER_OK(op, "[2,3,1,4];[4]", "[4,9,4,20]");
}
TEST(ArrayOpsTest, EditDistance_ShapeFn) {
ShapeInferenceTestOp op("EditDistance");
op.input_tensors.resize(6);
INFER_OK(op, "[?,?];[?];[4];[?,?];[?];[4]", "?");
Tensor hypothesis_shape = test::AsTensor<int64_t>({2, 30, 4, 50});
op.input_tensors[2] = &hypothesis_shape;
Tensor truth_shape = test::AsTensor<int64_t>({20, 3, 40, 5});
op.input_tensors[5] = &truth_shape;
INFER_OK(op, "[?,?];[?];[4];[?,?];[?];[4]", "[20,30,40]");
hypothesis_shape = test::AsTensor<int64_t>({2});
op.input_tensors[2] = &hypothesis_shape;
INFER_ERROR("Num elements of hypothesis_shape does not match truth_shape", op,
"[?,?];[?];[1];[?,?];[?];[4]");
}
TEST(ArrayOpsTest, OneHot_ShapeFn) {
ShapeInferenceTestOp op("OneHot");
op.input_tensors.resize(4);
auto set_axis = [&op](int axis) {
TF_ASSERT_OK(NodeDefBuilder("test", "OneHot")
.Input("indices", 0, DT_FLOAT)
.Input("depth", 1, DT_INT32)
.Input("on_value", 2, DT_FLOAT)
.Input("off_value", 3, DT_FLOAT)
.Attr("axis", axis)
.Finalize(&op.node_def));
};
set_axis(-2);
INFER_ERROR("axis must be >= -1", op, "?;?;?;?");
set_axis(1);
INFER_OK(op, "?;[];?;?", "?");
Tensor depth = test::AsTensor<int32>({1, 2});
op.input_tensors[1] = &depth;
INFER_ERROR("Input must be scalar but has rank 1", op, "?;[2];?;?");
depth = test::AsScalar<int32>(2);
INFER_OK(op, "[1,3,4];[];?;?", "[d0_0,2,d0_1,d0_2]");
set_axis(-1);
INFER_OK(op, "[1,3,4];[];?;?", "[d0_0,d0_1,d0_2,2]");
}
TEST(ArrayOpsTest, ExtractImagePatchesShapeTest) {
ShapeInferenceTestOp op("ExtractImagePatches");
auto set_op = [&op](const std::vector<int32>& ksizes,
const std::vector<int32>& strides,
const std::vector<int32>& rates, const string& padding) {
TF_ASSERT_OK(NodeDefBuilder("test", "ExtractImagePatches")
.Input("input", 0, DT_FLOAT)
.Attr("ksizes", ksizes)
.Attr("strides", strides)
.Attr("rates", rates)
.Attr("padding", padding)
.Finalize(&op.node_def));
};
set_op({1, 2, 2, 1}, {1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_OK(op, "[1,7,7,2]", "[d0_0,5,5,8]");
set_op({1, 1, 1, 1}, {1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_OK(op, "[1,7,7,2]", "[d0_0,7,7,d0_3]");
set_op({1, 2, 2, 1, 1}, {1, 1, 1, 1}, {1, 2, 2, 1}, "VALID");
INFER_ERROR(
"ExtractImagePatches requires the ksizes attribute to contain 4 values, "
"but got: 5",
op, "[1,7,7,2]");
}
TEST(ArrayOpsTest, QuantizeAndDequantizeV2_ShapeFn) {
ShapeInferenceTestOp op("QuantizeAndDequantizeV2");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "QuantizeAndDequantizeV2")
.Input("input", 0, DT_FLOAT)
.Input("input_min", 1, DT_FLOAT)
.Input("input_max", 2, DT_FLOAT)
.Attr("signed_input", true)
.Attr("num_bits", 8)
.Attr("range_given", false)
.Attr("narrow_range", false)
.Attr("axis", -1)
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[];?;?", "in0");
INFER_OK(op, "[1,2,?,4,5];?;?", "in0");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1,2,?,4,5];[1];[]");
INFER_ERROR("Shapes must be equal rank, but are 1 and 0", op,
"[1,2,?,4,5];[];[1]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1,2,?,4,5];[1];[1]");
(*op.node_def.mutable_attr())["axis"].set_i(-2);
INFER_ERROR("axis should be at least -1, got -2", op, "?;?;?");
}
TEST(ArrayOpsTest, SpaceToBatch_ShapeFn) {
ShapeInferenceTestOp op("SpaceToBatch");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "SpaceToBatch")
.Input("input", 0, DT_FLOAT)
.Input("paddings", 1, DT_INT32)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[1,10,10,3];[2,2]", "[4,?,?,d0_3]");
INFER_OK(op, "[1,10,10,3];?", "[4,?,?,d0_3]");
INFER_ERROR("rank", op, "[1,10,10,3];[4]");
INFER_ERROR("3 and 2", op, "[1,10,10,3];[2,3]");
Tensor paddings = test::AsTensor<int32>({4, 2, 2, 4}, {{2, 2}});
op.input_tensors[1] = &paddings;
INFER_OK(op, "[1,10,10,3];[2,2]", "[4,8,8,d0_3]");
paddings = test::AsTensor<int64_t>({4, 2, 2, 4}, {{2, 2}});
INFER_OK(op, "[1,10,10,3];[2,2]", "[4,8,8,d0_3]");
paddings = test::AsTensor<int32>({1, 2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &paddings;
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 13", op,
"[1,10,10,3];[2,2]");
paddings = test::AsTensor<int32>({1, -2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &paddings;
INFER_ERROR("cannot be negative", op, "[1,10,10,3];[2,2]");
}
TEST(ArrayOpsTest, SpaceToBatchND_ShapeFn) {
ShapeInferenceTestOp op("SpaceToBatchND");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "SpaceToBatchND")
.Input("input", 0, DT_FLOAT)
.Input("block_shape", 1, DT_INT32)
.Input("paddings", 2, DT_INT32)
.Finalize(&op.node_def));
INFER_OK(op, "?;[2];?", "?");
INFER_OK(op, "[?,?,?,?];[2];?", "[?,?,?,d0_3]");
INFER_OK(op, "[?,?,?,2];[2];?", "[?,?,?,d0_3]");
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_OK(op, "[3,?,?,2];[2];?", "[18,?,?,d0_3]");
{
Tensor paddings = test::AsTensor<int32>({1, 1, 0, 1}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_OK(op, "[3,?,2,2];[2];[2,2]", "[18,?,1,d0_3]");
op.input_tensors[2] = nullptr;
}
{
Tensor paddings = test::AsTensor<int32>({1, 1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_OK(op, "[3,2,3,2];[2];[2,2]", "[18,2,1,d0_3]");
op.input_tensors[2] = nullptr;
}
op.input_tensors[1] = nullptr;
}
INFER_ERROR("block_shape must have rank 1", op, "?;[1,1];?");
INFER_ERROR("block_shape must have known size", op, "?;[?];?");
{
Tensor block_shape = test::AsTensor<int32>({0, 2});
op.input_tensors[1] = &block_shape;
INFER_ERROR("block_shape must be positive", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({1, 1});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, -1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("paddings cannot be negative", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({3, 3});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, 0, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("divisible", op, "[1,2,3,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({});
op.input_tensors[2] = &paddings;
INFER_OK(op, "?;[0];[0,2]", "?");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
INFER_ERROR("rank", op, "[1,3,3,1];[2];[1]");
INFER_ERROR("shape", op, "[1,3,3,1];[2];[1,2]");
}
TEST(ArrayOpsTest, BatchToSpace_ShapeFn) {
ShapeInferenceTestOp op("BatchToSpace");
op.input_tensors.resize(2);
TF_ASSERT_OK(NodeDefBuilder("test", "BatchToSpace")
.Input("input", 0, DT_FLOAT)
.Input("crops", 1, DT_INT32)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[4,8,8,3];[2,2]", "[1,?,?,d0_3]");
INFER_ERROR("Dimension size must be evenly divisible by", op,
"[5,8,8,3];[2,2]");
INFER_OK(op, "[4,8,8,3];?", "[1,?,?,d0_3]");
INFER_ERROR("rank", op, "[4,8,8,3];[4]");
INFER_ERROR("3 and 2", op, "[4,8,8,3];[2,3]");
Tensor croppings = test::AsTensor<int64_t>({4, 2, 2, 4}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_OK(op, "[4,8,8,3];[2,2]", "[1,10,10,d0_3]");
croppings = test::AsTensor<int32>({100, 2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_ERROR("Negative dimension size caused by subtracting", op,
"[4,8,8,3];[2,2]");
croppings = test::AsTensor<int32>({1, 2, 3, 400}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_ERROR("Negative dimension size caused by subtracting", op,
"[4,8,8,3];[2,2]");
croppings = test::AsTensor<int32>({1, -2, 3, 4}, {{2, 2}});
op.input_tensors[1] = &croppings;
INFER_ERROR("cannot be negative", op, "[4,8,8,3];[2,2]");
}
TEST(ArrayOpsTest, BatchToSpaceND_ShapeFn) {
ShapeInferenceTestOp op("BatchToSpaceND");
op.input_tensors.resize(3);
TF_ASSERT_OK(NodeDefBuilder("test", "BatchToSpaceND")
.Input("input", 0, DT_FLOAT)
.Input("block_shape", 1, DT_INT32)
.Input("crops", 2, DT_INT32)
.Finalize(&op.node_def));
INFER_OK(op, "?;[2];?", "?");
INFER_OK(op, "[?,?,?,?];[2];?", "[?,?,?,d0_3]");
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_OK(op, "[?,?,?,2];[2];?", "[?,?,?,d0_3]");
INFER_OK(op, "[18,?,?,2];[2];?", "[3,?,?,d0_3]");
{
Tensor crops = test::AsTensor<int32>({1, 1, 0, 1}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_OK(op, "[18,?,2,2];[2];[2,2]", "[3,?,5,d0_3]");
op.input_tensors[2] = nullptr;
}
{
Tensor crops = test::AsTensor<int32>({1, 1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_OK(op, "[18,2,1,2];[2];[2,2]", "[3,2,3,d0_3]");
op.input_tensors[2] = nullptr;
}
op.input_tensors[1] = nullptr;
}
INFER_ERROR("block_shape must have rank 1", op, "?;[1,1];?");
INFER_ERROR("block_shape must have known size", op, "?;[?];?");
INFER_ERROR("rank", op, "[2,2];[2];[2,2]");
INFER_ERROR("rank", op, "[2,2,3];[3];[3,2]");
{
Tensor block_shape = test::AsTensor<int32>({0, 2});
op.input_tensors[1] = &block_shape;
INFER_ERROR("block_shape must be positive", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({1, 1});
op.input_tensors[1] = &block_shape;
Tensor paddings = test::AsTensor<int32>({0, -1, 0, 0}, {{2, 2}});
op.input_tensors[2] = &paddings;
INFER_ERROR("crops cannot be negative", op, "[1,2,2];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({2, 2});
op.input_tensors[1] = &block_shape;
Tensor crops = test::AsTensor<int32>({3, 2, 0, 0}, {{2, 2}});
op.input_tensors[2] = &crops;
INFER_ERROR("Negative", op, "[4,2,3,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
op.input_tensors[2] = nullptr;
}
{
Tensor block_shape = test::AsTensor<int32>({2, 3});
op.input_tensors[1] = &block_shape;
INFER_ERROR("divisible", op, "[3,1,1,1];[2];[2,2]");
op.input_tensors[1] = nullptr;
}
}
TEST(ArrayOpsTest, SpaceToDepth_ShapeFn) {
ShapeInferenceTestOp op("SpaceToDepth");
TF_ASSERT_OK(NodeDefBuilder("test", "SpaceToDepth")
.Input("input", 0, DT_FLOAT)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[1,2,4,4]", "[d0_0,1,2,16]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 3", op,
"[1,3,8,4]");
INFER_ERROR("Dimension size must be evenly divisible by 2 but is 5", op,
"[1,2,5,4]");
INFER_OK(op, "[1,2,4,?]", "[d0_0,1,2,?]");
}
TEST(ArrayOpsTest, DepthToSpace_ShapeFn) {
ShapeInferenceTestOp op("DepthToSpace");
TF_ASSERT_OK(NodeDefBuilder("test", "DepthToSpace")
.Input("input", 0, DT_FLOAT)
.Attr("block_size", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[1,1,2,16]", "[d0_0,2,4,4]");
INFER_ERROR("Dimension size must be evenly divisible by 4 but is 15", op,
"[1,1,2,15]");
INFER_OK(op, "[1,2,4,?]", "[d0_0,4,8,?]");
TF_ASSERT_OK(NodeDefBuilder("test", "DepthToSpace")
.Input("input", 0, DT_FLOAT)
.Attr("block_size", 10)
.Finalize(&op.node_def));
INFER_OK(op, "[1,1,2,200]", "[d0_0,10,20,2]");
}
TEST(ArrayOpsTest, Slice_ShapeFn) {
ShapeInferenceTestOp op("Slice");
TF_ASSERT_OK(NodeDefBuilder("test", "Slice")
.Input("input", 0, DT_FLOAT)
.Input("begin", 1, DT_INT64)
.Input("sizes", 2, DT_INT64)
.Finalize(&op.node_def));
INFER_OK(op, "[2,3,4,5];[4];[4]", "[?,?,?,?]");
INFER_OK(op, "[2,3,4,5];[?];[?]", "[?,?,?,?]");
INFER_OK(op, "?;[?];[?]", "?");
INFER_OK(op, "?;[4];[?]", "[?,?,?,?]");
INFER_ERROR("must be rank 1", op, "[2,3,4,5];[2,3];[3]");
INFER_ERROR("must be rank 1", op, "[2,3,4,5];[2];[3,4]");
INFER_ERROR("must be rank 2", op, "[2,3,4,5];[2];[2]");
op.input_tensors.resize(3);
Tensor begin = test::AsTensor<int32>({0, 1, 2, 1});
Tensor sizes = test::AsTensor<int32>({1, 2, 1, 3});
op.input_tensors[1] = &begin;
op.input_tensors[2] = &sizes;
INFER_OK(op, "[2,3,4,5];[4];[4]", "[1,2,1,3]");
sizes = test::AsTensor<int32>({-1, -1, 1, -1});
INFER_OK(op, "[2,3,4,5];[4];[4]", "[d0_0,2,1,4]");
begin = test::AsTensor<int32>({0, 1, 2, 6});
sizes = test::AsTensor<int32>({-1, -1, -1, -1});
INFER_ERROR("Negative dimension size", op, "[2,3,4,5];[4];[4]");
begin = test::AsTensor<int32>({0, 1, 2, 5});
sizes = test::AsTensor<int32>({-1, -1, -1, -2});
INFER_ERROR("cannot be < -1", op, "[2,3,4,5];[4];[4]");
}
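// shrink_axis_mask == 1 sets bit 0, so dimension 0 of the sliced result is
// removed from the output shape in the StridedSlice test below.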
TEST(ArrayOpsTest, StridedSlice_ShapeFn) {
ShapeInferenceTestOp op("StridedSlice");
TF_ASSERT_OK(NodeDefBuilder("test", "StridedSlice")
.Input("input", 0, DT_FLOAT)
.Input("begin", 1, DT_INT32)
.Input("end", 2, DT_INT32)
.Input("strides", 3, DT_INT32)
.Attr("shrink_axis_mask", 1)
.Finalize(&op.node_def));
op.input_tensors.resize(4);
Tensor strides = test::AsTensor<int32>({1});
op.input_tensors[3] = &strides;
INFER_OK(op, "[2,3,4,5];[1];[1];[1]", "[3,4,5]");
INFER_OK(op, "[2,0,3,4];[1];[1];[1]", "[0,3,4]");
}
TEST(ArrayOpsTest, StridedSliceGrad_ShapeFn) {
ShapeInferenceTestOp op("StridedSliceGrad");
op.input_tensors.resize(5);
INFER_OK(op, "?;?;?;?;?", "?");
INFER_OK(op, "[?];?;?;?;?", "?");
INFER_OK(op, "[4];?;?;?;?", "[?,?,?,?]");
Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
op.input_tensors[0] = &in_t;
INFER_OK(op, "[4];?;?;?;?", "[1,2,3,4]");
}
TEST(ArrayOpsTest, UnchangedWithQuantizationScalars_ShapeFn) {
for (const char* op_name : {"Dequantize", "FakeQuantWithMinMaxVars"}) {
ShapeInferenceTestOp op(op_name);
if (op_name[0] == 'D') {
TF_ASSERT_OK(NodeDefBuilder("test", "Dequantize")
.Input("input", 0, DT_QINT8)
.Input("input_min", 1, DT_FLOAT)
.Input("input_max", 2, DT_FLOAT)
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("axis", -1)
.Finalize(&op.node_def));
}
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[1,?,3];[];[]", "in0");
INFER_ERROR("be rank 0", op, "[1,?,3];[1];[]");
INFER_ERROR("be rank 0", op, "[1,?,3];[];[1]");
}
}
TEST(ArrayOpsTest, FakeQuantWithMinMaxVarsPerChannel) {
ShapeInferenceTestOp op("FakeQuantWithMinMaxVarsPerChannel");
INFER_OK(op, "?;?;?", "in0");
INFER_OK(op, "[?];?;?", "in0");
INFER_OK(op, "[1,?,3];[3];[3]", "in0");
INFER_OK(op, "[3];[3];[3]", "in0");
INFER_ERROR("be rank 1", op, "[1,?,3];[1];[]");
INFER_ERROR("be rank 1", op, "[1,?,3];[];[1]");
INFER_ERROR("must be equal", op, "[1,?,3];[2];[?]");
INFER_ERROR("must be equal", op, "[1,?,3];[?];[2]");
INFER_ERROR("must be equal", op, "[1,?,?];[1];[2]");
INFER_ERROR("must be equal", op, "[5];[4];[?]");
}
TEST(ArrayOpsTest, FakeQuantWithMinMaxVarsPerChannelGradient) {
ShapeInferenceTestOp op("FakeQuantWithMinMaxVarsPerChannelGradient");
INFER_OK(op, "?;?;?;?", "in0;[?];[?]");
INFER_OK(op, "[3];[3];[3];[3]", "in0;in3;in3");
INFER_OK(op, "[1,3];[1,3];[3];[3]", "in0;in3;in3");
INFER_OK(op, "[1,2,3,4];[1,2,3,4];[4];[4]", "in0;in3;in3");
INFER_ERROR("be equal rank", op, "[1,?,3];[1,?,3];[3];[]");
INFER_ERROR("be rank 1", op, "[1,?,3];[1,?,3];[];[3]");
INFER_ERROR("be at least rank 1", op, "[];[];[1];[1]");
INFER_ERROR("be at most rank 4", op, "[1,2,3,4,5];[1,2,3,4,5];[1];[1]");
INFER_ERROR("must be equal", op, "[1,3];[1,3];[2];[3]");
INFER_ERROR("must be equal", op, "[1,3];[1,3];[3];[2]");
}
TEST(ArrayOpsTest, QuantizedConcat_ShapeFn) {
ShapeInferenceTestOp op("QuantizedConcat");
auto set_n = [&op](int n) {
std::vector<NodeDefBuilder::NodeOut> src_list;
std::vector<NodeDefBuilder::NodeOut> limit_list;
for (int i = 0; i < n; ++i) {
src_list.emplace_back("a", 0, DT_QUINT8);
limit_list.emplace_back("b", 0, DT_FLOAT);
}
TF_ASSERT_OK(NodeDefBuilder("test", "QuantizedConcat")
.Input({"concat_dim", 0, DT_INT32})
.Input(src_list)
.Input(limit_list)
.Input(limit_list)
.Attr("N", n)
.Finalize(&op.node_def));
};
set_n(1);
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1];?;?;?");
set_n(2);
INFER_ERROR("must be rank 0", op, "[];?;?;?;?;?;[1]");
INFER_ERROR("must be rank 0", op, "[];?;?;?;?;[1];?");
INFER_ERROR("must be rank 0", op, "[];?;?;?;[1];?;?");
INFER_ERROR("must be rank 0", op, "[];?;?;[1];?;?;?");
set_n(2);
INFER_ERROR("must be rank 2", op, "[];[1,2];[1,2,3];?;?;?;?");
INFER_OK(op, "[];[1,2];[1,3];?;?;?;?", "[?,?];[];[]");
Tensor concat_dim_t;
op.input_tensors.push_back(&concat_dim_t);
set_n(2);
concat_dim_t = test::AsScalar(0);
INFER_OK(op, "[];[100,2,?];[10,?,3];?;?;?;?", "[110,d1_1,d2_2];[];[]");
INFER_ERROR("Dimension 1 in both shapes must be equal, but are 5 and 3", op,
"[];[100,2,5];[10,?,3];?;?;?;?");
}
TEST(StateOpsTest, _ParallelConcatStart_ShapeFn) {
ShapeInferenceTestOp op("_ParallelConcatStart");
TensorShape shape({1, 2, 3});
TensorShapeProto shape_proto;
shape.AsProto(&shape_proto);
TF_ASSERT_OK(NodeDefBuilder("test", "_ParallelConcatStart")
.Attr("shape", shape_proto)
.Attr("dtype", DT_FLOAT)
.Finalize(&op.node_def));
INFER_OK(op, "", "[1,2,3]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/array_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/array_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
34ccf62e-4ef7-427f-ac2d-6abf3f4c45d8 | cpp | google/tensorstore | data_type_conversion | tensorstore/data_type_conversion.h | tensorstore/data_type_conversion_test.cc | #ifndef TENSORSTORE_DATA_TYPE_CONVERSION_H_
#define TENSORSTORE_DATA_TYPE_CONVERSION_H_
#include <array>
#include <complex>
#include <limits>
#include <type_traits>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
template <typename From, typename To>
struct ConvertDataType {
void operator()(const From* from, To* to, void* arg) const {
*to = static_cast<To>(*from);
}
};
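// `ConvertDataType` is the element-wise conversion functor: the primary
// template simply `static_cast`s, and specializations elsewhere may perform
// checked or lossy conversions.  The trailing `void*` argument is an opaque
// side channel (in practice an error/status sink, though this header does not
// pin that down -- treat it as an assumption) that the default conversion
// ignores.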
template <typename From, typename To>
struct DataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags = DataTypeConversionFlags{};
};
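// The primary `DataTypeConversionTraits` template declares no flags, i.e.
// conversions are unsupported by default; the per-pair specializations later
// in this header opt individual conversions in.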
template <typename From, typename To,
DataTypeConversionFlags AdditionalFlags = DataTypeConversionFlags{}>
constexpr inline bool IsDataTypeConversionSupported =
((DataTypeConversionTraits<From, To>::flags &
(DataTypeConversionFlags::kSupported | AdditionalFlags)) ==
(DataTypeConversionFlags::kSupported | AdditionalFlags));
template <typename From, DataTypeConversionFlags AdditionalFlags>
constexpr inline bool
IsDataTypeConversionSupported<From, void, AdditionalFlags> = true;
template <typename To, DataTypeConversionFlags AdditionalFlags>
constexpr inline bool IsDataTypeConversionSupported<void, To, AdditionalFlags> =
true;
template <typename T, DataTypeConversionFlags AdditionalFlags>
constexpr inline bool IsDataTypeConversionSupported<T, T, AdditionalFlags> =
true;
template <DataTypeConversionFlags AdditionalFlags>
constexpr inline bool
IsDataTypeConversionSupported<void, void, AdditionalFlags> = true;
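// Usage sketch (illustrative only, and assuming the relevant trait
// specializations are in scope at the point of use):
//
//   static_assert(tensorstore::IsDataTypeConversionSupported<
//       tensorstore::dtypes::bool_t, tensorstore::dtypes::int32_t>);
//
// The `void` and identity specializations above make conversions involving an
// unknown element type, or between identical types, trivially "supported".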
namespace internal {
extern const std::array<DataTypeOperations::CanonicalConversionOperations,
kNumDataTypeIds>
canonical_data_type_conversions;
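// Runtime (type-erased) counterparts of the compile-time traits: given two
// `DataType` values, these look up the elementwise conversion function and
// its flags.  The *_OrError variant is assumed to fail with a status when
// `required_flags` (e.g. kSafeAndImplicit) are not satisfied; the exact error
// text is up to the implementation.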
DataTypeConversionLookupResult GetDataTypeConverter(DataType from, DataType to);
Result<DataTypeConversionLookupResult> GetDataTypeConverterOrError(
DataType from, DataType to, DataTypeConversionFlags required_flags = {});
}  // namespace internal
namespace internal_data_type {
template <typename From, typename To>
std::enable_if_t<((DataTypeConversionTraits<From, To>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kCanReinterpretCast)) ==
DataTypeConversionFlags::kSupported &&
!std::is_same_v<From, To>),
internal::ElementwiseFunction<2, void*>>
GetConvertFunction() {
  return internal::SimpleElementwiseFunction<
      ConvertDataType<From, To>(const From, To), void*>();
}
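// A conversion function is materialized only for conversions that are
// supported, non-identity, and *not* representable as a reinterpret cast; the
// overload below returns an empty function for everything else, since those
// cases need no per-element work (or can be handled as plain copies).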
template <typename From, typename To>
std::enable_if_t<((DataTypeConversionTraits<From, To>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kCanReinterpretCast)) !=
DataTypeConversionFlags::kSupported ||
std::is_same_v<From, To>),
internal::ElementwiseFunction<2, void*>>
GetConvertFunction() {
return {};
}
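// Builds, for a fixed `From`, the per-target tables over all canonical data
// types: one table of conversion functions and one table of conversion flags,
// both indexed by the canonical data-type id.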
template <typename From>
constexpr internal::DataTypeOperations::CanonicalConversionOperations
GetConvertToCanonicalOperations() {
return {
MapCanonicalDataTypes([](auto dtype) {
using X = typename decltype(dtype)::Element;
return GetConvertFunction<From, X>();
}),
MapCanonicalDataTypes([](auto dtype) {
using X = typename decltype(dtype)::Element;
return DataTypeConversionTraits<From, X>::flags;
}),
};
}
}  // namespace internal_data_type
namespace internal_data_type {
template <typename From, typename To>
struct IntegerIntegerDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= std::numeric_limits<To>::digits &&
std::numeric_limits<From>::is_signed <=
std::numeric_limits<To>::is_signed)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{}) |
((std::numeric_limits<From>::digits +
std::numeric_limits<From>::is_signed ==
std::numeric_limits<To>::digits + std::numeric_limits<To>::is_signed)
? DataTypeConversionFlags::kCanReinterpretCast
: DataTypeConversionFlags{});
};
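// Integer->integer: widening without a signedness downgrade is safe and
// implicit (e.g. int8 -> int16), and conversions between types with the same
// total bit width can reinterpret the representation (e.g. int32 <-> uint32,
// which is kCanReinterpretCast but not kSafeAndImplicit).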
template <typename From, typename To>
struct IntegerFloatDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= std::numeric_limits<To>::digits)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{});
};
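// Integer->float: always supported; safe and implicit only when every integer
// value is exactly representable, i.e. the integer's value bits fit in the
// float's mantissa (int16 -> float32 qualifies; int64 -> float64 does not,
// since 63 > 53).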
template <typename From, typename To>
struct FloatFloatDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= std::numeric_limits<To>::digits &&
std::numeric_limits<From>::min_exponent >=
std::numeric_limits<To>::min_exponent &&
std::numeric_limits<From>::max_exponent <=
std::numeric_limits<To>::max_exponent)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{});
};
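// Float->float: safe and implicit only when neither precision nor exponent
// range shrinks, e.g. float32 -> float64.  Note that float16 and bfloat16 are
// mutually unsafe: float16 has more mantissa digits, bfloat16 the wider
// exponent range.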
template <typename From, typename To>
struct NumericComplexDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionTraits<From, typename To::value_type>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit);
};
template <typename From, typename To>
struct ComplexComplexDataTypeConversionTraits
: public DataTypeConversionTraits<typename From::value_type,
typename To::value_type> {};
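// Real->complex conversions inherit the flags of the real -> component
// conversion (masking off kCanReinterpretCast), and complex->complex
// conversions delegate directly to the component traits, so e.g.
// complex64 -> complex128 is as safe as float32 -> float64.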
template <typename From, typename To>
struct IntegerJsonDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionFlags::kSupported |
((std::numeric_limits<From>::digits <= 64)
? DataTypeConversionFlags::kSafeAndImplicit
: DataTypeConversionFlags{});
};
template <typename From, typename To>
struct FloatJsonDataTypeConversionTraits {
constexpr static DataTypeConversionFlags flags =
DataTypeConversionTraits<From, double>::flags &
(DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit);
};
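// JSON targets: any integer with at most 64 value bits converts to a JSON
// number safely and implicitly; floats inherit their float64 conversion
// flags, since the JSON representation stores floating-point values as a
// double.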
}  // namespace internal_data_type
template <typename T>
struct DataTypeConversionTraits<std::complex<T>, ::tensorstore::dtypes::json_t>
: public DataTypeConversionTraits<T, ::tensorstore::dtypes::json_t> {};
#define TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(FROM, TO, ...)  \
  template <>                                                       \
  struct DataTypeConversionTraits<FROM, TO> {                       \
    using From = FROM;                                              \
    using To = TO;                                                  \
    constexpr static DataTypeConversionFlags flags = __VA_ARGS__;   \
  };
#define TENSORSTORE_INTERNAL_INHERITED_CONVERT(FROM, TO, PARENT) \
  template <>                                                    \
  struct DataTypeConversionTraits<FROM, TO> : public PARENT<FROM, TO> {};
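// Illustrative expansion (not part of the header): the first invocation
// below,
//
//   TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
//       ::tensorstore::dtypes::char_t, ::tensorstore::dtypes::byte_t, ...)
//
// produces `template <> struct DataTypeConversionTraits<char_t, byte_t>` with
// the listed flags; char <-> byte is both safe and a pure reinterpretation of
// the same byte.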
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::char_t, ::tensorstore::dtypes::byte_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit |
DataTypeConversionFlags::kCanReinterpretCast)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::ustring_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit |
DataTypeConversionFlags::kCanReinterpretCast)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e4m3fn_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e5m2_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::bfloat16_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float32_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::float64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::complex64_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::complex128_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bool_t, ::tensorstore::dtypes::json_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::byte_t, ::tensorstore::dtypes::char_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::ustring_t, ::tensorstore::dtypes::json_t,
DataTypeConversionFlags::kSupported |
DataTypeConversionFlags::kSafeAndImplicit)
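// The integer specializations that follow are narrowing or formatting
// conversions (to bool and to the string types): supported, but deliberately
// neither safe-and-implicit nor reinterpretable.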
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
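// The float8/float16/bfloat16/float32/float64 -> integer and -> string
// specializations below are all plain kSupported: rounding and overflow make
// them lossy, so none is marked safe-and-implicit.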
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::uint16_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::uint32_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::uint64_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::string_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::ustring_t, DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
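// Conversions from complex64_t to integer, float, and string types.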
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float8_e5m2_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::bfloat16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::float64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
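// Conversions from complex128_t to integer, float, and string types.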
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float8_e4m3fn_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float8_e5m2_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::bfloat16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::float64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
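// Conversions from json_t to bool, integer, float, and string types.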
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::bool_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int4_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint8_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::int64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::uint64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e4m3fn_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e5m2_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::bfloat16_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float32_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::float64_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::string_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::json_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
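// Conversions from string_t to ustring_t and json_t.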
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::string_t, ::tensorstore::dtypes::ustring_t,
DataTypeConversionFlags::kSupported)
TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS(
::tensorstore::dtypes::string_t, ::tensorstore::dtypes::json_t,
DataTypeConversionFlags::kSupported)
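// The conversions below inherit their DataTypeConversionFlags from a shared
// traits class instead of specifying the flags directly.

// Integer <-> integer conversions.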
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int4_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint8_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint16_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint32_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::int64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::uint64_t,
internal_data_type::IntegerIntegerDataTypeConversionTraits)
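// Integer -> float conversions.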
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float32_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::float64_t,
internal_data_type::IntegerFloatDataTypeConversionTraits)
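// Integer -> complex conversions.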
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
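// Integer -> json conversions.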
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int4_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int8_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint8_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int16_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint16_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int32_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint32_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::int64_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::uint64_t, ::tensorstore::dtypes::json_t,
internal_data_type::IntegerJsonDataTypeConversionTraits)
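// Float <-> float conversions.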
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e4m3fn_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e4m3fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t,
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e5m2_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float8_e5m2fnuz_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::bfloat16_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float32_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::float64_t,
internal_data_type::FloatFloatDataTypeConversionTraits)
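// Float -> complex conversions.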
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t,
::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t,
::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t,
::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::NumericComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fn_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3fnuz_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e4m3b11fnuz_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float8_e5m2fnuz_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float16_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::bfloat16_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float32_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::float64_t, ::tensorstore::dtypes::json_t,
internal_data_type::FloatJsonDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::ComplexComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::complex64_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::ComplexComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::complex64_t,
internal_data_type::ComplexComplexDataTypeConversionTraits)
TENSORSTORE_INTERNAL_INHERITED_CONVERT(
::tensorstore::dtypes::complex128_t, ::tensorstore::dtypes::complex128_t,
internal_data_type::ComplexComplexDataTypeConversionTraits)
#undef TENSORSTORE_INTERNAL_DEFINE_CONVERT_TRAITS
#undef TENSORSTORE_INTERNAL_INHERITED_CONVERT
}
#endif | #include "tensorstore/data_type_conversion.h"
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/half_gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::DataTypeConversionFlags;
using ::tensorstore::DataTypeConversionTraits;
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::IsDataTypeConversionSupported;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::StrCat;
using ::tensorstore::internal::GetDataTypeConverter;
using ::tensorstore::internal::GetDataTypeConverterOrError;
using ::tensorstore::internal::GetElementCopyErrorStatus;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
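// Pull every `::tensorstore::dtypes::*` alias (bool_t, int4_t, float16_t,
// json_t, ...) into this namespace so the tests below can use the type names
// unqualified.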
#define X(T, ...) using ::tensorstore::dtypes::T;
TENSORSTORE_FOR_EACH_DATA_TYPE(X)
#undef X
constexpr DataTypeConversionFlags kSupported =
DataTypeConversionFlags::kSupported;
constexpr DataTypeConversionFlags kIdentity =
DataTypeConversionFlags::kIdentity;
constexpr DataTypeConversionFlags kSafeAndImplicit =
DataTypeConversionFlags::kSafeAndImplicit;
constexpr DataTypeConversionFlags kCanReinterpretCast =
DataTypeConversionFlags::kCanReinterpretCast;
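// Asserts that no conversion from `From` to `To` is registered, both
// statically (empty DataTypeConversionTraits flags) and dynamically (the
// runtime converter reports empty flags).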
template <typename From, typename To>
void TestUnsupported() {
static_assert(DataTypeConversionTraits<From, To>::flags ==
DataTypeConversionFlags{});
static_assert(!IsDataTypeConversionSupported<From, To>);
auto r = GetDataTypeConverter(dtype_v<From>, dtype_v<To>);
EXPECT_EQ(DataTypeConversionFlags{}, r.flags);
}
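// Runs the registered elementwise converter on a single `From` value and
// returns the converted `To` value (or the copy error). Also checks that the
// conversion is advertised with exactly `flags` (kSupported is implied).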
template <typename To, typename From>
Result<To> TestConversion(
From from, DataTypeConversionFlags flags = DataTypeConversionFlags{}) {
SCOPED_TRACE(
StrCat("TestConversion<To=", dtype_v<To>, ", From=", dtype_v<From>, ">")
.c_str());
flags = flags | kSupported;
if constexpr (!std::is_same_v<To, From>) {
EXPECT_EQ(flags, (DataTypeConversionTraits<From, To>::flags));
}
EXPECT_EQ(!!(flags & kSafeAndImplicit),
(IsDataTypeConversionSupported<From, To, kSafeAndImplicit>));
EXPECT_TRUE((IsDataTypeConversionSupported<From, To>));
auto r = GetDataTypeConverter(dtype_v<From>, dtype_v<To>);
EXPECT_EQ(flags, r.flags);
To value;
absl::Status status;
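// Invoke the converter on one contiguous element; a return value other than
// 1 means the element could not be converted and `status` holds the reason.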
if ((*r.closure.function)[IterationBufferKind::kContiguous](
r.closure.context, {1, 1},
IterationBufferPointer(&from, Index(0), Index(0)),
IterationBufferPointer(&value, Index(0), Index(0)), &status) != 1) {
return GetElementCopyErrorStatus(std::move(status));
}
return value;
}
TEST(DataTypeConversionTest, Bool) {
EXPECT_EQ(false, TestConversion<bool_t>(false, kSafeAndImplicit | kIdentity |
kCanReinterpretCast));
EXPECT_EQ(true, TestConversion<bool_t>(true, kSafeAndImplicit | kIdentity |
kCanReinterpretCast));
EXPECT_EQ(0, TestConversion<int4_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<int4_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<int8_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<int8_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<int16_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<int16_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<int32_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<int32_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<int64_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<int64_t>(true, kSafeAndImplicit));
EXPECT_EQ(0u, TestConversion<uint8_t>(false, kSafeAndImplicit));
EXPECT_EQ(1u, TestConversion<uint8_t>(true, kSafeAndImplicit));
EXPECT_EQ(0u, TestConversion<uint16_t>(false, kSafeAndImplicit));
EXPECT_EQ(1u, TestConversion<uint16_t>(true, kSafeAndImplicit));
EXPECT_EQ(0u, TestConversion<uint32_t>(false, kSafeAndImplicit));
EXPECT_EQ(1u, TestConversion<uint32_t>(true, kSafeAndImplicit));
EXPECT_EQ(0u, TestConversion<uint64_t>(false, kSafeAndImplicit));
EXPECT_EQ(1u, TestConversion<uint64_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<float16_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<float16_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<bfloat16_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<bfloat16_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<float32_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<float32_t>(true, kSafeAndImplicit));
EXPECT_EQ(0, TestConversion<float64_t>(false, kSafeAndImplicit));
EXPECT_EQ(1, TestConversion<float64_t>(true, kSafeAndImplicit));
EXPECT_EQ(complex64_t(0),
TestConversion<complex64_t>(false, kSafeAndImplicit));
EXPECT_EQ(complex64_t(1),
TestConversion<complex64_t>(true, kSafeAndImplicit));
EXPECT_EQ(complex128_t(0),
TestConversion<complex128_t>(false, kSafeAndImplicit));
EXPECT_EQ(complex128_t(1),
TestConversion<complex128_t>(true, kSafeAndImplicit));
EXPECT_EQ(json_t(false), TestConversion<json_t>(false, kSafeAndImplicit));
EXPECT_EQ(json_t(true), TestConversion<json_t>(true, kSafeAndImplicit));
TestUnsupported<bool, string_t>();
TestUnsupported<bool, ustring_t>();
}
TEST(DataTypeConversionTest, Int4) {
using T = int4_t;
constexpr T pos{7};
constexpr T neg{-8};
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(static_cast<int8_t>(neg),
TestConversion<int8_t>(neg, kSafeAndImplicit));
EXPECT_EQ(static_cast<int8_t>(pos),
TestConversion<int8_t>(pos, kSafeAndImplicit));
EXPECT_EQ(static_cast<int16_t>(neg),
TestConversion<int16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(static_cast<int32_t>(neg),
TestConversion<int32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(static_cast<int64_t>(neg),
TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(static_cast<uint8_t>(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(static_cast<uint8_t>(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(static_cast<uint16_t>(neg), TestConversion<uint16_t>(neg));
EXPECT_EQ(static_cast<uint16_t>(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(static_cast<uint32_t>(neg), TestConversion<uint32_t>(neg));
EXPECT_EQ(static_cast<uint32_t>(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(static_cast<uint64_t>(neg), TestConversion<uint64_t>(neg));
EXPECT_EQ(static_cast<uint64_t>(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)),
TestConversion<complex64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("-8", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"-8"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Int8) {
using T = int8_t;
constexpr T pos = 42;
constexpr T neg = -42;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg),
TestConversion<int8_t>(
neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(int8_t(pos),
TestConversion<int8_t>(
pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg, kCanReinterpretCast));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos, kCanReinterpretCast));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)),
TestConversion<complex64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("-42", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"-42"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Uint8) {
using T = uint8_t;
constexpr T pos = 42;
constexpr T neg = -42;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg, kCanReinterpretCast));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos, kCanReinterpretCast));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint8_t(neg),
TestConversion<uint8_t>(
neg, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
EXPECT_EQ(uint8_t(pos),
TestConversion<uint8_t>(
pos, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos, kSafeAndImplicit));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos, kSafeAndImplicit));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)),
TestConversion<complex64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("214", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"214"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Int16) {
using T = int16_t;
constexpr T pos = 12345;
constexpr T neg = -12345;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(neg),
TestConversion<int16_t>(
neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg, kCanReinterpretCast));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos, kCanReinterpretCast));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg));
EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)),
TestConversion<complex64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("-12345", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"-12345"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Uint16) {
using T = uint16_t;
constexpr T pos = 12345;
constexpr T neg = -12345;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg, kCanReinterpretCast));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(neg),
TestConversion<uint16_t>(
neg, kCanReinterpretCast | kIdentity | kSafeAndImplicit));
EXPECT_EQ(uint16_t(pos),
TestConversion<uint16_t>(
pos, kCanReinterpretCast | kIdentity | kSafeAndImplicit));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos, kSafeAndImplicit));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(float16_t(neg), TestConversion<float16_t>(neg));
EXPECT_EQ(bfloat16_t(neg), TestConversion<bfloat16_t>(neg));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg, kSafeAndImplicit));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)),
TestConversion<complex64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("53191", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"53191"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Int32) {
using T = int32_t;
constexpr T pos = 123456789;
constexpr T neg = -123456789;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
EXPECT_EQ(int32_t(neg),
TestConversion<int32_t>(
neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg, kCanReinterpretCast));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos, kCanReinterpretCast));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(static_cast<float>(neg)),
TestConversion<float16_t>(neg));
EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
TestConversion<bfloat16_t>(neg));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("-123456789", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"-123456789"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Uint32) {
using T = uint32_t;
constexpr T pos = 123456789;
constexpr T neg = -123456789;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg, kCanReinterpretCast));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(neg),
TestConversion<uint32_t>(
neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(uint32_t(pos),
TestConversion<uint32_t>(
pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(float16_t(static_cast<float>(neg)),
TestConversion<float16_t>(neg));
EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
TestConversion<bfloat16_t>(neg));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
EXPECT_EQ(complex128_t(float64_t(neg)),
TestConversion<complex128_t>(neg, kSafeAndImplicit));
EXPECT_EQ("4171510507", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"4171510507"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Int64) {
using T = int64_t;
constexpr T pos = 123456789012345;
constexpr T neg = -123456789012345;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg));
EXPECT_EQ(int64_t(neg),
TestConversion<int64_t>(
neg, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(neg), TestConversion<uint64_t>(neg, kCanReinterpretCast));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos, kCanReinterpretCast));
EXPECT_EQ(float16_t(static_cast<float>(neg)),
TestConversion<float16_t>(neg));
EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
TestConversion<bfloat16_t>(neg));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg));
EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
EXPECT_EQ(complex128_t(float64_t(neg)), TestConversion<complex128_t>(neg));
EXPECT_EQ("-123456789012345", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"-123456789012345"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Uint64) {
using T = uint64_t;
constexpr T pos = 123456789012345;
constexpr T neg = -123456789012345;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(true, TestConversion<bool_t>(neg));
EXPECT_EQ(int4_t(neg), TestConversion<int4_t>(neg));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(neg), TestConversion<int8_t>(neg));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(neg), TestConversion<int16_t>(neg));
EXPECT_EQ(int32_t(neg), TestConversion<int32_t>(neg));
EXPECT_EQ(int64_t(neg), TestConversion<int64_t>(neg, kCanReinterpretCast));
EXPECT_EQ(uint8_t(neg), TestConversion<uint8_t>(neg));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(neg), TestConversion<uint16_t>(neg));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(neg), TestConversion<uint32_t>(neg));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(neg),
TestConversion<uint64_t>(
neg, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
EXPECT_EQ(uint64_t(pos),
TestConversion<uint64_t>(
pos, kCanReinterpretCast | kSafeAndImplicit | kIdentity));
EXPECT_EQ(float16_t(static_cast<float>(neg)),
TestConversion<float16_t>(neg));
EXPECT_EQ(bfloat16_t(static_cast<float>(neg)),
TestConversion<bfloat16_t>(neg));
EXPECT_EQ(float32_t(neg), TestConversion<float32_t>(neg));
EXPECT_EQ(float64_t(neg), TestConversion<float64_t>(neg));
EXPECT_EQ(complex64_t(float32_t(neg)), TestConversion<complex64_t>(neg));
EXPECT_EQ(complex128_t(float64_t(neg)), TestConversion<complex128_t>(neg));
EXPECT_EQ("18446620616920539271", TestConversion<string_t>(neg));
EXPECT_EQ(ustring_t{"18446620616920539271"}, TestConversion<ustring_t>(neg));
EXPECT_EQ(json_t(neg), TestConversion<json_t>(neg, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Float16) {
using T = float16_t;
const T pos(42.5);
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(static_cast<int4_t>(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(static_cast<int8_t>(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(static_cast<int16_t>(pos), TestConversion<int16_t>(pos));
EXPECT_EQ(static_cast<int32_t>(pos), TestConversion<int32_t>(pos));
EXPECT_EQ(static_cast<int64_t>(pos), TestConversion<int64_t>(pos));
EXPECT_EQ(static_cast<uint8_t>(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(static_cast<uint16_t>(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(static_cast<uint32_t>(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(static_cast<uint64_t>(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(static_cast<float16_t>(pos),
TestConversion<float16_t>(
pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(static_cast<bfloat16_t>(pos), TestConversion<bfloat16_t>(pos));
EXPECT_EQ(static_cast<float32_t>(pos),
TestConversion<float32_t>(pos, kSafeAndImplicit));
EXPECT_EQ(static_cast<float64_t>(pos),
TestConversion<float64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(pos)),
TestConversion<complex64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(pos)),
TestConversion<complex128_t>(pos, kSafeAndImplicit));
EXPECT_EQ("42.5", TestConversion<string_t>(pos));
EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
EXPECT_EQ(json_t(42.5), TestConversion<json_t>(pos, kSafeAndImplicit));
}
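// The five float8 variants share most conversion behavior, so they are
// exercised through one typed test suite; the branches below cover the few
// pairs whose flags differ.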
template <typename InternalFloat>
class InternalFloat8Test : public ::testing::Test {};
using InternalFloat8Types =
::testing::Types<float8_e4m3fn_t, float8_e4m3fnuz_t, float8_e4m3b11fnuz_t,
float8_e5m2_t, float8_e5m2fnuz_t>;
TYPED_TEST_SUITE(InternalFloat8Test, InternalFloat8Types);
TYPED_TEST(InternalFloat8Test, DataTypeConversionTest_InternalFloat8Types) {
using T = TypeParam;
const T pos(3.5);
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(T(pos), TestConversion<T>(pos, kSafeAndImplicit | kIdentity |
kCanReinterpretCast));
if (!std::is_same_v<T, float8_e4m3fn_t>) {
EXPECT_EQ(float8_e4m3fn_t(pos), TestConversion<float8_e4m3fn_t>(pos));
}
if (!std::is_same_v<T, float8_e4m3fnuz_t>) {
EXPECT_EQ(float8_e4m3fnuz_t(pos), TestConversion<float8_e4m3fnuz_t>(pos));
}
if (!std::is_same_v<T, float8_e4m3b11fnuz_t>) {
EXPECT_EQ(float8_e4m3b11fnuz_t(pos),
TestConversion<float8_e4m3b11fnuz_t>(pos));
}
if (!std::is_same_v<T, float8_e5m2fnuz_t>) {
if (std::is_same_v<T, float8_e5m2_t>) {
EXPECT_EQ(float8_e5m2fnuz_t(pos),
TestConversion<float8_e5m2fnuz_t>(pos, kSafeAndImplicit));
} else {
EXPECT_EQ(float8_e5m2fnuz_t(pos), TestConversion<float8_e5m2fnuz_t>(pos));
}
}
if (!std::is_same_v<T, float8_e5m2_t>) {
EXPECT_EQ(float8_e5m2_t(pos), TestConversion<float8_e5m2_t>(pos));
}
if (std::is_same_v<T, float8_e5m2fnuz_t>) {
EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
} else {
EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos, kSafeAndImplicit));
}
EXPECT_EQ(bfloat16_t(pos), TestConversion<bfloat16_t>(pos, kSafeAndImplicit));
EXPECT_EQ(float32_t(pos), TestConversion<float32_t>(pos, kSafeAndImplicit));
EXPECT_EQ(float64_t(pos), TestConversion<float64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(pos)),
TestConversion<complex64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(pos)),
TestConversion<complex128_t>(pos, kSafeAndImplicit));
EXPECT_EQ("3.5", TestConversion<string_t>(pos));
EXPECT_EQ(ustring_t{"3.5"}, TestConversion<ustring_t>(pos));
EXPECT_EQ(json_t(3.5), TestConversion<json_t>(pos, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Bfloat16) {
using T = bfloat16_t;
const T pos(42.5);
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
EXPECT_EQ(bfloat16_t(pos),
TestConversion<bfloat16_t>(
pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(float32_t(pos), TestConversion<float32_t>(pos, kSafeAndImplicit));
EXPECT_EQ(float64_t(pos), TestConversion<float64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(pos)),
TestConversion<complex64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(pos)),
TestConversion<complex128_t>(pos, kSafeAndImplicit));
EXPECT_EQ("42.5", TestConversion<string_t>(pos));
EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
EXPECT_EQ(json_t(42.5), TestConversion<json_t>(pos, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Float32) {
using T = float32_t;
constexpr T pos = 42.5;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
EXPECT_EQ(bfloat16_t(pos), TestConversion<bfloat16_t>(pos));
EXPECT_EQ(float32_t(pos),
TestConversion<float32_t>(
pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(float64_t(pos), TestConversion<float64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex64_t(float32_t(pos)),
TestConversion<complex64_t>(pos, kSafeAndImplicit));
EXPECT_EQ(complex128_t(float64_t(pos)),
TestConversion<complex128_t>(pos, kSafeAndImplicit));
EXPECT_EQ("42.5", TestConversion<string_t>(pos));
EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
EXPECT_EQ(json_t(pos), TestConversion<json_t>(pos, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Float64) {
using T = float64_t;
constexpr T pos = 42.5;
EXPECT_EQ(false, TestConversion<bool_t>(T(0)));
EXPECT_EQ(true, TestConversion<bool_t>(pos));
EXPECT_EQ(int4_t(pos), TestConversion<int4_t>(pos));
EXPECT_EQ(int8_t(pos), TestConversion<int8_t>(pos));
EXPECT_EQ(int16_t(pos), TestConversion<int16_t>(pos));
EXPECT_EQ(int32_t(pos), TestConversion<int32_t>(pos));
EXPECT_EQ(int64_t(pos), TestConversion<int64_t>(pos));
EXPECT_EQ(uint8_t(pos), TestConversion<uint8_t>(pos));
EXPECT_EQ(uint16_t(pos), TestConversion<uint16_t>(pos));
EXPECT_EQ(uint32_t(pos), TestConversion<uint32_t>(pos));
EXPECT_EQ(uint64_t(pos), TestConversion<uint64_t>(pos));
EXPECT_EQ(float16_t(pos), TestConversion<float16_t>(pos));
EXPECT_EQ(bfloat16_t(pos), TestConversion<bfloat16_t>(pos));
EXPECT_EQ(float32_t(pos), TestConversion<float32_t>(pos));
EXPECT_EQ(float64_t(pos),
TestConversion<float64_t>(
pos, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(complex64_t(float32_t(pos)), TestConversion<complex64_t>(pos));
EXPECT_EQ(complex128_t(float64_t(pos)),
TestConversion<complex128_t>(pos, kSafeAndImplicit));
EXPECT_EQ("42.5", TestConversion<string_t>(pos));
EXPECT_EQ(ustring_t{"42.5"}, TestConversion<ustring_t>(pos));
EXPECT_EQ(json_t(pos), TestConversion<json_t>(pos, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Complex64) {
using T = complex64_t;
constexpr T value(42.5, 43.5);
EXPECT_EQ(int4_t(value.real()), TestConversion<int4_t>(value));
EXPECT_EQ(int8_t(value.real()), TestConversion<int8_t>(value));
EXPECT_EQ(int16_t(value.real()), TestConversion<int16_t>(value));
EXPECT_EQ(int32_t(value.real()), TestConversion<int32_t>(value));
EXPECT_EQ(int64_t(value.real()), TestConversion<int64_t>(value));
EXPECT_EQ(uint8_t(value.real()), TestConversion<uint8_t>(value));
EXPECT_EQ(uint16_t(value.real()), TestConversion<uint16_t>(value));
EXPECT_EQ(uint32_t(value.real()), TestConversion<uint32_t>(value));
EXPECT_EQ(uint64_t(value.real()), TestConversion<uint64_t>(value));
EXPECT_EQ(float16_t(value.real()), TestConversion<float16_t>(value));
EXPECT_EQ(bfloat16_t(value.real()), TestConversion<bfloat16_t>(value));
EXPECT_EQ(float32_t(value.real()), TestConversion<float32_t>(value));
EXPECT_EQ(float64_t(value.real()), TestConversion<float64_t>(value));
EXPECT_EQ(complex64_t(value),
TestConversion<complex64_t>(
value, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ(complex128_t(value),
TestConversion<complex128_t>(value, kSafeAndImplicit));
EXPECT_EQ("(42.5,43.5)", TestConversion<string_t>(value));
EXPECT_EQ(ustring_t{"(42.5,43.5)"}, TestConversion<ustring_t>(value));
EXPECT_EQ(json_t(json_t::array_t{value.real(), value.imag()}),
TestConversion<json_t>(value, kSafeAndImplicit));
TestUnsupported<T, bool>();
}
TEST(DataTypeConversionTest, Complex128) {
using T = complex128_t;
constexpr T value(42.5, 43.5);
EXPECT_EQ(int4_t(value.real()), TestConversion<int4_t>(value));
EXPECT_EQ(int8_t(value.real()), TestConversion<int8_t>(value));
EXPECT_EQ(int16_t(value.real()), TestConversion<int16_t>(value));
EXPECT_EQ(int32_t(value.real()), TestConversion<int32_t>(value));
EXPECT_EQ(int64_t(value.real()), TestConversion<int64_t>(value));
EXPECT_EQ(uint8_t(value.real()), TestConversion<uint8_t>(value));
EXPECT_EQ(uint16_t(value.real()), TestConversion<uint16_t>(value));
EXPECT_EQ(uint32_t(value.real()), TestConversion<uint32_t>(value));
EXPECT_EQ(uint64_t(value.real()), TestConversion<uint64_t>(value));
EXPECT_EQ(float16_t(value.real()), TestConversion<float16_t>(value));
EXPECT_EQ(bfloat16_t(value.real()), TestConversion<bfloat16_t>(value));
EXPECT_EQ(float32_t(value.real()), TestConversion<float32_t>(value));
EXPECT_EQ(float64_t(value.real()), TestConversion<float64_t>(value));
EXPECT_EQ(complex64_t(value), TestConversion<complex64_t>(value));
EXPECT_EQ(complex128_t(value),
TestConversion<complex128_t>(
value, kSafeAndImplicit | kIdentity | kCanReinterpretCast));
EXPECT_EQ("(42.5,43.5)", TestConversion<string_t>(value));
EXPECT_EQ(ustring_t{"(42.5,43.5)"}, TestConversion<ustring_t>(value));
EXPECT_EQ(json_t(json_t::array_t{value.real(), value.imag()}),
TestConversion<json_t>(value, kSafeAndImplicit));
TestUnsupported<T, bool>();
}
TEST(DataTypeConversionTest, String) {
using T = string_t;
T value = "test";
T invalid_utf8 = "test\xa0";
TestUnsupported<T, bool>();
TestUnsupported<T, int4_t>();
TestUnsupported<T, int8_t>();
TestUnsupported<T, uint8_t>();
TestUnsupported<T, int16_t>();
TestUnsupported<T, uint16_t>();
TestUnsupported<T, int32_t>();
TestUnsupported<T, uint32_t>();
TestUnsupported<T, int64_t>();
TestUnsupported<T, uint64_t>();
TestUnsupported<T, float16_t>();
TestUnsupported<T, bfloat16_t>();
TestUnsupported<T, float32_t>();
TestUnsupported<T, float64_t>();
TestUnsupported<T, complex64_t>();
TestUnsupported<T, complex128_t>();
EXPECT_EQ(value,
TestConversion<string_t>(
value, kSafeAndImplicit | kCanReinterpretCast | kIdentity));
EXPECT_EQ(ustring_t{value}, TestConversion<ustring_t>(value));
EXPECT_THAT(TestConversion<ustring_t>(invalid_utf8),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid UTF-8 sequence encountered"));
EXPECT_EQ(json_t("test"), TestConversion<json_t>(value));
EXPECT_THAT(TestConversion<json_t>(invalid_utf8),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid UTF-8 sequence encountered"));
}
TEST(DataTypeConversionTest, Ustring) {
using T = ustring_t;
T value{"test"};
TestUnsupported<T, bool>();
TestUnsupported<T, int4_t>();
TestUnsupported<T, int8_t>();
TestUnsupported<T, uint8_t>();
TestUnsupported<T, int16_t>();
TestUnsupported<T, uint16_t>();
TestUnsupported<T, int32_t>();
TestUnsupported<T, uint32_t>();
TestUnsupported<T, int64_t>();
TestUnsupported<T, uint64_t>();
TestUnsupported<T, float16_t>();
TestUnsupported<T, bfloat16_t>();
TestUnsupported<T, float32_t>();
TestUnsupported<T, float64_t>();
TestUnsupported<T, complex64_t>();
TestUnsupported<T, complex128_t>();
EXPECT_EQ(value.utf8, TestConversion<string_t>(
value, kSafeAndImplicit | kCanReinterpretCast));
EXPECT_EQ(value,
TestConversion<ustring_t>(
value, kSafeAndImplicit | kCanReinterpretCast | kIdentity));
EXPECT_EQ(json_t("test"), TestConversion<json_t>(value, kSafeAndImplicit));
}
TEST(DataTypeConversionTest, Json) {
EXPECT_THAT(TestConversion<bool_t>(json_t("hello")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<bool_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<int4_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<int8_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<int16_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<int32_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<int64_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<uint8_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<uint16_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<uint32_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<uint64_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<float16_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<bfloat16_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<float32_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<float64_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<string_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<ustring_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<string_t>(json_t(nullptr)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_EQ(false, TestConversion<bool_t>(json_t(false)));
EXPECT_EQ(false, TestConversion<bool_t>(json_t("false")));
EXPECT_EQ(true, TestConversion<bool_t>(json_t(true)));
EXPECT_EQ(true, TestConversion<bool_t>(json_t("true")));
EXPECT_EQ(int4_t(-8), TestConversion<int4_t>(json_t(-8)));
EXPECT_EQ(int8_t(58), TestConversion<int8_t>(json_t(58)));
EXPECT_EQ(int16_t(1234), TestConversion<int16_t>(json_t(1234)));
EXPECT_EQ(int16_t(1234), TestConversion<int16_t>(json_t("1234")));
EXPECT_EQ(int32_t(123456789), TestConversion<int32_t>(json_t(123456789)));
EXPECT_EQ(int64_t(1234567890123),
TestConversion<int64_t>(json_t(1234567890123)));
EXPECT_EQ(uint8_t(254), TestConversion<uint8_t>(json_t(254u)));
EXPECT_EQ(uint16_t(45123), TestConversion<uint16_t>(json_t(45123u)));
EXPECT_EQ(uint32_t(4012356789),
TestConversion<uint32_t>(json_t(4012356789u)));
EXPECT_EQ(uint64_t(40123567891234),
TestConversion<uint64_t>(json_t(40123567891234)));
EXPECT_EQ(float16_t(42.5), TestConversion<float16_t>(json_t(42.5)));
EXPECT_EQ(float16_t(42.5), TestConversion<float16_t>(json_t("42.5")));
EXPECT_EQ(bfloat16_t(42.5), TestConversion<bfloat16_t>(json_t(42.5)));
EXPECT_EQ(bfloat16_t(42.5), TestConversion<bfloat16_t>(json_t("42.5")));
EXPECT_EQ(float32_t(42.5), TestConversion<float32_t>(json_t(42.5)));
EXPECT_EQ(float64_t(42.5), TestConversion<float64_t>(json_t(42.5)));
EXPECT_EQ(float64_t(42.5), TestConversion<float64_t>(json_t("42.5")));
TestUnsupported<json_t, complex64_t>();
TestUnsupported<json_t, complex128_t>();
EXPECT_EQ("hello", TestConversion<string_t>(json_t("hello")));
EXPECT_THAT(TestConversion<string_t>(json_t(7)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<string_t>(json_t(true)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<string_t>(json_t(1.5)),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(TestConversion<string_t>(json_t::array({2, 3})),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_EQ(ustring_t{"hello"}, TestConversion<ustring_t>(json_t("hello")));
EXPECT_EQ(json_t("hello"), TestConversion<json_t>(
json_t("hello"), kSafeAndImplicit | kIdentity |
kCanReinterpretCast));
}
TEST(GetDataTypeConverterOrErrorTest, Basic) {
TENSORSTORE_EXPECT_OK(
GetDataTypeConverterOrError(dtype_v<int32_t>, dtype_v<int32_t>));
TENSORSTORE_EXPECT_OK(GetDataTypeConverterOrError(
dtype_v<int32_t>, dtype_v<int32_t>, kIdentity));
TENSORSTORE_EXPECT_OK(GetDataTypeConverterOrError(
dtype_v<int32_t>, dtype_v<int64_t>, kSafeAndImplicit));
TENSORSTORE_EXPECT_OK(GetDataTypeConverterOrError(
dtype_v<int32_t>, dtype_v<uint32_t>, kCanReinterpretCast));
EXPECT_THAT(
GetDataTypeConverterOrError(dtype_v<json_t>, dtype_v<complex64_t>),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot convert json -> complex64"));
EXPECT_THAT(
GetDataTypeConverterOrError(dtype_v<uint32_t>, dtype_v<int32_t>,
kSafeAndImplicit),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Explicit data type conversion required to convert uint32 -> int32"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/data_type_conversion.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/data_type_conversion_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
19fab0f1-fdbb-4a45-9421-72aa1b6cb629 | cpp | tensorflow/tensorflow | optimization_registry | tensorflow/core/common_runtime/optimization_registry.cc | tensorflow/core/common_runtime/optimization_registry_test.cc | #include "tensorflow/core/common_runtime/optimization_registry.h"
#include <string>
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
OptimizationPassRegistry* OptimizationPassRegistry::Global() {
static OptimizationPassRegistry* global_optimization_registry =
new OptimizationPassRegistry;
return global_optimization_registry;
}
void OptimizationPassRegistry::Register(
Grouping grouping, int phase, std::unique_ptr<GraphOptimizationPass> pass) {
groups_[grouping][phase].push_back(std::move(pass));
}
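// Passes are normally registered at static-initialization time through the
// REGISTER_OPTIMIZATION macro rather than by calling Register() directly,
// e.g. (MyPass being a hypothetical GraphOptimizationPass subclass):
//   REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 0, MyPass);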
Status OptimizationPassRegistry::RunGrouping(
Grouping grouping, const GraphOptimizationPassOptions& options) {
const char* grouping_name = GetGroupingName(grouping);
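// Dumps the main graph and any partition graphs under the given group/tag so
// the state before and after each pass (or pass group) can be inspected.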
auto dump_graph = [&](std::string func_name, const std::string& group,
const std::string& tag, bool bypass_filter) {
if (func_name.empty()) func_name = "unknown_graph";
if (options.graph) {
DEBUG_DATA_DUMPER()->DumpGraph(func_name, group, tag,
options.graph->get(), options.flib_def,
bypass_filter);
}
if (options.partition_graphs) {
for (auto& part : *options.partition_graphs) {
DEBUG_DATA_DUMPER()->DumpGraph(func_name + "_partition_" + part.first,
group, tag, part.second.get(),
options.flib_def, bypass_filter);
}
}
};
dump_graph(options.debug_filename_prefix, kDebugGroupMain,
strings::StrCat("before_opt_group_", grouping_name),
VLOG_IS_ON(3));
auto group = groups_.find(grouping);
if (group != groups_.end()) {
static const char* kGraphOptimizationCategory = "GraphOptimizationPass";
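// Time the group as a whole (pass name "*") and, below, each pass separately.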
tensorflow::metrics::ScopedCounter<2> group_timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kGraphOptimizationCategory, "*"});
for (auto& phase : group->second) {
VLOG(1) << "Running optimization phase " << phase.first;
for (auto& pass : phase.second) {
VLOG(1) << "Running optimization pass: " << pass->name();
if (options.graph) {
VLOG(1) << "Graph #nodes " << (*options.graph)->num_nodes()
<< " #edges " << (*options.graph)->num_edges();
}
tensorflow::metrics::ScopedCounter<2> pass_timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{kGraphOptimizationCategory, pass->name()});
Status s = pass->Run(options);
if (!s.ok()) return s;
pass_timings.ReportAndStop();
dump_graph(options.debug_filename_prefix, kDebugGroupGraphOptPass,
strings::StrCat("after_opt_group_", grouping_name, "_phase_",
phase.first, "_", pass->name()),
VLOG_IS_ON(5));
}
}
group_timings.ReportAndStop();
}
VLOG(1) << "Finished optimization of a group " << grouping;
if (options.graph && group != groups_.end()) {
VLOG(1) << "Graph #nodes " << (*options.graph)->num_nodes() << " #edges "
<< (*options.graph)->num_edges();
}
dump_graph(options.debug_filename_prefix, kDebugGroupMain,
strings::StrCat("after_opt_group_", grouping_name),
VLOG_IS_ON(3) || (VLOG_IS_ON(2) &&
grouping == Grouping::POST_REWRITE_FOR_EXEC));
return absl::OkStatus();
}
void OptimizationPassRegistry::LogGrouping(Grouping grouping, int vlog_level) {
auto group = groups_.find(grouping);
if (group != groups_.end()) {
for (auto& phase : group->second) {
for (auto& pass : phase.second) {
VLOG(vlog_level) << "Registered optimization pass grouping " << grouping
<< " phase " << phase.first << ": " << pass->name();
}
}
}
}
void OptimizationPassRegistry::LogAllGroupings(int vlog_level) {
for (auto group = groups_.begin(); group != groups_.end(); ++group) {
LogGrouping(group->first, vlog_level);
}
}
} | #include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestOptimization : public GraphOptimizationPass {
public:
static int count_;
Status Run(const GraphOptimizationPassOptions& options) override {
++count_;
return absl::OkStatus();
}
};
int TestOptimization::count_ = 0;
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 1,
TestOptimization);
TEST(OptimizationRegistry, OptimizationPass) {
EXPECT_EQ(0, TestOptimization::count_);
GraphOptimizationPassOptions options;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
options.graph = &graph;
std::unique_ptr<FunctionLibraryDefinition> flib_def(
new FunctionLibraryDefinition(OpRegistry::Global(),
FunctionDefLibrary()));
options.flib_def = flib_def.get();
EXPECT_EQ(absl::OkStatus(),
OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::PRE_PLACEMENT, options));
EXPECT_EQ(1, TestOptimization::count_);
}
class UpdateFuncLibPass : public GraphOptimizationPass {
public:
Status Run(const GraphOptimizationPassOptions& options) override {
return options.flib_def->AddFunctionDef(test::function::WXPlusB());
}
};
REGISTER_OPTIMIZATION(OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, 1,
UpdateFuncLibPass);
class OptimizationPassTest : public ::testing::Test {
public:
OptimizationPassTest() {
FunctionDefLibrary func_def_lib;
*func_def_lib.add_function() = test::function::XTimesTwo();
flib_def_.reset(
new FunctionLibraryDefinition(OpRegistry::Global(), func_def_lib));
}
void RunPass() {
GraphOptimizationPassOptions options;
options.flib_def = flib_def_.get();
EXPECT_EQ(absl::OkStatus(),
OptimizationPassRegistry::Global()->RunGrouping(
OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, options));
}
const FunctionDef* GetFunctionDef(const string& func) const {
return flib_def_->Find(func);
}
private:
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
};
TEST_F(OptimizationPassTest, UpdateFuncLibPass) {
RunPass();
auto f1 = GetFunctionDef("XTimesTwo");
ASSERT_NE(f1, nullptr);
EXPECT_EQ(test::function::XTimesTwo().DebugString(), f1->DebugString());
auto f2 = GetFunctionDef("WXPlusB");
ASSERT_NE(f2, nullptr);
EXPECT_EQ(test::function::WXPlusB().DebugString(), f2->DebugString());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimization_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimization_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ea00a1a5-d830-4939-b4a6-4a3529b5083a | cpp | tensorflow/tensorflow | freeze_requantization_ranges | tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc | tensorflow/tools/graph_transforms/freeze_requantization_ranges_test.cc | #include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
struct MinMaxRecord {
string name;
float min;
float max;
};
Status ExtractMinMaxRecords(const string& log_file_name,
std::vector<MinMaxRecord>* records) {
string file_data;
TF_RETURN_IF_ERROR(
ReadFileToString(Env::Default(), log_file_name, &file_data));
const string print_suffix("__print__");
const string requant_prefix("__requant_min_max:");
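// A line of interest has the shape:
//   ...;<node_name>__print__;__requant_min_max:[<min>][<max>]
// Lines that do not parse as this shape are silently skipped.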
std::vector<string> file_lines = str_util::Split(file_data, '\n');
for (const string& file_line : file_lines) {
if (!absl::StrContains(file_line, print_suffix + ";" + requant_prefix)) {
continue;
}
std::vector<string> line_parts = str_util::Split(file_line, ';');
if (line_parts.size() < 2) {
continue;
}
bool min_max_found = false;
int min_max_index = 0;  // Only meaningful when min_max_found is true.
for (int i = 1; i < line_parts.size(); ++i) {
if (absl::StartsWith(line_parts[i], requant_prefix)) {
min_max_found = true;
min_max_index = i;
}
}
if (!min_max_found) {
continue;
}
string min_max_string = line_parts[min_max_index];
std::vector<string> min_max_parts = str_util::Split(min_max_string, '[');
if ((min_max_parts.size() != 3) || (min_max_parts[0] != requant_prefix)) {
continue;
}
string min_string = min_max_parts[1];
std::vector<string> min_string_parts = str_util::Split(min_string, ']');
if (min_string_parts.size() != 2) {
continue;
}
string min_number_string = min_string_parts[0];
float min;
if (!strings::safe_strtof(min_number_string.c_str(), &min)) {
continue;
}
string max_string = min_max_parts[2];
std::vector<string> max_string_parts = str_util::Split(max_string, ']');
if (max_string_parts.size() != 2) {
continue;
}
string max_number_string = max_string_parts[0];
float max;
if (!strings::safe_strtof(max_number_string.c_str(), &max)) {
continue;
}
StringPiece name_string = line_parts[min_max_index - 1];
if (!str_util::EndsWith(name_string, print_suffix)) {
continue;
}
string name(
name_string.substr(0, name_string.size() - print_suffix.size()));
records->push_back({name, min, max});
}
return OkStatus();
}
Status FreezeRequantizationRanges(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
string min_max_log_file;
TF_RETURN_IF_ERROR(
context.GetOneStringParameter("min_max_log_file", "", &min_max_log_file));
if (min_max_log_file.empty()) {
return errors::InvalidArgument(
"You must pass a file name to min_max_log_file");
}
float min_percentile;
TF_RETURN_IF_ERROR(
context.GetOneFloatParameter("min_percentile", 5.0f, &min_percentile));
float max_percentile;
TF_RETURN_IF_ERROR(
context.GetOneFloatParameter("max_percentile", 5.0f, &max_percentile));
std::vector<MinMaxRecord> records;
TF_RETURN_IF_ERROR(ExtractMinMaxRecords(min_max_log_file, &records));
if (records.empty()) {
return errors::InvalidArgument(
"No min/max range logs were found in the log file");
}
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(input_graph_def, &node_map);
bool any_missing_nodes = false;
std::map<string, std::vector<MinMaxRecord>> records_by_node;
for (const MinMaxRecord& record : records) {
records_by_node[record.name].push_back(record);
if (!node_map.count(record.name)) {
any_missing_nodes = true;
LOG(WARNING) << "Node from log not found in graph: " << record.name;
}
}
if (any_missing_nodes) {
return errors::InvalidArgument(
"Nodes were found in the log file that aren't present in the graph");
}
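// For every instrumented node, choose robust min/max bounds by discarding
// the requested percentile of outliers from each end of the observed ranges.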
std::map<string, std::pair<float, float>> range_for_nodes;
for (const auto& record_info : records_by_node) {
const string& name = record_info.first;
const std::vector<MinMaxRecord> records = record_info.second;
std::vector<float> mins;
std::vector<float> maxs;
for (const MinMaxRecord& record : records) {
mins.push_back(record.min);
maxs.push_back(record.max);
}
std::sort(mins.begin(), mins.end());
std::sort(maxs.begin(), maxs.end());
    int min_index = std::round(mins.size() * (min_percentile / 100.0f));
    if (min_index < 0) {
      min_index = 0;
    }
    if (min_index > static_cast<int>(mins.size()) - 1) {
      min_index = static_cast<int>(mins.size()) - 1;
    }
    int max_index =
        std::round(maxs.size() * (1.0f - (max_percentile / 100.0f)));
    if (max_index < 0) {
      max_index = 0;
    }
    if (max_index > static_cast<int>(maxs.size()) - 1) {
      max_index = static_cast<int>(maxs.size()) - 1;
    }
const float min = mins[min_index];
const float max = maxs[max_index];
range_for_nodes[name] = {min, max};
}
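  // Build the output graph: matched RequantizationRange nodes are dropped,
  // their two outputs rerouted to freshly created min/max Const nodes, and
  // every other node is copied through unchanged.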
std::map<string, string> inputs_to_rename;
GraphDef frozen_graph_def;
for (const NodeDef& node : input_graph_def.node()) {
if (range_for_nodes.count(node.name())) {
if (node.op() != "RequantizationRange") {
return errors::InvalidArgument(
"Node is expected to be a RequantizationRange op: ", node.name(),
", but is: ", node.op());
}
const float min_value = range_for_nodes.at(node.name()).first;
NodeDef* min_node = frozen_graph_def.mutable_node()->Add();
min_node->set_op("Const");
min_node->set_name(node.name() + "/frozen_min");
SetNodeAttr("dtype", DT_FLOAT, min_node);
Tensor min_tensor(DT_FLOAT, {});
min_tensor.flat<float>()(0) = min_value;
SetNodeTensorAttr<float>("value", min_tensor, min_node);
inputs_to_rename[node.name() + ":0"] = min_node->name() + ":0";
const float max_value = range_for_nodes.at(node.name()).second;
NodeDef* max_node = frozen_graph_def.mutable_node()->Add();
max_node->set_op("Const");
max_node->set_name(node.name() + "/frozen_max");
SetNodeAttr("dtype", DT_FLOAT, max_node);
Tensor max_tensor(DT_FLOAT, {});
max_tensor.flat<float>()(0) = max_value;
SetNodeTensorAttr<float>("value", max_tensor, max_node);
inputs_to_rename[node.name() + ":1"] = max_node->name() + ":0";
} else {
NodeDef* new_node = frozen_graph_def.mutable_node()->Add();
*new_node = node;
}
}
return RenameNodeInputs(frozen_graph_def, inputs_to_rename,
std::unordered_set<string>(), output_graph_def);
}
REGISTER_GRAPH_TRANSFORM("freeze_requantization_ranges",
FreezeRequantizationRanges);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status FreezeRequantizationRanges(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
struct MinMaxRecord {
string name;
float min;
float max;
};
Status ExtractMinMaxRecords(const string& log_file_name,
std::vector<MinMaxRecord>* records);
class FreezeRequantizationRangesTest : public ::testing::Test {
protected:
void TestFreezeRequantizationRanges() {
auto root = tensorflow::Scope::NewRootScope();
using namespace ::tensorflow::ops;
Tensor quantized_tensor(DT_QUINT8, TensorShape({1, 6}));
test::FillValues<quint8>(&quantized_tensor, {0, 0, 0, 0, 0, 0});
Output quantized_op = Const(root.WithOpName("quantized_op"),
Input::Initializer(quantized_tensor));
Tensor quantized_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_min_tensor, {2.0f});
Output quantized_min_op = Const(root.WithOpName("quantized_min_op"),
Input::Initializer(quantized_min_tensor));
Tensor quantized_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&quantized_max_tensor, {2.0f});
Output quantized_max_op = Const(root.WithOpName("quantized_max_op"),
                                    Input::Initializer(quantized_max_tensor));
Tensor offset_tensor(DT_QUINT8, TensorShape({6}));
test::FillValues<quint8>(&offset_tensor, {1, 2, 3, 4, 5, 6});
Output offset_op =
Const(root.WithOpName("offset_op"), Input::Initializer(offset_tensor));
Tensor offset_min_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_min_tensor, {0.0f});
Output offset_min_op = Const(root.WithOpName("offset_min_op"),
Input::Initializer(offset_min_tensor));
Tensor offset_max_tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&offset_max_tensor, {255.0f});
Output offset_max_op = Const(root.WithOpName("offset_max_op"),
Input::Initializer(offset_max_tensor));
QuantizedBiasAdd quantized_bias_add_op(
root.WithOpName("bias_add_op"), quantized_op, offset_op,
quantized_min_op, quantized_max_op, offset_min_op, offset_max_op,
DT_QINT32);
RequantizationRange requantization_range_op(
root.WithOpName("requantization_range_op"),
quantized_bias_add_op.output, quantized_bias_add_op.min_out,
quantized_bias_add_op.max_out);
Requantize requantize_op(
root.WithOpName("requantize_op"), quantized_bias_add_op.output,
quantized_bias_add_op.min_out, quantized_bias_add_op.max_out,
requantization_range_op.output_min, requantization_range_op.output_max,
DT_QUINT8);
Output dequantize_op =
Dequantize(root.WithOpName("dequantize_op"), requantize_op.output,
requantize_op.output_min, requantize_op.output_max);
GraphDef graph_def;
TF_ASSERT_OK(root.ToGraphDef(&graph_def));
const string min_max_log_file_name =
io::JoinPath(testing::TmpDir(), "min_max_log_file.txt");
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(
Env::Default()->NewWritableFile(min_max_log_file_name, &file));
TF_ASSERT_OK(file->Append("Something irrelevant\n"));
TF_ASSERT_OK(
file->Append("[SomePrefix] "
";requantization_range_op__print__;__requant_min_max:"
"[-2.4313571][10.584145]\n"));
TF_ASSERT_OK(file->Append("Something else irrelevant\n"));
}
TransformFuncContext context;
context.input_names = {};
context.output_names = {"dequantize_op"};
context.params = {{"min_max_log_file", {min_max_log_file_name}}};
GraphDef frozen_graph_def;
TF_EXPECT_OK(
FreezeRequantizationRanges(graph_def, context, &frozen_graph_def));
std::map<string, const NodeDef*> node_map;
MapNamesToNodes(frozen_graph_def, &node_map);
EXPECT_EQ(0, node_map.count("requantization_range_op"));
EXPECT_EQ(1, node_map.count("requantize_op"));
const string& min_input =
NodeNameFromInput(node_map.at("requantize_op")->input(3));
ASSERT_EQ(1, node_map.count(min_input));
EXPECT_EQ("Const", node_map.at(min_input)->op());
const string& max_input =
NodeNameFromInput(node_map.at("requantize_op")->input(4));
ASSERT_EQ(1, node_map.count(max_input));
EXPECT_EQ("Const", node_map.at(max_input)->op());
std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
TF_ASSERT_OK(original_session->Create(graph_def));
std::vector<Tensor> original_outputs;
TF_ASSERT_OK(
original_session->Run({}, {"dequantize_op"}, {}, &original_outputs));
std::unique_ptr<Session> frozen_session(NewSession(SessionOptions()));
TF_ASSERT_OK(frozen_session->Create(frozen_graph_def));
std::vector<Tensor> frozen_outputs;
TF_ASSERT_OK(
frozen_session->Run({}, {"dequantize_op"}, {}, &frozen_outputs));
ASSERT_EQ(original_outputs.size(), frozen_outputs.size());
ASSERT_EQ(1, frozen_outputs.size());
test::ExpectTensorNear<float>(original_outputs[0], frozen_outputs[0], 0.5);
}
void TestExtractMinMaxRecords() {
const string min_max_log_file_name =
io::JoinPath(testing::TmpDir(), "min_max_log_file2.txt");
{
std::unique_ptr<WritableFile> file;
TF_ASSERT_OK(
Env::Default()->NewWritableFile(min_max_log_file_name, &file));
TF_ASSERT_OK(file->Append("Something irrelevant\n"));
TF_ASSERT_OK(
file->Append("[SomePrefix] "
";requantization_range_op__print__;__requant_min_max:"
"[-2.4313571][10.584145]\n"));
TF_ASSERT_OK(file->Append("Something else irrelevant\n"));
TF_ASSERT_OK(file->Append(
"[SomeOtherPrefix] "
";other_requantization_range_op__print__;__requant_min_max:"
"[-1.0][2.0]\n"));
TF_ASSERT_OK(file->Append("Something else irrelevant\n"));
TF_ASSERT_OK(
file->Append("[SomePrefix] "
";requantization_range_op__print__;__requant_min_max:"
"[-1.bad][2.0]\n"));
}
std::vector<MinMaxRecord> records;
TF_ASSERT_OK(ExtractMinMaxRecords(min_max_log_file_name, &records));
ASSERT_EQ(2, records.size());
EXPECT_EQ("requantization_range_op", records[0].name);
EXPECT_NEAR(-2.4313571f, records[0].min, 1e-5f);
EXPECT_NEAR(10.584145f, records[0].max, 1e-5f);
EXPECT_EQ("other_requantization_range_op", records[1].name);
EXPECT_NEAR(-1.0f, records[1].min, 1e-5f);
EXPECT_NEAR(2.0f, records[1].max, 1e-5f);
}
};
TEST_F(FreezeRequantizationRangesTest, TestFreezeRequantizationRanges) {
TestFreezeRequantizationRanges();
}
TEST_F(FreezeRequantizationRangesTest, TestExtractMinMaxRecords) {
TestExtractMinMaxRecords();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/freeze_requantization_ranges.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/freeze_requantization_ranges_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d33973b-0f6d-4d9e-933b-378a9e121c71 | cpp | tensorflow/tensorflow | partition_assignment | third_party/xla/xla/service/spmd/partition_assignment.cc | third_party/xla/xla/service/spmd/partition_assignment_test.cc | #include "xla/service/spmd/partition_assignment.h"
#include <cstdint>
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/xla.pb.h"
namespace xla {
PartitioningAlgorithm::PartitioningAlgorithm(AlgorithmKind kind,
int64_t num_partitions) {
kind_ = kind;
CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
num_partitions_ = num_partitions;
}
absl::string_view PartitioningAlgorithm::name() const {
switch (kind_) {
case AlgorithmKind::kNoop:
default:
return "Noop";
}
}
const PartitioningAlgorithm::AlgorithmKind& PartitioningAlgorithm::kind()
const {
return kind_;
}
int64_t PartitioningAlgorithm::num_partitions() const {
return num_partitions_;
}
std::unique_ptr<PartitioningAlgorithm>
PartitioningAlgorithm::CreateNoopPartitioning(int64_t num_partitions) {
return std::make_unique<NoopPartitioning>(num_partitions);
}
NoopPartitioning::NoopPartitioning(int64_t num_partitions)
: PartitioningAlgorithm(AlgorithmKind::kNoop, num_partitions) {
VLOG(2) << "Created a no-op algorithm with the number of partitions: "
<< num_partitions;
}
absl::StatusOr<bool> NoopPartitioning::Run(HloModule* module) const {
VLOG(2) << "No-op algorithm was called to partition module: "
<< module->name();
return false;
}
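// The pass wrapper: picks a partitioning algorithm from the module's debug
// options (only the no-op algorithm is implemented so far) and hands the
// module to it.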
PartitionAssignment::PartitionAssignment(int64_t num_partitions) {
CHECK_GT(num_partitions, 1) << "Number of partitions must be at least two.";
num_partitions_ = num_partitions;
}
absl::string_view PartitionAssignment::name() const {
return "partitioning-assignment";
}
const PartitioningAlgorithm* PartitionAssignment::algorithm() {
return algorithm_.get();
}
int64_t PartitionAssignment::num_partitions() const { return num_partitions_; }
std::unique_ptr<PartitioningAlgorithm>
PartitionAssignment::ChoosePartitioningAlgorithm(
const HloModule& module) const {
auto algo = module.config().debug_options().xla_partitioning_algorithm();
CHECK_EQ(algo, DebugOptions::PARTITIONING_ALGORITHM_NOOP);
return PartitioningAlgorithm::CreateNoopPartitioning(num_partitions());
}
absl::StatusOr<bool> PartitionAssignment::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Running partition assignment on module " << module->name();
algorithm_ = ChoosePartitioningAlgorithm(*module);
return algorithm()->Run(module);
}
} | #include "xla/service/spmd/partition_assignment.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using PartitionAssignmentTest = HloTestBase;
TEST_F(PartitionAssignmentTest, NoopAlg) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,16]{1,0} parameter(0)
ROOT %copy = f32[16,16]{1,0} copy(%param0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_partitioning_algorithm(
DebugOptions::PARTITIONING_ALGORITHM_NOOP);
PartitionAssignment partition_assignment(16);
EXPECT_EQ(partition_assignment.algorithm(), nullptr);
TF_ASSERT_OK_AND_ASSIGN(bool changed, partition_assignment.Run(module.get()));
EXPECT_FALSE(changed);
EXPECT_NE(partition_assignment.algorithm(), nullptr);
EXPECT_EQ(partition_assignment.algorithm()->kind(),
PartitioningAlgorithm::AlgorithmKind::kNoop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/partition_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/partition_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d27c1299-07dc-48ba-9a98-b5439509c8ea | cpp | abseil/abseil-cpp | randen_hwaes | absl/random/internal/randen_hwaes.cc | absl/random/internal/randen_hwaes_test.cc | #include "absl/random/internal/randen_hwaes.h"
#include <cstdint>
#include <cstring>
#include "absl/base/attributes.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/platform.h"
#include "absl/random/internal/randen_traits.h"
#if ABSL_HAVE_ACCELERATED_AES
#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32) || \
defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
defined(ABSL_ARCH_AARCH64)
#define ABSL_RANDEN_HWAES_IMPL 1
#endif
#endif
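// Without a hardware AES implementation the entry points below are stubs
// that log the failed dispatch and abort; callers are expected to check CPU
// support before selecting this backend.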
#if !defined(ABSL_RANDEN_HWAES_IMPL)
#include <cstdio>
#include <cstdlib>
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
bool HasRandenHwAesImplementation() { return false; }
const void* RandenHwAes::GetKeys() {
const int d = ABSL_RANDOM_INTERNAL_AES_DISPATCH;
fprintf(stderr, "AES Hardware detection failed (%d).\n", d);
exit(1);
return nullptr;
}
void RandenHwAes::Absorb(const void*, void*) {
const int d = ABSL_RANDOM_INTERNAL_AES_DISPATCH;
fprintf(stderr, "AES Hardware detection failed (%d).\n", d);
exit(1);
}
void RandenHwAes::Generate(const void*, void*) {
const int d = ABSL_RANDOM_INTERNAL_AES_DISPATCH;
fprintf(stderr, "AES Hardware detection failed (%d).\n", d);
exit(1);
}
}
ABSL_NAMESPACE_END
}
#else
namespace {
using absl::random_internal::RandenTraits;
}
#if (defined(__clang__) || defined(__GNUC__))
#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
#define ABSL_TARGET_CRYPTO __attribute__((target("aes")))
#elif defined(ABSL_ARCH_PPC)
#define ABSL_TARGET_CRYPTO __attribute__((target("crypto")))
#else
#define ABSL_TARGET_CRYPTO
#endif
#else
#define ABSL_TARGET_CRYPTO
#endif
#if defined(ABSL_ARCH_PPC)
#include <altivec.h>
#undef vector
#undef bool
using Vector128 = __vector unsigned long long;
namespace {
inline ABSL_TARGET_CRYPTO Vector128 ReverseBytes(const Vector128& v) {
const __vector unsigned char perm = {15, 14, 13, 12, 11, 10, 9, 8,
7, 6, 5, 4, 3, 2, 1, 0};
return vec_perm(v, v, perm);
}
inline ABSL_TARGET_CRYPTO Vector128 Vector128Load(const void* from) {
return vec_vsx_ld(0, reinterpret_cast<const Vector128*>(from));
}
inline ABSL_TARGET_CRYPTO void Vector128Store(const Vector128& v, void* to) {
vec_vsx_st(v, 0, reinterpret_cast<Vector128*>(to));
}
inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
const Vector128& round_key) {
return Vector128(__builtin_crypto_vcipher(state, round_key));
}
inline ABSL_TARGET_CRYPTO void SwapEndian(absl::uint128* state) {
for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
Vector128Store(ReverseBytes(Vector128Load(state + block)), state + block);
}
}
}
#elif defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64)
#include <arm_neon.h>
using Vector128 = uint8x16_t;
namespace {
inline ABSL_TARGET_CRYPTO Vector128 Vector128Load(const void* from) {
return vld1q_u8(reinterpret_cast<const uint8_t*>(from));
}
inline ABSL_TARGET_CRYPTO void Vector128Store(const Vector128& v, void* to) {
vst1q_u8(reinterpret_cast<uint8_t*>(to), v);
}
inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
const Vector128& round_key) {
return vaesmcq_u8(vaeseq_u8(state, uint8x16_t{})) ^ round_key;
}
inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
}
#elif defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
#include <immintrin.h>
namespace {
class Vector128 {
public:
inline explicit Vector128(const __m128i& v) : data_(v) {}
inline __m128i data() const { return data_; }
inline Vector128& operator^=(const Vector128& other) {
data_ = _mm_xor_si128(data_, other.data());
return *this;
}
private:
__m128i data_;
};
inline ABSL_TARGET_CRYPTO Vector128 Vector128Load(const void* from) {
return Vector128(_mm_load_si128(reinterpret_cast<const __m128i*>(from)));
}
inline ABSL_TARGET_CRYPTO void Vector128Store(const Vector128& v, void* to) {
_mm_store_si128(reinterpret_cast<__m128i*>(to), v.data());
}
inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
const Vector128& round_key) {
return Vector128(_mm_aesenc_si128(state.data(), round_key.data()));
}
inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
}
#endif
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunknown-pragmas"
#endif
namespace {
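// Applies Randen's fixed permutation to the 16 state blocks between Feistel
// rounds, spreading each block's influence across the whole state.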
inline ABSL_TARGET_CRYPTO void BlockShuffle(absl::uint128* state) {
static_assert(RandenTraits::kFeistelBlocks == 16,
"Expecting 16 FeistelBlocks.");
constexpr size_t shuffle[RandenTraits::kFeistelBlocks] = {
7, 2, 13, 4, 11, 8, 3, 6, 15, 0, 9, 10, 1, 14, 5, 12};
const Vector128 v0 = Vector128Load(state + shuffle[0]);
const Vector128 v1 = Vector128Load(state + shuffle[1]);
const Vector128 v2 = Vector128Load(state + shuffle[2]);
const Vector128 v3 = Vector128Load(state + shuffle[3]);
const Vector128 v4 = Vector128Load(state + shuffle[4]);
const Vector128 v5 = Vector128Load(state + shuffle[5]);
const Vector128 v6 = Vector128Load(state + shuffle[6]);
const Vector128 v7 = Vector128Load(state + shuffle[7]);
const Vector128 w0 = Vector128Load(state + shuffle[8]);
const Vector128 w1 = Vector128Load(state + shuffle[9]);
const Vector128 w2 = Vector128Load(state + shuffle[10]);
const Vector128 w3 = Vector128Load(state + shuffle[11]);
const Vector128 w4 = Vector128Load(state + shuffle[12]);
const Vector128 w5 = Vector128Load(state + shuffle[13]);
const Vector128 w6 = Vector128Load(state + shuffle[14]);
const Vector128 w7 = Vector128Load(state + shuffle[15]);
Vector128Store(v0, state + 0);
Vector128Store(v1, state + 1);
Vector128Store(v2, state + 2);
Vector128Store(v3, state + 3);
Vector128Store(v4, state + 4);
Vector128Store(v5, state + 5);
Vector128Store(v6, state + 6);
Vector128Store(v7, state + 7);
Vector128Store(w0, state + 8);
Vector128Store(w1, state + 9);
Vector128Store(w2, state + 10);
Vector128Store(w3, state + 11);
Vector128Store(w4, state + 12);
Vector128Store(w5, state + 13);
Vector128Store(w6, state + 14);
Vector128Store(w7, state + 15);
}
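// One round of the 16-branch generalized Feistel network: each even block
// goes through an AES round keyed by the round-key schedule, and the result
// is folded into its odd neighbor by a second AES round. Only the odd blocks
// are overwritten; eight round keys are consumed per call.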
inline ABSL_TARGET_CRYPTO const absl::uint128* FeistelRound(
absl::uint128* state,
const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
static_assert(RandenTraits::kFeistelBlocks == 16,
"Expecting 16 FeistelBlocks.");
const Vector128 s0 = Vector128Load(state + 0);
const Vector128 s1 = Vector128Load(state + 1);
const Vector128 s2 = Vector128Load(state + 2);
const Vector128 s3 = Vector128Load(state + 3);
const Vector128 s4 = Vector128Load(state + 4);
const Vector128 s5 = Vector128Load(state + 5);
const Vector128 s6 = Vector128Load(state + 6);
const Vector128 s7 = Vector128Load(state + 7);
const Vector128 s8 = Vector128Load(state + 8);
const Vector128 s9 = Vector128Load(state + 9);
const Vector128 s10 = Vector128Load(state + 10);
const Vector128 s11 = Vector128Load(state + 11);
const Vector128 s12 = Vector128Load(state + 12);
const Vector128 s13 = Vector128Load(state + 13);
const Vector128 s14 = Vector128Load(state + 14);
const Vector128 s15 = Vector128Load(state + 15);
const Vector128 e0 = AesRound(s0, Vector128Load(keys + 0));
const Vector128 e2 = AesRound(s2, Vector128Load(keys + 1));
const Vector128 e4 = AesRound(s4, Vector128Load(keys + 2));
const Vector128 e6 = AesRound(s6, Vector128Load(keys + 3));
const Vector128 e8 = AesRound(s8, Vector128Load(keys + 4));
const Vector128 e10 = AesRound(s10, Vector128Load(keys + 5));
const Vector128 e12 = AesRound(s12, Vector128Load(keys + 6));
const Vector128 e14 = AesRound(s14, Vector128Load(keys + 7));
const Vector128 o1 = AesRound(e0, s1);
const Vector128 o3 = AesRound(e2, s3);
const Vector128 o5 = AesRound(e4, s5);
const Vector128 o7 = AesRound(e6, s7);
const Vector128 o9 = AesRound(e8, s9);
const Vector128 o11 = AesRound(e10, s11);
const Vector128 o13 = AesRound(e12, s13);
const Vector128 o15 = AesRound(e14, s15);
Vector128Store(o1, state + 1);
Vector128Store(o3, state + 3);
Vector128Store(o5, state + 5);
Vector128Store(o7, state + 7);
Vector128Store(o9, state + 9);
Vector128Store(o11, state + 11);
Vector128Store(o13, state + 13);
Vector128Store(o15, state + 15);
return keys + 8;
}
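// The full Randen permutation: alternates FeistelRound and BlockShuffle for
// kFeistelRounds iterations, advancing through the round-key schedule.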
inline ABSL_TARGET_CRYPTO void Permute(
absl::uint128* state,
const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
#ifdef __clang__
#pragma clang loop unroll_count(2)
#endif
for (size_t round = 0; round < RandenTraits::kFeistelRounds; ++round) {
keys = FeistelRound(state, keys);
BlockShuffle(state);
}
}
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
bool HasRandenHwAesImplementation() { return true; }
const void* ABSL_TARGET_CRYPTO RandenHwAes::GetKeys() {
#if defined(ABSL_ARCH_PPC)
return kRandenRoundKeysBE;
#else
return kRandenRoundKeys;
#endif
}
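// XORs the seed into state blocks 1..15, the "rate" of the sponge; block 0
// is the capacity block and is deliberately left untouched.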
void ABSL_TARGET_CRYPTO RandenHwAes::Absorb(const void* seed_void,
void* state_void) {
static_assert(RandenTraits::kCapacityBytes / sizeof(Vector128) == 1,
"Unexpected Randen kCapacityBlocks");
static_assert(RandenTraits::kStateBytes / sizeof(Vector128) == 16,
"Unexpected Randen kStateBlocks");
auto* state = reinterpret_cast<absl::uint128 * ABSL_RANDOM_INTERNAL_RESTRICT>(
state_void);
const auto* seed =
reinterpret_cast<const absl::uint128 * ABSL_RANDOM_INTERNAL_RESTRICT>(
seed_void);
Vector128 b1 = Vector128Load(state + 1);
b1 ^= Vector128Load(seed + 0);
Vector128Store(b1, state + 1);
Vector128 b2 = Vector128Load(state + 2);
b2 ^= Vector128Load(seed + 1);
Vector128Store(b2, state + 2);
Vector128 b3 = Vector128Load(state + 3);
b3 ^= Vector128Load(seed + 2);
Vector128Store(b3, state + 3);
Vector128 b4 = Vector128Load(state + 4);
b4 ^= Vector128Load(seed + 3);
Vector128Store(b4, state + 4);
Vector128 b5 = Vector128Load(state + 5);
b5 ^= Vector128Load(seed + 4);
Vector128Store(b5, state + 5);
Vector128 b6 = Vector128Load(state + 6);
b6 ^= Vector128Load(seed + 5);
Vector128Store(b6, state + 6);
Vector128 b7 = Vector128Load(state + 7);
b7 ^= Vector128Load(seed + 6);
Vector128Store(b7, state + 7);
Vector128 b8 = Vector128Load(state + 8);
b8 ^= Vector128Load(seed + 7);
Vector128Store(b8, state + 8);
Vector128 b9 = Vector128Load(state + 9);
b9 ^= Vector128Load(seed + 8);
Vector128Store(b9, state + 9);
Vector128 b10 = Vector128Load(state + 10);
b10 ^= Vector128Load(seed + 9);
Vector128Store(b10, state + 10);
Vector128 b11 = Vector128Load(state + 11);
b11 ^= Vector128Load(seed + 10);
Vector128Store(b11, state + 11);
Vector128 b12 = Vector128Load(state + 12);
b12 ^= Vector128Load(seed + 11);
Vector128Store(b12, state + 12);
Vector128 b13 = Vector128Load(state + 13);
b13 ^= Vector128Load(seed + 12);
Vector128Store(b13, state + 13);
Vector128 b14 = Vector128Load(state + 14);
b14 ^= Vector128Load(seed + 13);
Vector128Store(b14, state + 14);
Vector128 b15 = Vector128Load(state + 15);
b15 ^= Vector128Load(seed + 14);
Vector128Store(b15, state + 15);
}
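// Permutes the whole state, then XORs the previous capacity block back into
// the new one, a feed-forward that provides backtracking resistance. On PPC
// the state is byte-swapped around the permutation so that it matches the
// precomputed big-endian round keys.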
void ABSL_TARGET_CRYPTO RandenHwAes::Generate(const void* keys_void,
void* state_void) {
static_assert(RandenTraits::kCapacityBytes == sizeof(Vector128),
"Capacity mismatch");
auto* state = reinterpret_cast<absl::uint128*>(state_void);
const auto* keys = reinterpret_cast<const absl::uint128*>(keys_void);
const Vector128 prev_inner = Vector128Load(state);
SwapEndian(state);
Permute(state, keys);
SwapEndian(state);
Vector128 inner = Vector128Load(state);
inner ^= prev_inner;
Vector128Store(inner, state);
}
#ifdef __clang__
#pragma clang diagnostic pop
#endif
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/randen_hwaes.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/random/internal/platform.h"
#include "absl/random/internal/randen_detect.h"
#include "absl/random/internal/randen_traits.h"
#include "absl/strings/str_format.h"
namespace {
using absl::random_internal::RandenHwAes;
using absl::random_internal::RandenTraits;
TEST(RandenHwAesTest, Default) {
EXPECT_TRUE(absl::random_internal::CPUSupportsRandenHwAes());
constexpr uint8_t kGolden[] = {
0xee, 0xd3, 0xe6, 0x0e, 0x09, 0x34, 0x65, 0x6c, 0xc6, 0x33, 0x53, 0x9d,
0x9b, 0x2b, 0x4e, 0x04, 0x77, 0x39, 0x43, 0x4e, 0x13, 0x4f, 0xc1, 0xc3,
0xee, 0x10, 0x04, 0xd9, 0x7c, 0xf4, 0xa9, 0xdd, 0x10, 0xca, 0xd8, 0x7f,
0x08, 0xf3, 0x7b, 0x88, 0x12, 0x29, 0xc7, 0x45, 0xf5, 0x80, 0xb7, 0xf0,
0x9f, 0x59, 0x96, 0x76, 0xd3, 0xb1, 0xdb, 0x15, 0x59, 0x6d, 0x3c, 0xff,
0xba, 0x63, 0xec, 0x30, 0xa6, 0x20, 0x7f, 0x6f, 0x60, 0x73, 0x9f, 0xb2,
0x4c, 0xa5, 0x49, 0x6f, 0x31, 0x8a, 0x80, 0x02, 0x0e, 0xe5, 0xc8, 0xd5,
0xf9, 0xea, 0x8f, 0x3b, 0x8a, 0xde, 0xd9, 0x3f, 0x5e, 0x60, 0xbf, 0x9c,
0xbb, 0x3b, 0x18, 0x78, 0x1a, 0xae, 0x70, 0xc9, 0xd5, 0x1e, 0x30, 0x56,
0xd3, 0xff, 0xb2, 0xd8, 0x37, 0x3c, 0xc7, 0x0f, 0xfe, 0x27, 0xb3, 0xf4,
0x19, 0x9a, 0x8f, 0xeb, 0x76, 0x8d, 0xfd, 0xcd, 0x9d, 0x0c, 0x42, 0x91,
0xeb, 0x06, 0xa5, 0xc3, 0x56, 0x95, 0xff, 0x3e, 0xdd, 0x05, 0xaf, 0xd5,
0xa1, 0xc4, 0x83, 0x8f, 0xb7, 0x1b, 0xdb, 0x48, 0x8c, 0xfe, 0x6b, 0x0d,
0x0e, 0x92, 0x23, 0x70, 0x42, 0x6d, 0x95, 0x34, 0x58, 0x57, 0xd3, 0x58,
0x40, 0xb8, 0x87, 0x6b, 0xc2, 0xf4, 0x1e, 0xed, 0xf3, 0x2d, 0x0b, 0x3e,
0xa2, 0x32, 0xef, 0x8e, 0xfc, 0x54, 0x11, 0x43, 0xf3, 0xab, 0x7c, 0x49,
0x8b, 0x9a, 0x02, 0x70, 0x05, 0x37, 0x24, 0x4e, 0xea, 0xe5, 0x90, 0xf0,
0x49, 0x57, 0x8b, 0xd8, 0x2f, 0x69, 0x70, 0xa9, 0x82, 0xa5, 0x51, 0xc6,
0xf5, 0x42, 0x63, 0xbb, 0x2c, 0xec, 0xfc, 0x78, 0xdb, 0x55, 0x2f, 0x61,
0x45, 0xb7, 0x3c, 0x46, 0xe3, 0xaf, 0x16, 0x18, 0xad, 0xe4, 0x2e, 0x35,
0x7e, 0xda, 0x01, 0xc1, 0x74, 0xf3, 0x6f, 0x02, 0x51, 0xe8, 0x3d, 0x1c,
0x82, 0xf0, 0x1e, 0x81,
};
alignas(16) uint8_t state[RandenTraits::kStateBytes];
std::memset(state, 0, sizeof(state));
RandenHwAes::Generate(RandenHwAes::GetKeys(), state);
EXPECT_EQ(0, std::memcmp(state, kGolden, sizeof(state)));
}
}
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
LOG(INFO) << "ABSL_HAVE_ACCELERATED_AES=" << ABSL_HAVE_ACCELERATED_AES;
LOG(INFO) << "ABSL_RANDOM_INTERNAL_AES_DISPATCH="
<< ABSL_RANDOM_INTERNAL_AES_DISPATCH;
#if defined(ABSL_ARCH_X86_64)
LOG(INFO) << "ABSL_ARCH_X86_64";
#elif defined(ABSL_ARCH_X86_32)
LOG(INFO) << "ABSL_ARCH_X86_32";
#elif defined(ABSL_ARCH_AARCH64)
LOG(INFO) << "ABSL_ARCH_AARCH64";
#elif defined(ABSL_ARCH_ARM)
LOG(INFO) << "ABSL_ARCH_ARM";
#elif defined(ABSL_ARCH_PPC)
LOG(INFO) << "ABSL_ARCH_PPC";
#else
LOG(INFO) << "ARCH Unknown";
#endif
int x = absl::random_internal::HasRandenHwAesImplementation();
LOG(INFO) << "HasRandenHwAesImplementation = " << x;
int y = absl::random_internal::CPUSupportsRandenHwAes();
LOG(INFO) << "CPUSupportsRandenHwAes = " << x;
if (!x || !y) {
LOG(INFO) << "Skipping Randen HWAES tests.";
return 0;
}
return RUN_ALL_TESTS();
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_hwaes.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/randen_hwaes_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
64487d51-1c0d-4732-87e3-c169ab23ba5d | cpp | google/cel-cpp | ident_step | eval/eval/ident_step.cc | eval/eval/ident_step_test.cc | #include "eval/eval/ident_step.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "base/ast_internal/expr.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/comprehension_slots.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "eval/internal/errors.h"
#include "internal/status_macros.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::Value;
using ::cel::runtime_internal::CreateError;
class IdentStep : public ExpressionStepBase {
public:
IdentStep(absl::string_view name, int64_t expr_id)
: ExpressionStepBase(expr_id), name_(name) {}
absl::Status Evaluate(ExecutionFrame* frame) const override;
private:
std::string name_;
};
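// Resolves an identifier against the evaluation context. When attribute
// tracking is enabled, missing-attribute and unknown patterns take
// precedence over the activation; a name found nowhere yields an ErrorValue
// result rather than a non-OK status.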
absl::Status LookupIdent(const std::string& name, ExecutionFrameBase& frame,
Value& result, AttributeTrail& attribute) {
if (frame.attribute_tracking_enabled()) {
attribute = AttributeTrail(name);
if (frame.missing_attribute_errors_enabled() &&
frame.attribute_utility().CheckForMissingAttribute(attribute)) {
CEL_ASSIGN_OR_RETURN(
result, frame.attribute_utility().CreateMissingAttributeError(
attribute.attribute()));
return absl::OkStatus();
}
if (frame.unknown_processing_enabled() &&
frame.attribute_utility().CheckForUnknownExact(attribute)) {
result =
frame.attribute_utility().CreateUnknownSet(attribute.attribute());
return absl::OkStatus();
}
}
CEL_ASSIGN_OR_RETURN(auto found, frame.activation().FindVariable(
frame.value_manager(), name, result));
if (found) {
return absl::OkStatus();
}
result = frame.value_manager().CreateErrorValue(CreateError(
absl::StrCat("No value with name \"", name, "\" found in Activation")));
return absl::OkStatus();
}
absl::Status IdentStep::Evaluate(ExecutionFrame* frame) const {
Value value;
AttributeTrail attribute;
CEL_RETURN_IF_ERROR(LookupIdent(name_, *frame, value, attribute));
frame->value_stack().Push(std::move(value), std::move(attribute));
return absl::OkStatus();
}
absl::StatusOr<absl::Nonnull<const ComprehensionSlots::Slot*>> LookupSlot(
absl::string_view name, size_t slot_index, ExecutionFrameBase& frame) {
const ComprehensionSlots::Slot* slot =
frame.comprehension_slots().Get(slot_index);
if (slot == nullptr) {
return absl::InternalError(
absl::StrCat("Comprehension variable accessed out of scope: ", name));
}
return slot;
}
class SlotStep : public ExpressionStepBase {
public:
SlotStep(absl::string_view name, size_t slot_index, int64_t expr_id)
: ExpressionStepBase(expr_id), name_(name), slot_index_(slot_index) {}
absl::Status Evaluate(ExecutionFrame* frame) const override {
CEL_ASSIGN_OR_RETURN(const ComprehensionSlots::Slot* slot,
LookupSlot(name_, slot_index_, *frame));
frame->value_stack().Push(slot->value, slot->attribute);
return absl::OkStatus();
}
private:
std::string name_;
size_t slot_index_;
};
class DirectIdentStep : public DirectExpressionStep {
public:
DirectIdentStep(absl::string_view name, int64_t expr_id)
: DirectExpressionStep(expr_id), name_(name) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute) const override {
return LookupIdent(name_, frame, result, attribute);
}
private:
std::string name_;
};
class DirectSlotStep : public DirectExpressionStep {
public:
DirectSlotStep(std::string name, size_t slot_index, int64_t expr_id)
: DirectExpressionStep(expr_id),
name_(std::move(name)),
slot_index_(slot_index) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute) const override {
CEL_ASSIGN_OR_RETURN(const ComprehensionSlots::Slot* slot,
LookupSlot(name_, slot_index_, frame));
if (frame.attribute_tracking_enabled()) {
attribute = slot->attribute;
}
result = slot->value;
return absl::OkStatus();
}
private:
std::string name_;
size_t slot_index_;
};
}
std::unique_ptr<DirectExpressionStep> CreateDirectIdentStep(
absl::string_view identifier, int64_t expr_id) {
return std::make_unique<DirectIdentStep>(identifier, expr_id);
}
std::unique_ptr<DirectExpressionStep> CreateDirectSlotIdentStep(
absl::string_view identifier, size_t slot_index, int64_t expr_id) {
return std::make_unique<DirectSlotStep>(std::string(identifier), slot_index,
expr_id);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateIdentStep(
const cel::ast_internal::Ident& ident_expr, int64_t expr_id) {
return std::make_unique<IdentStep>(ident_expr.name(), expr_id);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateIdentStepForSlot(
const cel::ast_internal::Ident& ident_expr, size_t slot_index,
int64_t expr_id) {
return std::make_unique<SlotStep>(ident_expr.name(), slot_index, expr_id);
}
} | #include "eval/eval/ident_step.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "base/type_provider.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/evaluator_core.h"
#include "eval/public/activation.h"
#include "eval/public/cel_attribute.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/managed_value_factory.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::IntValue;
using ::cel::ManagedValueFactory;
using ::cel::MemoryManagerRef;
using ::cel::RuntimeOptions;
using ::cel::TypeProvider;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::ast_internal::Expr;
using ::google::protobuf::Arena;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::SizeIs;
TEST(IdentStepTest, TestIdentStep) {
Expr expr;
auto& ident_expr = expr.mutable_ident_expr();
ident_expr.set_name("name0");
ASSERT_OK_AND_ASSIGN(auto step, CreateIdentStep(ident_expr, expr.id()));
ExecutionPath path;
path.push_back(std::move(step));
CelExpressionFlatImpl impl(
FlatExpression(std::move(path), 0,
TypeProvider::Builtin(), cel::RuntimeOptions{}));
Activation activation;
Arena arena;
std::string value("test");
activation.InsertValue("name0", CelValue::CreateString(&value));
auto status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
CelValue result = status0.value();
ASSERT_TRUE(result.IsString());
EXPECT_THAT(result.StringOrDie().value(), Eq("test"));
}
TEST(IdentStepTest, TestIdentStepNameNotFound) {
Expr expr;
auto& ident_expr = expr.mutable_ident_expr();
ident_expr.set_name("name0");
ASSERT_OK_AND_ASSIGN(auto step, CreateIdentStep(ident_expr, expr.id()));
ExecutionPath path;
path.push_back(std::move(step));
CelExpressionFlatImpl impl(
FlatExpression(std::move(path), 0,
TypeProvider::Builtin(), cel::RuntimeOptions{}));
Activation activation;
Arena arena;
std::string value("test");
auto status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
CelValue result = status0.value();
ASSERT_TRUE(result.IsError());
}
TEST(IdentStepTest, DisableMissingAttributeErrorsOK) {
Expr expr;
auto& ident_expr = expr.mutable_ident_expr();
ident_expr.set_name("name0");
ASSERT_OK_AND_ASSIGN(auto step, CreateIdentStep(ident_expr, expr.id()));
ExecutionPath path;
path.push_back(std::move(step));
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kDisabled;
CelExpressionFlatImpl impl(FlatExpression(std::move(path),
0,
TypeProvider::Builtin(), options));
Activation activation;
Arena arena;
std::string value("test");
activation.InsertValue("name0", CelValue::CreateString(&value));
auto status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
CelValue result = status0.value();
ASSERT_TRUE(result.IsString());
EXPECT_THAT(result.StringOrDie().value(), Eq("test"));
const CelAttributePattern pattern("name0", {});
activation.set_missing_attribute_patterns({pattern});
status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
EXPECT_THAT(status0->StringOrDie().value(), Eq("test"));
}
TEST(IdentStepTest, TestIdentStepMissingAttributeErrors) {
Expr expr;
auto& ident_expr = expr.mutable_ident_expr();
ident_expr.set_name("name0");
ASSERT_OK_AND_ASSIGN(auto step, CreateIdentStep(ident_expr, expr.id()));
ExecutionPath path;
path.push_back(std::move(step));
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kDisabled;
options.enable_missing_attribute_errors = true;
CelExpressionFlatImpl impl(FlatExpression(std::move(path),
0,
TypeProvider::Builtin(), options));
Activation activation;
Arena arena;
std::string value("test");
activation.InsertValue("name0", CelValue::CreateString(&value));
auto status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
CelValue result = status0.value();
ASSERT_TRUE(result.IsString());
EXPECT_THAT(result.StringOrDie().value(), Eq("test"));
CelAttributePattern pattern("name0", {});
activation.set_missing_attribute_patterns({pattern});
status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
EXPECT_EQ(status0->ErrorOrDie()->code(), absl::StatusCode::kInvalidArgument);
EXPECT_EQ(status0->ErrorOrDie()->message(), "MissingAttributeError: name0");
}
TEST(IdentStepTest, TestIdentStepUnknownAttribute) {
Expr expr;
auto& ident_expr = expr.mutable_ident_expr();
ident_expr.set_name("name0");
ASSERT_OK_AND_ASSIGN(auto step, CreateIdentStep(ident_expr, expr.id()));
ExecutionPath path;
path.push_back(std::move(step));
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
CelExpressionFlatImpl impl(FlatExpression(std::move(path),
0,
TypeProvider::Builtin(), options));
Activation activation;
Arena arena;
std::string value("test");
activation.InsertValue("name0", CelValue::CreateString(&value));
std::vector<CelAttributePattern> unknown_patterns;
unknown_patterns.push_back(CelAttributePattern("name_bad", {}));
activation.set_unknown_attribute_patterns(unknown_patterns);
auto status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
CelValue result = status0.value();
ASSERT_TRUE(result.IsString());
EXPECT_THAT(result.StringOrDie().value(), Eq("test"));
unknown_patterns.push_back(CelAttributePattern("name0", {}));
activation.set_unknown_attribute_patterns(unknown_patterns);
status0 = impl.Evaluate(activation, &arena);
ASSERT_OK(status0);
result = status0.value();
ASSERT_TRUE(result.IsUnknownSet());
}
TEST(DirectIdentStepTest, Basic) {
ManagedValueFactory value_factory(TypeProvider::Builtin(),
MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
RuntimeOptions options;
activation.InsertOrAssignValue("var1", IntValue(42));
ExecutionFrameBase frame(activation, options, value_factory.get());
Value result;
AttributeTrail trail;
auto step = CreateDirectIdentStep("var1", -1);
ASSERT_OK(step->Evaluate(frame, result, trail));
ASSERT_TRUE(InstanceOf<IntValue>(result));
EXPECT_THAT(Cast<IntValue>(result).NativeValue(), Eq(42));
}
TEST(DirectIdentStepTest, UnknownAttribute) {
ManagedValueFactory value_factory(TypeProvider::Builtin(),
MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
activation.InsertOrAssignValue("var1", IntValue(42));
activation.SetUnknownPatterns({CreateCelAttributePattern("var1", {})});
ExecutionFrameBase frame(activation, options, value_factory.get());
Value result;
AttributeTrail trail;
auto step = CreateDirectIdentStep("var1", -1);
ASSERT_OK(step->Evaluate(frame, result, trail));
ASSERT_TRUE(InstanceOf<UnknownValue>(result));
EXPECT_THAT(Cast<UnknownValue>(result).attribute_set(), SizeIs(1));
}
TEST(DirectIdentStepTest, MissingAttribute) {
ManagedValueFactory value_factory(TypeProvider::Builtin(),
MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
RuntimeOptions options;
options.enable_missing_attribute_errors = true;
activation.InsertOrAssignValue("var1", IntValue(42));
activation.SetMissingPatterns({CreateCelAttributePattern("var1", {})});
ExecutionFrameBase frame(activation, options, value_factory.get());
Value result;
AttributeTrail trail;
auto step = CreateDirectIdentStep("var1", -1);
ASSERT_OK(step->Evaluate(frame, result, trail));
ASSERT_TRUE(InstanceOf<ErrorValue>(result));
EXPECT_THAT(Cast<ErrorValue>(result).NativeValue(),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("var1")));
}
TEST(DirectIdentStepTest, NotFound) {
ManagedValueFactory value_factory(TypeProvider::Builtin(),
MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
RuntimeOptions options;
ExecutionFrameBase frame(activation, options, value_factory.get());
Value result;
AttributeTrail trail;
auto step = CreateDirectIdentStep("var1", -1);
ASSERT_OK(step->Evaluate(frame, result, trail));
ASSERT_TRUE(InstanceOf<ErrorValue>(result));
EXPECT_THAT(Cast<ErrorValue>(result).NativeValue(),
StatusIs(absl::StatusCode::kUnknown,
HasSubstr("\"var1\" found in Activation")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/ident_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/ident_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b6c1594a-127c-43ef-a7fd-eb27fc8713a4 | cpp | google/quiche | hpack_decoder_tables | quiche/http2/hpack/decoder/hpack_decoder_tables.cc | quiche/http2/hpack/decoder/hpack_decoder_tables_test.cc | #include "quiche/http2/hpack/decoder/hpack_decoder_tables.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
namespace {
std::vector<HpackStringPair>* MakeStaticTable() {
auto* ptr = new std::vector<HpackStringPair>();
ptr->reserve(kFirstDynamicTableIndex);
ptr->emplace_back("", "");
#define STATIC_TABLE_ENTRY(name, value, index) \
QUICHE_DCHECK_EQ(ptr->size(), static_cast<size_t>(index)); \
ptr->emplace_back(name, value)
#include "quiche/http2/hpack/hpack_static_table_entries.inc"
#undef STATIC_TABLE_ENTRY
return ptr;
}
const std::vector<HpackStringPair>* GetStaticTable() {
static const std::vector<HpackStringPair>* const g_static_table =
MakeStaticTable();
return g_static_table;
}
}
HpackStringPair::HpackStringPair(std::string name, std::string value)
: name(std::move(name)), value(std::move(value)) {
QUICHE_DVLOG(3) << DebugString() << " ctor";
}
HpackStringPair::~HpackStringPair() {
QUICHE_DVLOG(3) << DebugString() << " dtor";
}
std::string HpackStringPair::DebugString() const {
return absl::StrCat("HpackStringPair(name=", name, ", value=", value, ")");
}
std::ostream& operator<<(std::ostream& os, const HpackStringPair& p) {
os << p.DebugString();
return os;
}
HpackDecoderStaticTable::HpackDecoderStaticTable(
const std::vector<HpackStringPair>* table)
: table_(table) {}
HpackDecoderStaticTable::HpackDecoderStaticTable() : table_(GetStaticTable()) {}
const HpackStringPair* HpackDecoderStaticTable::Lookup(size_t index) const {
if (0 < index && index < kFirstDynamicTableIndex) {
return &((*table_)[index]);
}
return nullptr;
}
HpackDecoderDynamicTable::HpackDecoderDynamicTable()
: insert_count_(kFirstDynamicTableIndex - 1) {}
HpackDecoderDynamicTable::~HpackDecoderDynamicTable() = default;
void HpackDecoderDynamicTable::DynamicTableSizeUpdate(size_t size_limit) {
QUICHE_DVLOG(3) << "HpackDecoderDynamicTable::DynamicTableSizeUpdate "
<< size_limit;
EnsureSizeNoMoreThan(size_limit);
QUICHE_DCHECK_LE(current_size_, size_limit);
size_limit_ = size_limit;
}
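// Inserts at the head of the table, evicting the oldest entries until the
// new one fits. Per RFC 7541 Section 4.4, an entry larger than the table's
// size limit empties the table and is itself not inserted.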
void HpackDecoderDynamicTable::Insert(std::string name, std::string value) {
HpackStringPair entry(std::move(name), std::move(value));
size_t entry_size = entry.size();
QUICHE_DVLOG(2) << "InsertEntry of size=" << entry_size
<< "\n name: " << entry.name
<< "\n value: " << entry.value;
if (entry_size > size_limit_) {
QUICHE_DVLOG(2) << "InsertEntry: entry larger than table, removing "
<< table_.size() << " entries, of total size "
<< current_size_ << " bytes.";
table_.clear();
current_size_ = 0;
return;
}
++insert_count_;
size_t insert_limit = size_limit_ - entry_size;
EnsureSizeNoMoreThan(insert_limit);
table_.push_front(std::move(entry));
current_size_ += entry_size;
QUICHE_DVLOG(2) << "InsertEntry: current_size_=" << current_size_;
QUICHE_DCHECK_GE(current_size_, entry_size);
QUICHE_DCHECK_LE(current_size_, size_limit_);
}
const HpackStringPair* HpackDecoderDynamicTable::Lookup(size_t index) const {
if (index < table_.size()) {
return &table_[index];
}
return nullptr;
}
void HpackDecoderDynamicTable::EnsureSizeNoMoreThan(size_t limit) {
QUICHE_DVLOG(2) << "EnsureSizeNoMoreThan limit=" << limit
<< ", current_size_=" << current_size_;
while (current_size_ > limit) {
RemoveLastEntry();
}
QUICHE_DCHECK_LE(current_size_, limit);
}
void HpackDecoderDynamicTable::RemoveLastEntry() {
QUICHE_DCHECK(!table_.empty());
if (!table_.empty()) {
QUICHE_DVLOG(2) << "RemoveLastEntry current_size_=" << current_size_
<< ", last entry size=" << table_.back().size();
QUICHE_DCHECK_GE(current_size_, table_.back().size());
current_size_ -= table_.back().size();
table_.pop_back();
QUICHE_DCHECK_EQ(table_.empty(), current_size_ == 0);
}
}
HpackDecoderTables::HpackDecoderTables() = default;
HpackDecoderTables::~HpackDecoderTables() = default;
const HpackStringPair* HpackDecoderTables::Lookup(size_t index) const {
if (index < kFirstDynamicTableIndex) {
return static_table_.Lookup(index);
} else {
return dynamic_table_.Lookup(index - kFirstDynamicTableIndex);
}
}
} | #include "quiche/http2/hpack/decoder/hpack_decoder_tables.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include "quiche/http2/hpack/http2_hpack_constants.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/random_util.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::AssertionResult;
using ::testing::AssertionSuccess;
namespace http2 {
namespace test {
class HpackDecoderTablesPeer {
public:
static size_t num_dynamic_entries(const HpackDecoderTables& tables) {
return tables.dynamic_table_.table_.size();
}
};
namespace {
struct StaticEntry {
const char* name;
const char* value;
size_t index;
};
std::vector<StaticEntry> MakeSpecStaticEntries() {
std::vector<StaticEntry> static_entries;
#define STATIC_TABLE_ENTRY(name, value, index) \
QUICHE_DCHECK_EQ(static_entries.size() + 1, static_cast<size_t>(index)); \
static_entries.push_back({name, value, index});
#include "quiche/http2/hpack/hpack_static_table_entries.inc"
#undef STATIC_TABLE_ENTRY
return static_entries;
}
template <class C>
void ShuffleCollection(C* collection, Http2Random* r) {
std::shuffle(collection->begin(), collection->end(), *r);
}
class HpackDecoderStaticTableTest : public quiche::test::QuicheTest {
protected:
HpackDecoderStaticTableTest() = default;
std::vector<StaticEntry> shuffled_static_entries() {
std::vector<StaticEntry> entries = MakeSpecStaticEntries();
ShuffleCollection(&entries, &random_);
return entries;
}
AssertionResult VerifyStaticTableContents() {
for (const auto& expected : shuffled_static_entries()) {
const HpackStringPair* found = Lookup(expected.index);
HTTP2_VERIFY_NE(found, nullptr);
HTTP2_VERIFY_EQ(expected.name, found->name) << expected.index;
HTTP2_VERIFY_EQ(expected.value, found->value) << expected.index;
}
HTTP2_VERIFY_EQ(nullptr, Lookup(0));
return AssertionSuccess();
}
virtual const HpackStringPair* Lookup(size_t index) {
return static_table_.Lookup(index);
}
Http2Random* RandomPtr() { return &random_; }
Http2Random random_;
private:
HpackDecoderStaticTable static_table_;
};
TEST_F(HpackDecoderStaticTableTest, StaticTableContents) {
EXPECT_TRUE(VerifyStaticTableContents());
}
size_t Size(const std::string& name, const std::string& value) {
return name.size() + value.size() + 32;
}
typedef std::tuple<std::string, std::string, size_t> FakeHpackEntry;
const std::string& Name(const FakeHpackEntry& entry) {
return std::get<0>(entry);
}
const std::string& Value(const FakeHpackEntry& entry) {
return std::get<1>(entry);
}
size_t Size(const FakeHpackEntry& entry) { return std::get<2>(entry); }
class HpackDecoderTablesTest : public HpackDecoderStaticTableTest {
protected:
const HpackStringPair* Lookup(size_t index) override {
return tables_.Lookup(index);
}
size_t dynamic_size_limit() const {
return tables_.header_table_size_limit();
}
size_t current_dynamic_size() const {
return tables_.current_header_table_size();
}
size_t num_dynamic_entries() const {
return HpackDecoderTablesPeer::num_dynamic_entries(tables_);
}
void FakeInsert(const std::string& name, const std::string& value) {
FakeHpackEntry entry(name, value, Size(name, value));
fake_dynamic_table_.insert(fake_dynamic_table_.begin(), entry);
}
size_t FakeSize() {
size_t sz = 0;
for (const auto& entry : fake_dynamic_table_) {
sz += Size(entry);
}
return sz;
}
size_t FakeTrim(size_t limit) {
size_t original_size = FakeSize();
size_t total_size = 0;
for (size_t ndx = 0; ndx < fake_dynamic_table_.size(); ++ndx) {
total_size += Size(fake_dynamic_table_[ndx]);
if (total_size > limit) {
fake_dynamic_table_.erase(fake_dynamic_table_.begin() + ndx,
fake_dynamic_table_.end());
return original_size - FakeSize();
}
}
return 0;
}
AssertionResult VerifyDynamicTableContents() {
HTTP2_VERIFY_EQ(current_dynamic_size(), FakeSize());
HTTP2_VERIFY_EQ(num_dynamic_entries(), fake_dynamic_table_.size());
for (size_t ndx = 0; ndx < fake_dynamic_table_.size(); ++ndx) {
const HpackStringPair* found = Lookup(ndx + kFirstDynamicTableIndex);
HTTP2_VERIFY_NE(found, nullptr);
const auto& expected = fake_dynamic_table_[ndx];
HTTP2_VERIFY_EQ(Name(expected), found->name);
HTTP2_VERIFY_EQ(Value(expected), found->value);
}
HTTP2_VERIFY_EQ(
nullptr, Lookup(fake_dynamic_table_.size() + kFirstDynamicTableIndex));
return AssertionSuccess();
}
AssertionResult DynamicTableSizeUpdate(size_t size_limit) {
HTTP2_VERIFY_EQ(current_dynamic_size(), FakeSize());
if (size_limit < current_dynamic_size()) {
tables_.DynamicTableSizeUpdate(size_limit);
FakeTrim(size_limit);
return VerifyDynamicTableContents();
}
tables_.DynamicTableSizeUpdate(size_limit);
return VerifyDynamicTableContents();
}
AssertionResult Insert(const std::string& name, const std::string& value) {
size_t old_count = num_dynamic_entries();
tables_.Insert(name, value);
FakeInsert(name, value);
HTTP2_VERIFY_EQ(old_count + 1, fake_dynamic_table_.size());
FakeTrim(dynamic_size_limit());
HTTP2_VERIFY_EQ(current_dynamic_size(), FakeSize());
HTTP2_VERIFY_EQ(num_dynamic_entries(), fake_dynamic_table_.size());
return VerifyDynamicTableContents();
}
private:
HpackDecoderTables tables_;
std::vector<FakeHpackEntry> fake_dynamic_table_;
};
TEST_F(HpackDecoderTablesTest, StaticTableContents) {
EXPECT_TRUE(VerifyStaticTableContents());
}
TEST_F(HpackDecoderTablesTest, RandomDynamicTable) {
EXPECT_EQ(0u, current_dynamic_size());
EXPECT_TRUE(VerifyStaticTableContents());
EXPECT_TRUE(VerifyDynamicTableContents());
std::vector<size_t> table_sizes;
table_sizes.push_back(dynamic_size_limit());
table_sizes.push_back(0);
table_sizes.push_back(dynamic_size_limit() / 2);
table_sizes.push_back(dynamic_size_limit());
table_sizes.push_back(dynamic_size_limit() / 2);
table_sizes.push_back(0);
table_sizes.push_back(dynamic_size_limit());
for (size_t limit : table_sizes) {
ASSERT_TRUE(DynamicTableSizeUpdate(limit));
for (int insert_count = 0; insert_count < 100; ++insert_count) {
std::string name =
GenerateHttp2HeaderName(random_.UniformInRange(2, 40), RandomPtr());
std::string value =
GenerateWebSafeString(random_.UniformInRange(2, 600), RandomPtr());
ASSERT_TRUE(Insert(name, value));
}
EXPECT_TRUE(VerifyStaticTableContents());
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_decoder_tables.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/decoder/hpack_decoder_tables_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
04433fb2-7496-4b2a-81ed-6816e4136e92 | cpp | tensorflow/tensorflow | grpc_server | third_party/xla/xla/python/ifrt_proxy/server/grpc_server.cc | third_party/xla/xla/python/ifrt_proxy/server/grpc_server_test.cc | #include "xla/python/ifrt_proxy/server/grpc_server.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/functional/any_invocable.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "grpc/grpc.h"
#include "grpcpp/completion_queue.h"
#include "grpcpp/grpcpp.h"
#include "grpcpp/server_builder.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt_proxy/common/grpc_credentials.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "xla/python/ifrt_proxy/common/ifrt_service.pb.h"
#include "xla/python/ifrt_proxy/server/grpc_service_impl.h"
#include "xla/python/ifrt_proxy/server/host_buffer.h"
#include "xla/python/ifrt_proxy/server/ifrt_backend.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
GrpcServer::~GrpcServer() {
server_->Shutdown();
server_->Wait();
}
absl::StatusOr<std::unique_ptr<GrpcServer>> GrpcServer::Create(
absl::string_view address,
std::unique_ptr<grpc::GrpcIfrtService::Service> impl) {
if (impl == nullptr) {
return absl::InvalidArgumentError(
"Service implementation cannot be a nullptr.");
}
::grpc::ServerBuilder builder;
builder.AddChannelArgument(GRPC_ARG_MAX_SEND_MESSAGE_LENGTH, -1);
builder.AddChannelArgument(GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH, -1);
builder.RegisterService(impl.get());
builder.AddListeningPort(std::string(address), GetServerCredentials());
auto server = builder.BuildAndStart();
if (server == nullptr) {
return absl::UnavailableError(
absl::StrCat("Failed to initialize gRPC server at address:", address));
}
return absl::WrapUnique<GrpcServer>(
new GrpcServer(address, std::move(impl), std::move(server)));
}
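// Convenience factory: wires the client factory into a GrpcServiceImpl that
// builds one IfrtBackend per session, then starts a server around it.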
absl::StatusOr<std::unique_ptr<GrpcServer>>
GrpcServer::CreateFromIfrtClientFactory(
absl::string_view address,
absl::AnyInvocable<absl::StatusOr<std::shared_ptr<xla::ifrt::Client>>()>
backend_ifrt_client_factory) {
if (backend_ifrt_client_factory == nullptr) {
return absl::InvalidArgumentError(
"backend_ifrt_client_factory cannot be nullptr.");
}
auto service = std::make_unique<GrpcServiceImpl>(
[ifrt_client_factory = std::move(backend_ifrt_client_factory)](
IfrtProxyVersion version, uint64_t session_id,
std::shared_ptr<HostBufferStore> host_buffer_store) mutable
-> absl::StatusOr<std::unique_ptr<BackendInterface>> {
TF_ASSIGN_OR_RETURN(auto ifrt_client, ifrt_client_factory());
return IfrtBackend::Create(version, session_id, std::move(ifrt_client),
std::move(host_buffer_store));
});
return Create(address, std::move(service));
}
}
}
} | #include "xla/python/ifrt_proxy/server/grpc_server.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt_proxy/common/grpc_ifrt_service.grpc.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::Not;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
class FakeIfrtService : public grpc::GrpcIfrtService::Service {};
TEST(GrpcServerTest, CreationTest) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
ASSERT_THAT(GrpcServer::Create(addr, std::move(grpc_service_impl)), IsOk());
}
TEST(GrpcServerTest, CreationFailsIfImplIsNullptr) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
EXPECT_THAT(GrpcServer::Create(addr, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(GrpcServerTest, CreationFailsWithInvalidAddress) {
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
EXPECT_THAT(GrpcServer::Create("invalid-address",
std::move(grpc_service_impl)),
Not(IsOk()));
}
TEST(GrpcServerTest, RetrievingServerAddressWorks) {
auto addr = absl::StrCat("[::1]:", tsl::testing::PickUnusedPortOrDie());
auto grpc_service_impl = std::make_unique<FakeIfrtService>();
TF_ASSERT_OK_AND_ASSIGN(
auto grpc_server, GrpcServer::Create(addr, std::move(grpc_service_impl)));
EXPECT_EQ(grpc_server->address(), addr);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_server.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/server/grpc_server_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1a315438-fcc1-4394-b61d-58b16b12b0a4 | cpp | google/libaddressinput | validation_task | cpp/src/validation_task.cc | cpp/test/validation_task_test.cc | #include "validation_task.h"
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_metadata.h>
#include <libaddressinput/address_problem.h>
#include <libaddressinput/address_validator.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/supplier.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <re2/re2.h>
#include "lookup_key.h"
#include "post_box_matchers.h"
#include "rule.h"
#include "util/re2ptr.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
ValidationTask::ValidationTask(const AddressData& address, bool allow_postal,
bool require_name, const FieldProblemMap* filter,
FieldProblemMap* problems,
const AddressValidator::Callback& validated)
: address_(address),
allow_postal_(allow_postal),
require_name_(require_name),
filter_(filter),
problems_(problems),
validated_(validated),
supplied_(BuildCallback(this, &ValidationTask::Validate)),
lookup_key_(new LookupKey),
max_depth_(size(LookupKey::kHierarchy)) {
assert(problems_ != nullptr);
assert(supplied_ != nullptr);
assert(lookup_key_ != nullptr);
}
ValidationTask::~ValidationTask() = default;
void ValidationTask::Run(Supplier* supplier) {
assert(supplier != nullptr);
problems_->clear();
lookup_key_->FromAddress(address_);
max_depth_ = supplier->GetLoadedRuleDepth(lookup_key_->ToKeyString(0));
supplier->SupplyGlobally(*lookup_key_, *supplied_);
}
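// Callback invoked when metadata lookup completes. The individual checks run
// only if the lookup succeeded and a country-level rule exists; afterwards
// the accumulated problems are handed to the caller's callback and the task
// deletes itself.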
void ValidationTask::Validate(bool success,
const LookupKey& lookup_key,
const Supplier::RuleHierarchy& hierarchy) {
assert(&lookup_key == lookup_key_.get());
if (success) {
if (address_.IsFieldEmpty(COUNTRY)) {
ReportProblemMaybe(COUNTRY, MISSING_REQUIRED_FIELD);
} else if (hierarchy.rule[0] == nullptr) {
ReportProblemMaybe(COUNTRY, UNKNOWN_VALUE);
} else {
const std::string& region_code = address_.region_code;
CheckUnexpectedField(region_code);
CheckMissingRequiredField(region_code);
CheckUnknownValue(hierarchy);
CheckPostalCodeFormatAndValue(hierarchy);
CheckUsesPoBox(hierarchy);
CheckUnsupportedField();
}
}
validated_(success, address_, *problems_);
delete this;
}
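// Reports UNEXPECTED_FIELD for every non-empty field that the metadata for
// |region_code| does not use at all.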
void ValidationTask::CheckUnexpectedField(
const std::string& region_code) const {
static const AddressField kFields[] = {
ADMIN_AREA,
LOCALITY,
DEPENDENT_LOCALITY,
SORTING_CODE,
POSTAL_CODE,
STREET_ADDRESS,
ORGANIZATION,
RECIPIENT,
};
for (AddressField field : kFields) {
if (!address_.IsFieldEmpty(field) && !IsFieldUsed(field, region_code)) {
ReportProblemMaybe(field, UNEXPECTED_FIELD);
}
}
}
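// Reports MISSING_REQUIRED_FIELD for every empty field that the metadata for
// |region_code| marks as required. RECIPIENT is required only when the caller
// opted in through |require_name_|.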
void ValidationTask::CheckMissingRequiredField(
const std::string& region_code) const {
static const AddressField kFields[] = {
ADMIN_AREA,
LOCALITY,
DEPENDENT_LOCALITY,
SORTING_CODE,
POSTAL_CODE,
STREET_ADDRESS,
};
for (AddressField field : kFields) {
if (address_.IsFieldEmpty(field) && IsFieldRequired(field, region_code)) {
ReportProblemMaybe(field, MISSING_REQUIRED_FIELD);
}
}
if (require_name_ && address_.IsFieldEmpty(RECIPIENT)) {
ReportProblemMaybe(RECIPIENT, MISSING_REQUIRED_FIELD);
}
}
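// A value is "unknown" when the level above it has a list of known sub-keys
// but no rule was found for the value itself; empty fields and levels whose
// parent rule is missing or key-less are skipped so that problems caught by
// the other checks are not double-reported.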
void ValidationTask::CheckUnknownValue(
const Supplier::RuleHierarchy& hierarchy) const {
for (size_t depth = 1; depth < size(LookupKey::kHierarchy); ++depth) {
AddressField field = LookupKey::kHierarchy[depth];
if (!(address_.IsFieldEmpty(field) ||
hierarchy.rule[depth - 1] == nullptr ||
hierarchy.rule[depth - 1]->GetSubKeys().empty() ||
hierarchy.rule[depth] != nullptr)) {
ReportProblemMaybe(field, UNKNOWN_VALUE);
}
}
}
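// Every hierarchy level below the deepest rule actually loaded for this
// address is reported as UNSUPPORTED_FIELD instead of being validated.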
void ValidationTask::CheckUnsupportedField() const {
for (size_t depth = max_depth_; depth < size(LookupKey::kHierarchy);
++depth) {
ReportProblemMaybe(LookupKey::kHierarchy[depth], UNSUPPORTED_FIELD);
}
}
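// Postal codes are checked in two stages: the country-level pattern must
// match the entire value (INVALID_FORMAT), and the deepest rule in the
// hierarchy that carries a postal-code pattern must match a prefix of it
// (MISMATCHING_VALUE). A code already flagged as UNEXPECTED_FIELD is skipped.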
void ValidationTask::CheckPostalCodeFormatAndValue(
const Supplier::RuleHierarchy& hierarchy) const {
assert(hierarchy.rule[0] != nullptr);
const Rule& country_rule = *hierarchy.rule[0];
if (!(ShouldReport(POSTAL_CODE, INVALID_FORMAT) ||
ShouldReport(POSTAL_CODE, MISMATCHING_VALUE))) {
return;
}
if (address_.IsFieldEmpty(POSTAL_CODE)) {
return;
} else if (std::find(problems_->begin(), problems_->end(),
FieldProblemMap::value_type(POSTAL_CODE,
UNEXPECTED_FIELD))
!= problems_->end()) {
return;
}
const RE2ptr* format_ptr = country_rule.GetPostalCodeMatcher();
if (format_ptr != nullptr &&
!RE2::FullMatch(address_.postal_code, *format_ptr->ptr) &&
ShouldReport(POSTAL_CODE, INVALID_FORMAT)) {
ReportProblem(POSTAL_CODE, INVALID_FORMAT);
return;
}
if (!ShouldReport(POSTAL_CODE, MISMATCHING_VALUE)) {
return;
}
for (size_t depth = size(LookupKey::kHierarchy) - 1; depth > 0; --depth) {
if (hierarchy.rule[depth] != nullptr) {
const RE2ptr* prefix_ptr = hierarchy.rule[depth]->GetPostalCodeMatcher();
if (prefix_ptr != nullptr) {
if (!RE2::PartialMatch(address_.postal_code, *prefix_ptr->ptr)) {
ReportProblem(POSTAL_CODE, MISMATCHING_VALUE);
}
return;
}
}
}
}
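// Scans each address line against the language-appropriate P.O. box patterns
// and reports USES_P_O_BOX on the first match, unless the caller explicitly
// allows postal addresses.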
void ValidationTask::CheckUsesPoBox(
const Supplier::RuleHierarchy& hierarchy) const {
assert(hierarchy.rule[0] != nullptr);
const Rule& country_rule = *hierarchy.rule[0];
if (allow_postal_ ||
!ShouldReport(STREET_ADDRESS, USES_P_O_BOX) ||
address_.IsFieldEmpty(STREET_ADDRESS)) {
return;
}
const auto matchers = PostBoxMatchers::GetMatchers(country_rule);
for (const auto& line : address_.address_line) {
for (auto ptr : matchers) {
assert(ptr != nullptr);
if (RE2::PartialMatch(line, *ptr->ptr)) {
ReportProblem(STREET_ADDRESS, USES_P_O_BOX);
return;
}
}
}
}
void ValidationTask::ReportProblem(AddressField field,
AddressProblem problem) const {
problems_->emplace(field, problem);
}
void ValidationTask::ReportProblemMaybe(AddressField field,
AddressProblem problem) const {
if (ShouldReport(field, problem)) {
ReportProblem(field, problem);
}
}
bool ValidationTask::ShouldReport(AddressField field,
AddressProblem problem) const {
return filter_ == nullptr || filter_->empty() ||
std::find(filter_->begin(),
filter_->end(),
FieldProblemMap::value_type(field, problem)) !=
filter_->end();
}
}
} | #include "validation_task.h"
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <libaddressinput/address_validator.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/supplier.h>
#include <cstddef>
#include <memory>
#include <gtest/gtest.h>
#include "lookup_key.h"
#include "rule.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
class ValidationTaskTest : public testing::Test {
public:
ValidationTaskTest(const ValidationTaskTest&) = delete;
ValidationTaskTest& operator=(const ValidationTaskTest&) = delete;
protected:
ValidationTaskTest()
: json_(),
success_(true),
address_(),
allow_postal_(false),
require_name_(false),
filter_{
{COUNTRY, UNEXPECTED_FIELD},
{COUNTRY, MISSING_REQUIRED_FIELD},
{RECIPIENT, UNEXPECTED_FIELD},
{RECIPIENT, MISSING_REQUIRED_FIELD},
},
problems_(),
expected_(),
called_(false),
validated_(BuildCallback(this, &ValidationTaskTest::Validated)) {
static const AddressField kFields[] = {
COUNTRY,
ADMIN_AREA,
LOCALITY,
DEPENDENT_LOCALITY,
SORTING_CODE,
POSTAL_CODE,
STREET_ADDRESS,
ORGANIZATION,
RECIPIENT,
};
static const AddressProblem kProblems[] = {
UNKNOWN_VALUE,
INVALID_FORMAT,
MISMATCHING_VALUE,
USES_P_O_BOX,
};
for (AddressField field : kFields) {
for (AddressProblem problem : kProblems) {
filter_.emplace(field, problem);
}
}
}
void Validate() {
Rule rule[size(LookupKey::kHierarchy)];
auto* task = new ValidationTask(
address_,
allow_postal_,
require_name_,
&filter_,
&problems_,
*validated_);
Supplier::RuleHierarchy hierarchy;
for (size_t i = 0;
i < size(LookupKey::kHierarchy) && json_[i] != nullptr; ++i) {
ASSERT_TRUE(rule[i].ParseSerializedRule(json_[i]));
hierarchy.rule[i] = &rule[i];
}
(*task->supplied_)(success_, *task->lookup_key_, hierarchy);
}
const char* json_[size(LookupKey::kHierarchy)];
bool success_;
AddressData address_;
bool allow_postal_;
bool require_name_;
FieldProblemMap filter_;
FieldProblemMap problems_;
FieldProblemMap expected_;
bool called_;
private:
void Validated(bool success,
const AddressData& address,
const FieldProblemMap& problems) {
ASSERT_EQ(success_, success);
ASSERT_EQ(&address_, &address);
ASSERT_EQ(&problems_, &problems);
called_ = true;
}
const std::unique_ptr<const AddressValidator::Callback> validated_;
};
namespace {
TEST_F(ValidationTaskTest, FailureCountryRuleNull) {
success_ = false;
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, FailureCountryRuleEmpty) {
json_[0] = "{}";
success_ = false;
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, SuccessCountryRuleNullNameEmpty) {
expected_ = {{COUNTRY, MISSING_REQUIRED_FIELD}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, SuccessCountryRuleNullNameNotEmpty) {
address_ = {.region_code = "rrr"};
expected_ = {{COUNTRY, UNKNOWN_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, SuccessCountryRuleEmptyNameEmpty) {
json_[0] = "{}";
expected_ = {{COUNTRY, MISSING_REQUIRED_FIELD}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, SuccessCountryRuleEmptyNameNotEmpty) {
json_[0] = "{}";
address_ = {.region_code = "rrr"};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, MissingRequiredFieldsUS) {
json_[0] = "{}";
address_ = {.region_code = "US"};
filter_ = {
{ADMIN_AREA, MISSING_REQUIRED_FIELD},
{LOCALITY, MISSING_REQUIRED_FIELD},
{POSTAL_CODE, MISSING_REQUIRED_FIELD},
{STREET_ADDRESS, MISSING_REQUIRED_FIELD},
};
expected_ = {
{ADMIN_AREA, MISSING_REQUIRED_FIELD},
{LOCALITY, MISSING_REQUIRED_FIELD},
{POSTAL_CODE, MISSING_REQUIRED_FIELD},
{STREET_ADDRESS, MISSING_REQUIRED_FIELD},
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, MissingNoRequiredFieldsUS) {
json_[0] = "{}";
address_ = {
.region_code = "US",
.address_line{"aaa"},
.administrative_area = "sss",
.locality = "ccc",
.postal_code = "zzz",
.organization = "ooo",
.recipient = "nnn",
};
filter_ = {
{ADMIN_AREA, MISSING_REQUIRED_FIELD},
{LOCALITY, MISSING_REQUIRED_FIELD},
{POSTAL_CODE, MISSING_REQUIRED_FIELD},
{STREET_ADDRESS, MISSING_REQUIRED_FIELD},
{ORGANIZATION, MISSING_REQUIRED_FIELD},
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, UnexpectedFieldUS) {
json_[0] = "{}";
address_ = {
.region_code = "US",
.dependent_locality = "ddd",
};
filter_ = {{DEPENDENT_LOCALITY, UNEXPECTED_FIELD}};
expected_ = {{DEPENDENT_LOCALITY, UNEXPECTED_FIELD}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, MissingRequiredFieldRequireName) {
json_[0] = "{}";
address_ = {.region_code = "rrr"};
require_name_ = true;
expected_ = {{RECIPIENT, MISSING_REQUIRED_FIELD}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, UnknownValueRuleNull) {
json_[0] = R"({"fmt":"%R%S","require":"RS","sub_keys":"aa~bb"})";
address_ = {
.region_code = "rrr",
.administrative_area = "sss",
};
expected_ = {{ADMIN_AREA, UNKNOWN_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, NoUnknownValueRuleNotNull) {
json_[0] = R"({"fmt":"%R%S","require":"RS","sub_keys":"aa~bb"})";
json_[1] = "{}";
address_ = {
.region_code = "rrr",
.administrative_area = "sss",
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeUnrecognizedFormatTooShort) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
address_ = {
.region_code = "rrr",
.postal_code = "12",
};
expected_ = {{POSTAL_CODE, INVALID_FORMAT}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeUnrecognizedFormatTooLong) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
address_ = {
.region_code = "rrr",
.postal_code = "1234",
};
expected_ = {{POSTAL_CODE, INVALID_FORMAT}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeRecognizedFormat) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
address_ = {
.region_code = "rrr",
.postal_code = "123",
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeMismatchingValue1) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
json_[1] = R"({"zip":"1"})";
address_ = {
.region_code = "rrr",
.postal_code = "000",
};
expected_ = {{POSTAL_CODE, MISMATCHING_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeMismatchingValue2) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
json_[1] = R"({"zip":"1"})";
json_[2] = R"({"zip":"12"})";
address_ = {
.region_code = "rrr",
.postal_code = "100",
};
expected_ = {{POSTAL_CODE, MISMATCHING_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeMismatchingValue3) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
json_[1] = R"({"zip":"1"})";
json_[2] = R"({"zip":"12"})";
json_[3] = R"({"zip":"123"})";
address_ = {
.region_code = "rrr",
.postal_code = "120",
};
expected_ = {{POSTAL_CODE, MISMATCHING_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeMatchingValue) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{3}"})";
json_[1] = R"({"zip":"1"})";
json_[2] = R"({"zip":"12"})";
json_[3] = R"({"zip":"123"})";
address_ = {
.region_code = "rrr",
.postal_code = "123",
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodePrefixMismatchingValue) {
json_[0] = R"({"fmt":"%Z","zip":"\\d{5}"})";
json_[1] = R"({"zip":"9[0-5]|96[01]"})";
address_ = {
.region_code = "rrr",
.postal_code = "10960",
};
expected_ = {{POSTAL_CODE, MISMATCHING_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, PostalCodeFilterIgnoresMismatching) {
json_[0] = R"({"zip":"\\d{3}"})";
json_[1] = R"({"zip":"1"})";
address_ = {
.region_code = "rrr",
.postal_code = "000",
};
filter_ = {{POSTAL_CODE, INVALID_FORMAT}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, UsesPoBoxLanguageUnd) {
json_[0] = R"({"fmt":"%A"})";
address_ = {
.region_code = "rrr",
.address_line{
"aaa",
"P.O. Box",
"aaa",
},
};
expected_ = {{STREET_ADDRESS, USES_P_O_BOX}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, UsesPoBoxLanguageDa) {
json_[0] = R"({"fmt":"%A","languages":"da"})";
address_ = {
.region_code = "rrr",
.address_line{
"aaa",
"Postboks",
"aaa",
},
};
expected_ = {{STREET_ADDRESS, USES_P_O_BOX}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, UsesPoBoxLanguageDaNotMatchDe) {
json_[0] = R"({"fmt":"%A","languages":"da"})";
address_ = {
.region_code = "rrr",
.address_line{
"aaa",
"Postfach",
"aaa",
},
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_F(ValidationTaskTest, UsesPoBoxAllowPostal) {
json_[0] = R"({"fmt":"%A"})";
address_ = {
.region_code = "rrr",
.address_line{
"aaa",
"P.O. Box",
"aaa",
},
};
allow_postal_ = true;
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
}
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/validation_task.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/validation_task_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
af364550-5955-4c94-a193-5b2803f102e2 | cpp | google/cel-cpp | parsed_json_map_value | common/values/parsed_json_map_value.cc | common/values/parsed_json_map_value_test.cc | #include "common/values/parsed_json_map_value.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/parsed_json_value.h"
#include "internal/json.h"
#include "internal/status_macros.h"
#include "internal/strings.h"
#include "internal/well_known_types.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/map.h"
#include "google/protobuf/map_field.h"
#include "google/protobuf/message.h"
#include "google/protobuf/message_lite.h"
namespace cel {
namespace common_internal {
absl::Status CheckWellKnownStructMessage(const google::protobuf::Message& message) {
return internal::CheckJsonMap(message);
}
}
std::string ParsedJsonMapValue::DebugString() const {
if (value_ == nullptr) {
return "{}";
}
return internal::JsonMapDebugString(*value_);
}
absl::Status ParsedJsonMapValue::SerializeTo(AnyToJsonConverter& converter,
absl::Cord& value) const {
if (value_ == nullptr) {
value.Clear();
return absl::OkStatus();
}
if (!value_->SerializePartialToCord(&value)) {
return absl::UnknownError("failed to serialize protocol buffer message");
}
return absl::OkStatus();
}
absl::StatusOr<Json> ParsedJsonMapValue::ConvertToJson(
AnyToJsonConverter& converter) const {
if (value_ == nullptr) {
return JsonObject();
}
return internal::ProtoJsonMapToNativeJsonMap(*value_);
}
absl::Status ParsedJsonMapValue::Equal(ValueManager& value_manager,
const Value& other,
Value& result) const {
if (auto other_value = other.AsParsedJsonMap(); other_value) {
result = BoolValue(*this == *other_value);
return absl::OkStatus();
}
if (auto other_value = other.AsMap(); other_value) {
return common_internal::MapValueEqual(value_manager, MapValue(*this),
*other_value, result);
}
result = BoolValue(false);
return absl::OkStatus();
}
absl::StatusOr<Value> ParsedJsonMapValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
size_t ParsedJsonMapValue::Size() const {
if (value_ == nullptr) {
return 0;
}
return static_cast<size_t>(
well_known_types::GetStructReflectionOrDie(value_->GetDescriptor())
.FieldsSize(*value_));
}
absl::Status ParsedJsonMapValue::Get(ValueManager& value_manager,
const Value& key, Value& result) const {
CEL_ASSIGN_OR_RETURN(bool ok, Find(value_manager, key, result));
if (ABSL_PREDICT_FALSE(!ok) && !(result.IsError() || result.IsUnknown())) {
return absl::NotFoundError(
absl::StrCat("Key not found in map : ", key.DebugString()));
}
return absl::OkStatus();
}
absl::StatusOr<Value> ParsedJsonMapValue::Get(ValueManager& value_manager,
const Value& key) const {
Value result;
CEL_RETURN_IF_ERROR(Get(value_manager, key, result));
return result;
}
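// Non-throwing lookup: yields the mapped value and true when `key` is a
// string present in the struct, and NullValue with false otherwise. Error and
// unknown keys propagate through as the result.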
absl::StatusOr<bool> ParsedJsonMapValue::Find(ValueManager& value_manager,
const Value& key,
Value& result) const {
if (key.IsError() || key.IsUnknown()) {
result = key;
return false;
}
if (value_ != nullptr) {
if (auto string_key = key.AsString(); string_key) {
if (ABSL_PREDICT_FALSE(value_ == nullptr)) {
result = NullValue();
return false;
}
std::string key_scratch;
if (const auto* value =
well_known_types::GetStructReflectionOrDie(
value_->GetDescriptor())
.FindField(*value_, string_key->NativeString(key_scratch));
value != nullptr) {
result = common_internal::ParsedJsonValue(
value_manager.GetMemoryManager().arena(), Borrowed(value_, value));
return true;
}
result = NullValue();
return false;
}
}
result = NullValue();
return false;
}
absl::StatusOr<std::pair<Value, bool>> ParsedJsonMapValue::Find(
ValueManager& value_manager, const Value& key) const {
Value result;
CEL_ASSIGN_OR_RETURN(auto found, Find(value_manager, key, result));
if (found) {
return std::pair{std::move(result), found};
}
return std::pair{NullValue(), found};
}
absl::Status ParsedJsonMapValue::Has(ValueManager& value_manager,
const Value& key, Value& result) const {
if (key.IsError() || key.IsUnknown()) {
result = key;
return absl::OkStatus();
}
if (value_ != nullptr) {
if (auto string_key = key.AsString(); string_key) {
if (ABSL_PREDICT_FALSE(value_ == nullptr)) {
result = BoolValue(false);
return absl::OkStatus();
}
std::string key_scratch;
if (const auto* value =
well_known_types::GetStructReflectionOrDie(
value_->GetDescriptor())
.FindField(*value_, string_key->NativeString(key_scratch));
value != nullptr) {
result = BoolValue(true);
} else {
result = BoolValue(false);
}
return absl::OkStatus();
}
}
result = BoolValue(false);
return absl::OkStatus();
}
absl::StatusOr<Value> ParsedJsonMapValue::Has(ValueManager& value_manager,
const Value& key) const {
Value result;
CEL_RETURN_IF_ERROR(Has(value_manager, key, result));
return result;
}
namespace {
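// List view over the keys of a google.protobuf.Struct, materialized by
// ListKeys() below. The key strings live either on the arena recorded in
// `keys_arena_` or on the heap, in which case the destructor reclaims the
// array.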
class ParsedJsonMapValueKeysList final
: public ParsedListValueInterface,
public EnableSharedFromThis<ParsedJsonMapValueKeysList> {
public:
ParsedJsonMapValueKeysList(Owned<const google::protobuf::MessageLite> message,
absl::Nullable<google::protobuf::Arena*> keys_arena,
std::string* keys, size_t keys_size)
: message_(std::move(message)),
keys_arena_(keys_arena),
keys_(keys),
keys_size_(keys_size) {}
~ParsedJsonMapValueKeysList() override {
if (keys_arena_ == nullptr) {
delete[] keys_;
}
}
std::string DebugString() const override {
std::string result;
result.push_back('[');
for (size_t i = 0; i < keys_size_; ++i) {
if (i > 0) {
result.append(", ");
}
result.append(internal::FormatStringLiteral(keys_[i]));
}
result.push_back(']');
return result;
}
size_t Size() const override { return keys_size_; }
absl::Status Contains(ValueManager& value_manager, const Value& other,
Value& result) const override {
if (ABSL_PREDICT_FALSE(other.IsError() || other.IsUnknown())) {
result = other;
return absl::OkStatus();
}
if (const auto other_string = other.AsString(); other_string) {
for (size_t i = 0; i < keys_size_; ++i) {
if (keys_[i] == *other_string) {
result = BoolValue(true);
return absl::OkStatus();
}
}
}
result = BoolValue(false);
return absl::OkStatus();
}
absl::StatusOr<JsonArray> ConvertToJsonArray(
AnyToJsonConverter&) const override {
JsonArrayBuilder builder;
builder.reserve(keys_size_);
for (size_t i = 0; i < keys_size_; ++i) {
builder.push_back(JsonString(keys_[i]));
}
return std::move(builder).Build();
}
protected:
absl::Status GetImpl(ValueManager& value_manager, size_t index,
Value& result) const override {
result =
StringValue(value_manager.GetMemoryManager().arena(), keys_[index]);
return absl::OkStatus();
}
private:
friend struct cel::NativeTypeTraits<ParsedJsonMapValueKeysList>;
NativeTypeId GetNativeTypeId() const noexcept override {
return NativeTypeId::For<ParsedJsonMapValueKeysList>();
}
const Owned<const google::protobuf::MessageLite> message_;
const absl::Nullable<google::protobuf::Arena*> keys_arena_;
std::string* const keys_;
const size_t keys_size_;
};
struct ArenaStringArray {
absl::Nullable<std::string*> data;
size_t size;
};
void DestroyArenaStringArray(void* strings) {
std::destroy_n(reinterpret_cast<ArenaStringArray*>(strings)->data,
reinterpret_cast<ArenaStringArray*>(strings)->size);
}
}
template <>
struct NativeTypeTraits<ParsedJsonMapValueKeysList> final {
static NativeTypeId Id(const ParsedJsonMapValueKeysList& type) {
return type.GetNativeTypeId();
}
static bool SkipDestructor(const ParsedJsonMapValueKeysList& type) {
return NativeType::SkipDestructor(type.message_) &&
type.keys_arena_ != nullptr;
}
};
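// Materializes every key of the struct into a freshly allocated string array
// and wraps it in the list type above. For arena allocations a custom
// destructor is registered, since arenas do not run destructors for memory
// obtained through AllocateAligned().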
absl::Status ParsedJsonMapValue::ListKeys(ValueManager& value_manager,
ListValue& result) const {
if (value_ == nullptr) {
result = ListValue();
return absl::OkStatus();
}
google::protobuf::Arena* arena = value_manager.GetMemoryManager().arena();
size_t keys_size;
std::string* keys;
const auto reflection =
well_known_types::GetStructReflectionOrDie(value_->GetDescriptor());
keys_size = static_cast<size_t>(reflection.FieldsSize(*value_));
auto keys_it = reflection.BeginFields(*value_);
if (arena != nullptr) {
keys = reinterpret_cast<std::string*>(arena->AllocateAligned(
keys_size * sizeof(std::string), alignof(std::string)));
for (size_t i = 0; i < keys_size; ++i, ++keys_it) {
::new (static_cast<void*>(keys + i))
std::string(keys_it.GetKey().GetStringValue());
}
} else {
keys = new std::string[keys_size];
for (size_t i = 0; i < keys_size; ++i, ++keys_it) {
const auto& key = keys_it.GetKey().GetStringValue();
(keys + i)->assign(key.data(), key.size());
}
}
if (arena != nullptr) {
ArenaStringArray* array = google::protobuf::Arena::Create<ArenaStringArray>(arena);
array->data = keys;
array->size = keys_size;
arena->OwnCustomDestructor(array, &DestroyArenaStringArray);
}
result = ParsedListValue(
value_manager.GetMemoryManager().MakeShared<ParsedJsonMapValueKeysList>(
value_, arena, keys, keys_size));
return absl::OkStatus();
}
absl::StatusOr<ListValue> ParsedJsonMapValue::ListKeys(
ValueManager& value_manager) const {
ListValue result;
CEL_RETURN_IF_ERROR(ListKeys(value_manager, result));
return result;
}
absl::Status ParsedJsonMapValue::ForEach(ValueManager& value_manager,
ForEachCallback callback) const {
if (value_ == nullptr) {
return absl::OkStatus();
}
const auto reflection =
well_known_types::GetStructReflectionOrDie(value_->GetDescriptor());
Value key_scratch;
Value value_scratch;
auto map_begin = reflection.BeginFields(*value_);
const auto map_end = reflection.EndFields(*value_);
for (; map_begin != map_end; ++map_begin) {
key_scratch = StringValue(value_manager.GetMemoryManager().arena(),
map_begin.GetKey().GetStringValue());
value_scratch = common_internal::ParsedJsonValue(
value_manager.GetMemoryManager().arena(),
Borrowed(value_, &map_begin.GetValueRef().GetMessageValue()));
CEL_ASSIGN_OR_RETURN(auto ok, callback(key_scratch, value_scratch));
if (!ok) {
break;
}
}
return absl::OkStatus();
}
namespace {
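// Iterator over the struct's keys; CEL map iteration yields keys, not
// entries. Each Next() copies the current key into a StringValue allocated
// from the value manager's memory manager.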
class ParsedJsonMapValueIterator final : public ValueIterator {
public:
explicit ParsedJsonMapValueIterator(Owned<const google::protobuf::Message> message)
: message_(std::move(message)),
reflection_(well_known_types::GetStructReflectionOrDie(
message_->GetDescriptor())),
begin_(reflection_.BeginFields(*message_)),
end_(reflection_.EndFields(*message_)) {}
bool HasNext() override { return begin_ != end_; }
absl::Status Next(ValueManager& value_manager, Value& result) override {
if (ABSL_PREDICT_FALSE(begin_ == end_)) {
return absl::FailedPreconditionError(
"`ValueIterator::Next` called after `ValueIterator::HasNext` "
"returned false");
}
std::string scratch =
static_cast<std::string>(begin_.GetKey().GetStringValue());
result = StringValue(value_manager.GetMemoryManager().arena(),
std::move(scratch));
++begin_;
return absl::OkStatus();
}
private:
const Owned<const google::protobuf::Message> message_;
const well_known_types::StructReflection reflection_;
google::protobuf::MapIterator begin_;
const google::protobuf::MapIterator end_;
std::string scratch_;
};
}
absl::StatusOr<absl::Nonnull<std::unique_ptr<ValueIterator>>>
ParsedJsonMapValue::NewIterator(ValueManager& value_manager) const {
if (value_ == nullptr) {
return NewEmptyValueIterator();
}
return std::make_unique<ParsedJsonMapValueIterator>(value_);
}
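// Two values are equal when they wrap the same message, when one side is null
// and the other is empty, or when the underlying structs compare equal
// key-by-key via internal::JsonMapEquals.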
bool operator==(const ParsedJsonMapValue& lhs, const ParsedJsonMapValue& rhs) {
if (cel::to_address(lhs.value_) == cel::to_address(rhs.value_)) {
return true;
}
if (cel::to_address(lhs.value_) == nullptr) {
return rhs.IsEmpty();
}
if (cel::to_address(rhs.value_) == nullptr) {
return lhs.IsEmpty();
}
return internal::JsonMapEquals(*lhs.value_, *rhs.value_);
}
} | #include <utility>
#include <vector>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "common/value_manager.h"
#include "common/value_testing.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::cel::test::BoolValueIs;
using ::cel::test::IsNullValue;
using ::cel::test::StringValueIs;
using ::testing::AnyOf;
using ::testing::IsEmpty;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::Pair;
using ::testing::PrintToStringParamName;
using ::testing::TestWithParam;
using ::testing::UnorderedElementsAre;
using ::testing::VariantWith;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
class ParsedJsonMapValueTest : public TestWithParam<AllocatorKind> {
public:
void SetUp() override {
switch (GetParam()) {
case AllocatorKind::kArena:
arena_.emplace();
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::Pooling(arena()),
NewThreadCompatibleTypeReflector(MemoryManager::Pooling(arena())));
break;
case AllocatorKind::kNewDelete:
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::ReferenceCounting(),
NewThreadCompatibleTypeReflector(
MemoryManager::ReferenceCounting()));
break;
}
}
void TearDown() override {
value_manager_.reset();
arena_.reset();
}
Allocator<> allocator() {
return arena_ ? ArenaAllocator(&*arena_) : NewDeleteAllocator();
}
absl::Nullable<google::protobuf::Arena*> arena() { return allocator().arena(); }
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
return GetTestingDescriptorPool();
}
absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
return GetTestingMessageFactory();
}
ValueManager& value_manager() { return **value_manager_; }
template <typename T>
auto GeneratedParseTextProto(absl::string_view text) {
return ::cel::internal::GeneratedParseTextProto<T>(
allocator(), text, descriptor_pool(), message_factory());
}
template <typename T>
auto DynamicParseTextProto(absl::string_view text) {
return ::cel::internal::DynamicParseTextProto<T>(
allocator(), text, descriptor_pool(), message_factory());
}
private:
absl::optional<google::protobuf::Arena> arena_;
absl::optional<Shared<ValueManager>> value_manager_;
};
TEST_P(ParsedJsonMapValueTest, Kind) {
EXPECT_EQ(ParsedJsonMapValue::kind(), ParsedJsonMapValue::kKind);
EXPECT_EQ(ParsedJsonMapValue::kind(), ValueKind::kMap);
}
TEST_P(ParsedJsonMapValueTest, GetTypeName) {
EXPECT_EQ(ParsedJsonMapValue::GetTypeName(), ParsedJsonMapValue::kName);
EXPECT_EQ(ParsedJsonMapValue::GetTypeName(), "google.protobuf.Struct");
}
TEST_P(ParsedJsonMapValueTest, GetRuntimeType) {
  ParsedJsonMapValue value;
  EXPECT_EQ(value.GetRuntimeType(), JsonMapType());
}
TEST_P(ParsedJsonMapValueTest, DebugString_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
EXPECT_EQ(valid_value.DebugString(), "{}");
}
TEST_P(ParsedJsonMapValueTest, IsZeroValue_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
EXPECT_TRUE(valid_value.IsZeroValue());
}
TEST_P(ParsedJsonMapValueTest, SerializeTo_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
absl::Cord serialized;
EXPECT_THAT(valid_value.SerializeTo(value_manager(), serialized), IsOk());
EXPECT_THAT(serialized, IsEmpty());
}
TEST_P(ParsedJsonMapValueTest, ConvertToJson_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
EXPECT_THAT(valid_value.ConvertToJson(value_manager()),
IsOkAndHolds(VariantWith<JsonObject>(JsonObject())));
}
TEST_P(ParsedJsonMapValueTest, Equal_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
EXPECT_THAT(valid_value.Equal(value_manager(), BoolValue()),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(
valid_value.Equal(
value_manager(),
ParsedJsonMapValue(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"))),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Equal(value_manager(), MapValue()),
IsOkAndHolds(BoolValueIs(true)));
}
TEST_P(ParsedJsonMapValueTest, Empty_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
EXPECT_TRUE(valid_value.IsEmpty());
}
TEST_P(ParsedJsonMapValueTest, Size_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(R"pb()pb"));
EXPECT_EQ(valid_value.Size(), 0);
}
TEST_P(ParsedJsonMapValueTest, Get_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: {}
}
fields {
key: "bar"
value: { bool_value: true }
})pb"));
EXPECT_THAT(valid_value.Get(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(valid_value.Get(value_manager(), StringValue("foo")),
IsOkAndHolds(IsNullValue()));
EXPECT_THAT(valid_value.Get(value_manager(), StringValue("bar")),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Get(value_manager(), StringValue("baz")),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_P(ParsedJsonMapValueTest, Find_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: {}
}
fields {
key: "bar"
value: { bool_value: true }
})pb"));
EXPECT_THAT(valid_value.Find(value_manager(), BoolValue()),
IsOkAndHolds(Pair(IsNullValue(), IsFalse())));
EXPECT_THAT(valid_value.Find(value_manager(), StringValue("foo")),
IsOkAndHolds(Pair(IsNullValue(), IsTrue())));
EXPECT_THAT(valid_value.Find(value_manager(), StringValue("bar")),
IsOkAndHolds(Pair(BoolValueIs(true), IsTrue())));
EXPECT_THAT(valid_value.Find(value_manager(), StringValue("baz")),
IsOkAndHolds(Pair(IsNullValue(), IsFalse())));
}
TEST_P(ParsedJsonMapValueTest, Has_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: {}
}
fields {
key: "bar"
value: { bool_value: true }
})pb"));
EXPECT_THAT(valid_value.Has(value_manager(), BoolValue()),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Has(value_manager(), StringValue("foo")),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Has(value_manager(), StringValue("bar")),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Has(value_manager(), StringValue("baz")),
IsOkAndHolds(BoolValueIs(false)));
}
TEST_P(ParsedJsonMapValueTest, ListKeys_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: {}
}
fields {
key: "bar"
value: { bool_value: true }
})pb"));
ASSERT_OK_AND_ASSIGN(auto keys, valid_value.ListKeys(value_manager()));
EXPECT_THAT(keys.Size(), IsOkAndHolds(2));
EXPECT_THAT(keys.DebugString(),
AnyOf("[\"foo\", \"bar\"]", "[\"bar\", \"foo\"]"));
EXPECT_THAT(keys.Contains(value_manager(), BoolValue()),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(keys.Contains(value_manager(), StringValue("bar")),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(keys.Get(value_manager(), 0),
IsOkAndHolds(AnyOf(StringValueIs("foo"), StringValueIs("bar"))));
EXPECT_THAT(keys.Get(value_manager(), 1),
IsOkAndHolds(AnyOf(StringValueIs("foo"), StringValueIs("bar"))));
EXPECT_THAT(
keys.ConvertToJson(value_manager()),
IsOkAndHolds(AnyOf(VariantWith<JsonArray>(MakeJsonArray(
{JsonString("foo"), JsonString("bar")})),
VariantWith<JsonArray>(MakeJsonArray(
{JsonString("bar"), JsonString("foo")})))));
}
TEST_P(ParsedJsonMapValueTest, ForEach_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: {}
}
fields {
key: "bar"
value: { bool_value: true }
})pb"));
std::vector<std::pair<Value, Value>> entries;
EXPECT_THAT(
valid_value.ForEach(
value_manager(),
[&](const Value& key, const Value& value) -> absl::StatusOr<bool> {
entries.push_back(std::pair{std::move(key), std::move(value)});
return true;
}),
IsOk());
EXPECT_THAT(entries, UnorderedElementsAre(
Pair(StringValueIs("foo"), IsNullValue()),
Pair(StringValueIs("bar"), BoolValueIs(true))));
}
TEST_P(ParsedJsonMapValueTest, NewIterator_Dynamic) {
ParsedJsonMapValue valid_value(
DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: {}
}
fields {
key: "bar"
value: { bool_value: true }
})pb"));
ASSERT_OK_AND_ASSIGN(auto iterator, valid_value.NewIterator(value_manager()));
ASSERT_TRUE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()),
IsOkAndHolds(AnyOf(StringValueIs("foo"), StringValueIs("bar"))));
ASSERT_TRUE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()),
IsOkAndHolds(AnyOf(StringValueIs("foo"), StringValueIs("bar"))));
ASSERT_FALSE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
INSTANTIATE_TEST_SUITE_P(ParsedJsonMapValueTest, ParsedJsonMapValueTest,
::testing::Values(AllocatorKind::kArena,
AllocatorKind::kNewDelete),
PrintToStringParamName());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_json_map_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_json_map_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
8dd52495-17d6-4d03-9b40-62c146124490 | cpp | tensorflow/tensorflow | binary_elementwise | tensorflow/lite/experimental/shlo/ops/binary_elementwise.h | tensorflow/lite/experimental/shlo/ops/binary_elementwise_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_BINARY_ELEMENTWISE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_BINARY_ELEMENTWISE_H_
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace detail {
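// Applies `func` to two per-tensor-quantized operands: each element pair is
// dequantized to the expressed type (following the usual affine scheme,
// (q - zero_point) * scale), combined with `func`, and requantized into
// `output`. The output scale is inverted once outside the loop so the hot
// path multiplies rather than divides.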
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeOpQuantizePerTensor(F&& func, const Tensor& lhs,
const Tensor& rhs, Tensor& output) {
using StorageT = StorageType<storage_type>;
using ExpressedT = StorageType<expressed_type>;
const DimensionSize num_elements = lhs.NumElements();
const StorageT lhs_zero_point =
lhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT lhs_scale =
lhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT rhs_zero_point =
rhs.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT rhs_scale =
rhs.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT output_zero_point =
output.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT output_scale =
output.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT* lhs_data = lhs.GetDataAs<storage_type>();
const StorageT* rhs_data = rhs.GetDataAs<storage_type>();
StorageT* output_data = output.GetDataAs<storage_type>();
const ExpressedT inv_scale = static_cast<ExpressedT>(1) / output_scale;
for (DimensionSize i = 0; i < num_elements;
++i, ++lhs_data, ++rhs_data, ++output_data) {
const ExpressedT dequantized_lhs =
Dequantize(*lhs_data, lhs_zero_point, lhs_scale);
const ExpressedT dequantized_rhs =
Dequantize(*rhs_data, rhs_zero_point, rhs_scale);
const ExpressedT dequantized_res = func(dequantized_lhs, dequantized_rhs);
*output_data = Quantize<storage_type, expressed_type>(
dequantized_res, output_zero_point, inv_scale);
}
}
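// Unquantized fast path: a single flat loop over densely stored elements,
// applying `func` pairwise. Callers are expected to have verified that `lhs`,
// `rhs`, and `output` share a shape.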
template <DataType data_type, class F>
void EvaluateNoQuantization(F&& func, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
using T = StorageType<data_type>;
const T* lhs_data = lhs.GetDataAs<data_type>();
const T* rhs_data = rhs.GetDataAs<data_type>();
T* output_data = output.GetDataAs<data_type>();
const DimensionSize num_elements = lhs.NumElements();
for (DimensionSize i = 0; i < num_elements;
++i, ++output_data, ++lhs_data, ++rhs_data) {
*output_data = static_cast<T>(func(*lhs_data, *rhs_data));
}
}
}
}
#endif | #include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
namespace shlo_ref {
namespace {
struct TestOp {
template <typename T>
T operator()(const T& lhs, const T& rhs) {
return lhs + rhs;
}
};
template <class T>
struct EvaluateNoQuantizationTest : ::testing::Test {};
TYPED_TEST_SUITE(EvaluateNoQuantizationTest, ArithmeticTestTypes,
TestParamNames);
TYPED_TEST(EvaluateNoQuantizationTest, ArithmeticTensorsWithTestOp) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(), TestOp());
detail::EvaluateNoQuantization<TypeParam::kStorage>(
TestOp(), lhs_tensor, rhs_tensor, output_tensor);
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
template <class T>
struct DequantizeOpQuantizePerTensor : ::testing::Test {};
TYPED_TEST_SUITE(DequantizeOpQuantizePerTensor, QuantizedTestTypes,
TestParamNames);
TYPED_TEST(DequantizeOpQuantizePerTensor, QuantizedPerTensorWithTestOp) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -5, 5);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT lhs_scale = static_cast<ExpressedT>(1.3);
const StorageT lhs_zero_point = static_cast<StorageT>(4);
const ExpressedT rhs_scale = static_cast<ExpressedT>(1.2);
const StorageT rhs_zero_point = static_cast<StorageT>(5);
const ExpressedT output_scale = static_cast<ExpressedT>(1.5);
const StorageT output_zero_point = static_cast<StorageT>(3);
Tensor lhs_tensor{.type =
QuantizedPerTensorTensorType{
.shape = shape,
.element_type = QuantizedElementTypePerTensor(
TypeParam::kStorage, lhs_zero_point,
TypeParam::kExpressed, lhs_scale)},
.data = lhs_data.data()};
Tensor rhs_tensor{.type =
QuantizedPerTensorTensorType{
.shape = shape,
.element_type = QuantizedElementTypePerTensor(
TypeParam::kStorage, rhs_zero_point,
TypeParam::kExpressed, rhs_scale)},
.data = rhs_data.data()};
Tensor output_tensor{.type =
QuantizedPerTensorTensorType{
.shape = shape,
.element_type = QuantizedElementTypePerTensor(
TypeParam::kStorage, output_zero_point,
TypeParam::kExpressed, output_scale)},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
lhs_data, rhs_data, expected_data.begin(),
[lhs_zero_point, lhs_scale, rhs_zero_point, rhs_scale, output_zero_point,
output_scale](auto lhs, auto rhs) {
const ExpressedT dequantized_lhs =
Dequantize(lhs, lhs_zero_point, lhs_scale);
const ExpressedT dequantized_rhs =
Dequantize(rhs, rhs_zero_point, rhs_scale);
const ExpressedT dequantized_res =
TestOp()(dequantized_lhs, dequantized_rhs);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, output_zero_point,
static_cast<ExpressedT>(1.) / output_scale);
});
detail::DequantizeOpQuantizePerTensor<TypeParam::kStorage,
TypeParam::kExpressed>(
TestOp(), lhs_tensor, rhs_tensor, output_tensor);
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/binary_elementwise.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/binary_elementwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
76550dac-69ef-42d5-8854-311eb683d6cd | cpp | google/cel-cpp | null_type | common/types/null_type.h | common/types/null_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_NULL_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_NULL_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
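// `NullType` is the type of the CEL `null` literal. It carries no state: all
// instances compare equal and hash identically, and `swap` is a no-op, making
// the class an empty monostate tag.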
class NullType final {
public:
static constexpr TypeKind kKind = TypeKind::kNull;
static constexpr absl::string_view kName = "null_type";
NullType() = default;
NullType(const NullType&) = default;
NullType(NullType&&) = default;
NullType& operator=(const NullType&) = default;
NullType& operator=(NullType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(NullType&) noexcept {}
};
inline constexpr void swap(NullType& lhs, NullType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(NullType, NullType) { return true; }
inline constexpr bool operator!=(NullType lhs, NullType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, NullType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const NullType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(NullType, Kind) {
EXPECT_EQ(NullType().kind(), NullType::kKind);
EXPECT_EQ(Type(NullType()).kind(), NullType::kKind);
}
TEST(NullType, Name) {
EXPECT_EQ(NullType().name(), NullType::kName);
EXPECT_EQ(Type(NullType()).name(), NullType::kName);
}
TEST(NullType, DebugString) {
{
std::ostringstream out;
out << NullType();
EXPECT_EQ(out.str(), NullType::kName);
}
{
std::ostringstream out;
out << Type(NullType());
EXPECT_EQ(out.str(), NullType::kName);
}
}
TEST(NullType, Hash) {
EXPECT_EQ(absl::HashOf(NullType()), absl::HashOf(NullType()));
}
TEST(NullType, Equal) {
EXPECT_EQ(NullType(), NullType());
EXPECT_EQ(Type(NullType()), NullType());
EXPECT_EQ(NullType(), Type(NullType()));
EXPECT_EQ(Type(NullType()), Type(NullType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/null_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/null_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
d2d45273-35c5-4952-a6d7-b819d01e7583 | cpp | google/tensorstore | data_type_endian_conversion | tensorstore/internal/data_type_endian_conversion.cc | tensorstore/internal/data_type_endian_conversion_test.cc | #include "tensorstore/internal/data_type_endian_conversion.h"
#include <algorithm>
#include <cassert>
#include <complex>
#include <cstdint>
#include <memory>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/data_type.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal {
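// Copies `source` into `target`, rewriting each element into `target_endian`
// byte order. The per-dtype kernel table supplies either a plain copy (native
// order) or a byte-swapping copy, and the strided iteration handles arbitrary
// layouts of matching shape.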
void EncodeArray(ArrayView<const void> source, ArrayView<void> target,
endian target_endian) {
const DataType dtype = source.dtype();
assert(absl::c_equal(source.shape(), target.shape()));
assert(dtype == target.dtype());
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
internal::IterateOverStridedLayouts<2>(
{(target_endian == endian::native) ? functions.copy
: functions.swap_endian,
nullptr},
nullptr, source.shape(),
{{const_cast<void*>(source.data()), target.data()}},
{{source.byte_strides().data(), target.byte_strides().data()}},
skip_repeated_elements, {{dtype.size(), dtype.size()}});
}
namespace {
static_assert(sizeof(bool) == 1);
struct DecodeBoolArray {
void operator()(unsigned char* source, bool* output, void*) const {
*output = static_cast<bool>(*source);
}
};
struct DecodeBoolArrayInplace {
void operator()(unsigned char* source, void*) const {
*source = static_cast<bool>(*source);
}
};
}
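// Decoding differs from encoding only for bool, which must be normalized so
// that any nonzero source byte becomes exactly 1. Byte swapping is its own
// inverse, so all other data types simply reuse EncodeArray.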
void DecodeArray(ArrayView<const void> source, endian source_endian,
ArrayView<void> target) {
const DataType dtype = source.dtype();
assert(absl::c_equal(source.shape(), target.shape()));
assert(dtype == target.dtype());
if (dtype.id() != DataTypeId::bool_t) {
EncodeArray(source, target, source_endian);
return;
}
internal::IterateOverStridedLayouts<2>(
{SimpleElementwiseFunction<
DecodeBoolArray(unsigned char, bool), void*>(),
nullptr},
nullptr, source.shape(),
{{const_cast<void*>(source.data()), target.data()}},
{{source.byte_strides().data(), target.byte_strides().data()}},
skip_repeated_elements, {{1, 1}});
}
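// In-place variant: when the source pointer and byte strides satisfy the
// dtype's alignment requirement, the byte swap (or bool normalization) is
// applied directly to the existing buffer; otherwise the data is copied into
// a newly allocated array with `decoded_layout`.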
void DecodeArray(SharedArrayView<void>* source, endian source_endian,
StridedLayoutView<> decoded_layout) {
assert(source != nullptr);
assert(absl::c_equal(source->shape(), decoded_layout.shape()));
const DataType dtype = source->dtype();
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
if ((reinterpret_cast<std::uintptr_t>(source->data()) % dtype->alignment) ==
0 &&
std::all_of(source->byte_strides().begin(), source->byte_strides().end(),
[&](Index byte_stride) {
return (byte_stride % dtype->alignment) == 0;
})) {
const ElementwiseFunction<1, void*>* convert_func = nullptr;
if (dtype.id() == DataTypeId::bool_t) {
convert_func =
SimpleElementwiseFunction<DecodeBoolArrayInplace(unsigned char),
void*>();
} else if (source_endian != endian::native &&
functions.swap_endian_inplace) {
convert_func = functions.swap_endian_inplace;
}
if (convert_func) {
internal::IterateOverStridedLayouts<1>(
{convert_func,
nullptr},
nullptr, source->shape(), {{source->data()}},
{{source->byte_strides().data()}},
skip_repeated_elements, {{dtype.size()}});
}
} else {
*source = CopyAndDecodeArray(*source, source_endian, decoded_layout);
}
}
SharedArrayView<void> CopyAndDecodeArray(ArrayView<const void> source,
endian source_endian,
StridedLayoutView<> decoded_layout) {
SharedArrayView<void> target(
internal::AllocateAndConstructSharedElements(
decoded_layout.num_elements(), default_init, source.dtype()),
decoded_layout);
DecodeArray(source, source_endian, target);
return target;
}
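// Attempts a zero-copy reinterpretation of `source` as an array. This
// succeeds only when no byte swap is required, the cord is flat, and the
// offset pointer and strides meet the dtype's alignment; the returned array
// keeps the cord alive through an aliasing shared_ptr. Returns a null array
// otherwise.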
SharedArrayView<const void> TryViewCordAsArray(const absl::Cord& source,
Index offset, DataType dtype,
endian source_endian,
StridedLayoutView<> layout) {
const auto& functions =
kUnalignedDataTypeFunctions[static_cast<size_t>(dtype.id())];
assert(functions.copy != nullptr);
if (source_endian != endian::native && functions.swap_endian_inplace) {
return {};
}
auto maybe_flat = source.TryFlat();
if (!maybe_flat) {
return {};
}
ByteStridedPointer<const void> ptr = maybe_flat->data();
ptr += offset;
if ((reinterpret_cast<std::uintptr_t>(ptr.get()) % dtype->alignment) != 0 ||
!std::all_of(layout.byte_strides().begin(), layout.byte_strides().end(),
[&](Index byte_stride) {
return (byte_stride % dtype->alignment) == 0;
})) {
return {};
}
auto shared_cord = std::make_shared<absl::Cord>(source);
if (auto shared_flat = shared_cord->TryFlat();
!shared_flat || shared_flat->data() != maybe_flat->data()) {
return {};
}
return SharedArrayView<const void>(
SharedElementPointer<const void>(
std::shared_ptr<const void>(std::move(shared_cord), ptr.get()),
dtype),
layout);
}
}
} | #include "tensorstore/internal/data_type_endian_conversion.h"
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Array;
using ::tensorstore::c_order;
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::endian;
using ::tensorstore::fortran_order;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::SharedArrayView;
using ::tensorstore::StridedLayout;
using ::tensorstore::internal::DecodeArray;
using ::tensorstore::internal::EncodeArray;
using ::tensorstore::internal::TryViewCordAsArray;
TEST(EncodeDecodeArrayTest, Uint8) {
uint8_t source[6] = {1, 2, 3, 4, 5, 6};
uint8_t dest1[6];
uint8_t dest2[6];
uint8_t dest3[6];
uint8_t dest4[6];
EncodeArray(Array(source, {2, 3}, c_order),
Array(dest1, {2, 3}, fortran_order), endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
EncodeArray(Array(source, {2, 3}, c_order),
Array(dest2, {2, 3}, fortran_order), endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
DecodeArray(Array(source, {2, 3}, c_order), endian::little,
Array(dest3, {2, 3}, fortran_order));
EXPECT_THAT(dest3, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
DecodeArray(Array(source, {2, 3}, c_order), endian::big,
Array(dest4, {2, 3}, fortran_order));
EXPECT_THAT(dest4, ::testing::ElementsAre(1, 4, 2, 5, 3, 6));
}
TEST(EncodeDecodeArrayTest, Uint16) {
uint16_t source[6] = {0x1234, 0x5678, 0x9012, 0x3456, 0x7890, 0x3344};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<uint16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({0x0,
0x34, 0x12, 0x56, 0x34,
0x78, 0x56, 0x90, 0x78,
0x12, 0x90, 0x44, 0x33}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<uint16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({0x0,
0x12, 0x34, 0x34, 0x56,
0x56, 0x78, 0x78, 0x90,
0x90, 0x12, 0x33, 0x44}));
}
TEST(EncodeDecodeArrayTest, Float16) {
using ::tensorstore::dtypes::float16_t;
float16_t source[6] = {float16_t(1.0), float16_t(2.0), float16_t(3.0),
float16_t(4.0), float16_t(5.0), float16_t(6.0)};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<float16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({0x0,
0x00, 0x3c,
0x00, 0x44,
0x00, 0x40,
0x00, 0x45,
0x00, 0x42,
0x00, 0x46}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<float16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({
0x0,
0x3c, 0x00,
0x44, 0x00,
0x40, 0x00,
0x45, 0x00,
0x42, 0x00,
0x46, 0x00,
}));
}
TEST(EncodeDecodeArrayTest, Bfloat16) {
using ::tensorstore::dtypes::bfloat16_t;
bfloat16_t source[6] = {bfloat16_t(1.0), bfloat16_t(2.0), bfloat16_t(3.0),
bfloat16_t(4.0), bfloat16_t(5.0), bfloat16_t(6.0)};
alignas(2) unsigned char dest1[13] = {};
alignas(2) unsigned char dest2[13] = {};
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<bfloat16_t*>(dest1 + 1), {2, 3}, fortran_order),
endian::little);
EXPECT_THAT(dest1, ::testing::ElementsAreArray({
0x0,
0x80, 0x3f,
0x80, 0x40,
0x00, 0x40,
0xa0, 0x40,
0x40, 0x40,
0xc0, 0x40,
}));
EncodeArray(
Array(source, {2, 3}, c_order),
Array(reinterpret_cast<bfloat16_t*>(dest2 + 1), {2, 3}, fortran_order),
endian::big);
EXPECT_THAT(dest2, ::testing::ElementsAreArray({
0x0,
0x3f, 0x80,
0x40, 0x80,
0x40, 0x00,
0x40, 0xa0,
0x40, 0x40,
0x40, 0xc0,
}));
}
TEST(DecodeArrayTest, Bool) {
unsigned char source[6] = {0x12, 0x00, 0x34, 0x1, 0x78, 0x00};
unsigned char dest[6];
DecodeArray(Array(reinterpret_cast<bool*>(source), {2, 3}, c_order),
endian::little,
Array(reinterpret_cast<bool*>(dest), {2, 3}, fortran_order));
EXPECT_THAT(dest, ::testing::ElementsAre(1, 1, 0, 1, 1, 0));
}
TEST(DecodeArrayTest, Uint16InPlaceLittleEndian) {
alignas(2) unsigned char source[12] = {0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::little, alt_layout);
EXPECT_EQ(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), source_array.layout());
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x3412, 0x7856, 0x1290},
{0x5634, 0x9078, 0x4433}}));
}
TEST(DecodeArrayTest, Uint16InPlaceBigEndian) {
alignas(2) unsigned char source[12] = {0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::big, alt_layout);
EXPECT_EQ(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), source_array.layout());
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x1234, 0x5678, 0x9012},
{0x3456, 0x7890, 0x3344}}));
}
TEST(DecodeArrayTest, Uint16InPlaceLittleEndianUnaligned) {
alignas(2) unsigned char source[13] = {0x00,
0x12, 0x34, 0x56, 0x78, 0x90, 0x12,
0x34, 0x56, 0x78, 0x90, 0x33, 0x44};
auto source_array = UnownedToShared(
Array(reinterpret_cast<uint16_t*>(source + 1), {2, 3}, c_order));
SharedArrayView<void> source_array_view = source_array;
auto alt_layout = StridedLayout(fortran_order, 2, {2, 3});
DecodeArray(&source_array_view, endian::little, alt_layout);
EXPECT_NE(source_array_view.data(), source);
EXPECT_EQ(source_array_view.layout(), alt_layout);
EXPECT_EQ(source_array_view, MakeArray<uint16_t>({{0x3412, 0x7856, 0x1290},
{0x5634, 0x9078, 0x4433}}));
}
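// Builds a Cord that aliases `orig_array`'s memory, then checks whether
// TryViewCordAsArray can produce a zero-copy view for the given dtype, byte
// order, and layout.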
void TestConvertCordInplace(DataType dtype, endian endian_value,
ContiguousLayoutOrder order,
bool expected_inplace) {
SCOPED_TRACE(tensorstore::StrCat("dtype=", dtype, ", order=", order,
", endian=", endian_value));
auto orig_array = tensorstore::AllocateArray(
{4, 5, 6}, order, tensorstore::default_init, dtype);
EXPECT_EQ(1, orig_array.pointer().use_count());
auto cord = absl::MakeCordFromExternal(
std::string_view(reinterpret_cast<const char*>(orig_array.data()),
dtype.size() * orig_array.num_elements()),
[owner = orig_array.pointer()](std::string_view s) {});
auto cord_array = TryViewCordAsArray(cord, 0, dtype, endian_value,
orig_array.layout());
if (expected_inplace) {
EXPECT_EQ(orig_array.data(), cord_array.data());
EXPECT_EQ(2, orig_array.pointer().use_count());
cord.Clear();
EXPECT_EQ(2, orig_array.pointer().use_count());
} else {
EXPECT_FALSE(cord_array.valid());
}
}
TEST(TryViewCordAsArrayTest, Inplace) {
const DataType data_types[] = {dtype_v<uint8_t>, dtype_v<uint16_t>,
dtype_v<uint32_t>, dtype_v<uint64_t>};
for (auto dtype : data_types) {
for (auto order : {tensorstore::c_order, tensorstore::fortran_order}) {
      TestConvertCordInplace(dtype, endian::native, order,
                             /*expected_inplace=*/true);
}
}
constexpr endian non_native =
endian::native == endian::little ? endian::big : endian::little;
  TestConvertCordInplace(dtype_v<uint8_t>, non_native, tensorstore::c_order,
                         /*expected_inplace=*/true);
  TestConvertCordInplace(dtype_v<bool>, non_native, tensorstore::c_order,
                         /*expected_inplace=*/true);
  TestConvertCordInplace(dtype_v<uint32_t>, non_native, tensorstore::c_order,
                         /*expected_inplace=*/false);
}
TEST(TryViewCordAsArrayTest, FlatCordBuilder) {
constexpr size_t kExtraBytes = 8;
tensorstore::internal::FlatCordBuilder builder(sizeof(uint32_t) * 3 * 4 * 5 +
kExtraBytes);
StridedLayout<> layout(tensorstore::c_order, sizeof(uint32_t), {3, 4, 5});
char* data_ptr = builder.data();
auto cord = std::move(builder).Build();
for (size_t offset = 0; offset < kExtraBytes; ++offset) {
auto array = TryViewCordAsArray(cord, offset, dtype_v<uint32_t>,
endian::native, layout);
if ((offset % alignof(uint32_t)) == 0) {
EXPECT_EQ(static_cast<void*>(data_ptr + offset), array.data());
EXPECT_EQ(layout, array.layout());
} else {
EXPECT_FALSE(array.valid());
}
}
}
TEST(TryViewCordAsArrayTest, Fragmented) {
std::vector<std::string> parts{
std::string(sizeof(uint32_t) * 3 * 3 * 5, '\0'),
std::string(sizeof(uint32_t) * 3 * 1 * 5, '\0')};
StridedLayout<> layout(tensorstore::c_order, sizeof(uint32_t), {3, 4, 5});
absl::Cord cord = absl::MakeFragmentedCord(parts);
auto array = TryViewCordAsArray(cord, 0, dtype_v<uint32_t>,
endian::native, layout);
EXPECT_FALSE(array.valid());
}
TEST(TryViewCordAsArrayTest, SmallBuffer) {
StridedLayout<> layout(tensorstore::c_order, sizeof(uint8_t), {4});
absl::Cord cord("abcd");
auto array = TryViewCordAsArray(cord, 0, dtype_v<uint8_t>,
endian::native, layout);
EXPECT_FALSE(array.valid());
}
}  // namespace | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/data_type_endian_conversion.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/data_type_endian_conversion_test.cc | 4f887a6430414cd6088e1743555015b10f116d50
a521486a-75e5-4dda-8e18-e58c91d51eb9 | cpp | tensorflow/tensorflow | inference_profiler_stage | tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.cc | tensorflow/lite/tools/evaluation/stages/inference_profiler_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <random>
#include "fp16.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr float kGaussianFloatMean = 0.5;
constexpr float kGaussianStdDev = 1.0 / 3;
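// Fills `data` with `num_elements` values: draws from a Gaussian with the
// above mean/stddev, rejection-samples into [0, 1), then maps the result
// linearly onto [min, max).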
template <typename T>
void GenerateRandomGaussianData(int64_t num_elements, float min, float max,
std::vector<T>* data) {
data->clear();
data->reserve(num_elements);
static std::normal_distribution<double> distribution(kGaussianFloatMean,
kGaussianStdDev);
static std::default_random_engine generator;
for (int i = 0; i < num_elements; ++i) {
auto rand_n = distribution(generator);
while (rand_n < 0 || rand_n >= 1) {
rand_n = distribution(generator);
}
auto rand_float = min + (max - min) * static_cast<float>(rand_n);
data->push_back(static_cast<T>(rand_float));
}
}
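// Returns the mean absolute elementwise difference between `reference` and
// `test`, computed in float regardless of T.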
template <typename T>
float CalculateAverageError(T* reference, T* test, int64_t num_elements) {
float error = 0;
for (int i = 0; i < num_elements; i++) {
float test_value = static_cast<float>(test[i]);
float reference_value = static_cast<float>(reference[i]);
error += std::abs(test_value - reference_value);
}
error /= num_elements;
return error;
}
}  // namespace
TfLiteStatus InferenceProfilerStage::Init(
const DelegateProviders* delegate_providers) {
test_stage_ = std::make_unique<TfliteInferenceStage>(config_);
if (test_stage_->Init(delegate_providers) != kTfLiteOk) return kTfLiteError;
LOG(INFO) << "Test interpreter has been initialized.";
EvaluationStageConfig reference_config;
reference_config.set_name("reference_inference");
auto* params = reference_config.mutable_specification()
->mutable_tflite_inference_params();
params->set_model_file_path(
config_.specification().tflite_inference_params().model_file_path());
params->set_invocations_per_run(
config_.specification().tflite_inference_params().invocations_per_run());
reference_stage_ = std::make_unique<TfliteInferenceStage>(reference_config);
if (reference_stage_->Init() != kTfLiteOk) return kTfLiteError;
LOG(INFO) << "Reference interpreter (1 thread on CPU) has been initialized.";
model_info_ = reference_stage_->GetModelInfo();
for (int i = 0; i < model_info_->inputs.size(); ++i) {
const TfLiteType model_input_type = model_info_->inputs[i]->type;
if (model_input_type == kTfLiteUInt8 || model_input_type == kTfLiteInt8 ||
model_input_type == kTfLiteInt32 || model_input_type == kTfLiteInt64 ||
model_input_type == kTfLiteBool || model_input_type == kTfLiteFloat32 ||
model_input_type == kTfLiteFloat16) {
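      // One of the supported input types; nothing to do here.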
} else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float16/float32/int8/uint8/int32/int64/bool "
"input types";
return kTfLiteError;
}
auto* input_shape = model_info_->inputs[i]->dims;
int64_t total_num_elements = 1;
    // Use a distinct index so the outer loop variable `i` isn't shadowed.
    for (int j = 0; j < input_shape->size; j++) {
      total_num_elements *= input_shape->data[j];
    }
input_num_elements_.push_back(total_num_elements);
float_tensors_.emplace_back();
uint8_tensors_.emplace_back();
int8_tensors_.emplace_back();
float16_tensors_.emplace_back();
int64_tensors_.emplace_back();
int32_tensors_.emplace_back();
bool_tensors_.emplace_back();
}
for (int i = 0; i < model_info_->outputs.size(); ++i) {
const TfLiteType model_output_type = model_info_->outputs[i]->type;
if (model_output_type == kTfLiteUInt8 || model_output_type == kTfLiteInt8 ||
model_output_type == kTfLiteInt32 || model_output_type == kTfLiteBool ||
model_output_type == kTfLiteFloat32) {
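      // One of the supported output types; nothing to do here.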
} else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float32/int8/uint8/int32/bool "
"output types";
return kTfLiteError;
}
auto* output_shape = model_info_->outputs[i]->dims;
int64_t total_num_elements = 1;
    for (int j = 0; j < output_shape->size; j++) {
      total_num_elements *= output_shape->data[j];
    }
output_num_elements_.push_back(total_num_elements);
error_stats_.emplace_back();
}
return kTfLiteOk;
}
TfLiteStatus InferenceProfilerStage::Run() {
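  // Each call generates fresh random inputs, feeds the same buffers to both
  // the test and reference interpreters, and accumulates per-output error.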
std::vector<void*> input_ptrs;
for (int i = 0; i < model_info_->inputs.size(); ++i) {
const TfLiteType model_input_type = model_info_->inputs[i]->type;
if (model_input_type == kTfLiteUInt8) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<uint8_t>::min(),
std::numeric_limits<uint8_t>::max(), &uint8_tensors_[i]);
input_ptrs.push_back(uint8_tensors_[i].data());
} else if (model_input_type == kTfLiteInt8) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int8_t>::min(),
std::numeric_limits<int8_t>::max(), &int8_tensors_[i]);
input_ptrs.push_back(int8_tensors_[i].data());
} else if (model_input_type == kTfLiteInt32) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max(), &int32_tensors_[i]);
input_ptrs.push_back(int32_tensors_[i].data());
} else if (model_input_type == kTfLiteInt64) {
GenerateRandomGaussianData(
input_num_elements_[i], std::numeric_limits<int64_t>::min(),
std::numeric_limits<int64_t>::max(), &int64_tensors_[i]);
input_ptrs.push_back(int64_tensors_[i].data());
} else if (model_input_type == kTfLiteBool) {
GenerateRandomGaussianData(input_num_elements_[i], 0, 1,
&bool_tensors_[i]);
input_ptrs.push_back(bool_tensors_[i].data());
} else if (model_input_type == kTfLiteFloat32) {
GenerateRandomGaussianData(input_num_elements_[i], -1, 1,
&(float_tensors_[i]));
input_ptrs.push_back(float_tensors_[i].data());
} else if (model_input_type == kTfLiteFloat16) {
GenerateRandomGaussianData(input_num_elements_[i], -1, 1,
&(float_tensors_[i]));
      // float16_tensors_[i] is still empty at this point (Init() only
      // emplace_back()s it), so size it before writing converted values.
      float16_tensors_[i].resize(float_tensors_[i].size());
      for (size_t j = 0; j < float_tensors_[i].size(); j++) {
        float16_tensors_[i][j] =
            fp16_ieee_from_fp32_value(float_tensors_[i][j]);
      }
input_ptrs.push_back(float16_tensors_[i].data());
} else {
LOG(ERROR) << "InferenceProfilerStage only supports "
"float16/float32/int8/uint8/int32/int64/bool "
"input types";
return kTfLiteError;
}
}
test_stage_->SetInputs(input_ptrs);
reference_stage_->SetInputs(input_ptrs);
if (test_stage_->Run() != kTfLiteOk) return kTfLiteError;
if (reference_stage_->Run() != kTfLiteOk) return kTfLiteError;
for (int i = 0; i < model_info_->outputs.size(); ++i) {
const TfLiteType model_output_type = model_info_->outputs[i]->type;
void* reference_ptr = reference_stage_->GetOutputs()->at(i);
void* test_ptr = test_stage_->GetOutputs()->at(i);
float output_diff = 0;
if (model_output_type == kTfLiteUInt8) {
output_diff = CalculateAverageError(static_cast<uint8_t*>(reference_ptr),
static_cast<uint8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteInt8) {
output_diff = CalculateAverageError(static_cast<int8_t*>(reference_ptr),
static_cast<int8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteInt32) {
output_diff = CalculateAverageError(static_cast<int32_t*>(reference_ptr),
static_cast<int32_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteBool) {
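      // kTfLiteBool tensors store one byte per element, so the raw buffers
      // can be compared as int8 values.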
output_diff = CalculateAverageError(static_cast<int8_t*>(reference_ptr),
static_cast<int8_t*>(test_ptr),
output_num_elements_[i]);
} else if (model_output_type == kTfLiteFloat32) {
output_diff = CalculateAverageError(static_cast<float*>(reference_ptr),
static_cast<float*>(test_ptr),
output_num_elements_[i]);
}
error_stats_[i].UpdateStat(output_diff);
}
return kTfLiteOk;
}
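// Combines latency metrics from both stages with the accumulated per-output
// error statistics.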
EvaluationStageMetrics InferenceProfilerStage::LatestMetrics() {
EvaluationStageMetrics metrics;
const auto& reference_metrics = reference_stage_->LatestMetrics();
metrics.set_num_runs(reference_metrics.num_runs());
auto* inference_profiler_metrics =
metrics.mutable_process_metrics()->mutable_inference_profiler_metrics();
*inference_profiler_metrics->mutable_reference_latency() =
reference_metrics.process_metrics().total_latency();
*inference_profiler_metrics->mutable_test_latency() =
test_stage_->LatestMetrics().process_metrics().total_latency();
for (int i = 0; i < error_stats_.size(); ++i) {
AccuracyMetrics* diff = inference_profiler_metrics->add_output_errors();
diff->set_avg_value(error_stats_[i].avg());
diff->set_std_deviation(error_stats_[i].std_deviation());
diff->set_min_value(error_stats_[i].min());
if (error_stats_[i].avg() != 0) {
diff->set_max_value(error_stats_[i].max());
} else {
diff->set_max_value(0);
}
}
return metrics;
}
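// Illustrative (hypothetical) driver, mirroring how the unit test exercises
// this stage; `kNumRuns` is a made-up constant:
//   InferenceProfilerStage stage(config);
//   if (stage.Init() == kTfLiteOk) {
//     for (int i = 0; i < kNumRuns; ++i) stage.Run();
//     EvaluationStageMetrics metrics = stage.LatestMetrics();
//   }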
}  // namespace evaluation
}  // namespace tflite | #include "tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kInferenceProfilerStageName[] = "inference_profiler_stage";
constexpr char kModelPath[] =
"tensorflow/lite/testdata/add_quantized.bin";
EvaluationStageConfig GetInferenceProfilerStageConfig(int num_threads = 1) {
EvaluationStageConfig config;
config.set_name(kInferenceProfilerStageName);
auto* params =
config.mutable_specification()->mutable_tflite_inference_params();
params->set_model_file_path(kModelPath);
params->set_invocations_per_run(2);
params->set_num_threads(num_threads);
return config;
}
TEST(InferenceProfilerStage, NoParams) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
config.mutable_specification()->clear_tflite_inference_params();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(InferenceProfilerStage, NoModelPath) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->clear_model_file_path();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(InferenceProfilerStage, NoOutputDiffForDefaultConfig) {
EvaluationStageConfig config = GetInferenceProfilerStageConfig();
InferenceProfilerStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(stage.Run(), kTfLiteOk);
}
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_TRUE(metrics.process_metrics().has_inference_profiler_metrics());
auto profiler_metrics =
metrics.process_metrics().inference_profiler_metrics();
EXPECT_TRUE(profiler_metrics.has_reference_latency());
EXPECT_TRUE(profiler_metrics.has_test_latency());
EXPECT_EQ(profiler_metrics.output_errors_size(), 1);
EXPECT_EQ(profiler_metrics.output_errors(0).avg_value(), 0);
}
}  // namespace
}  // namespace evaluation
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/inference_profiler_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/inference_profiler_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9b79f15a-5ad8-4f05-b7e9-569a601d059c | cpp | tensorflow/tensorflow | profile_summary_formatter | tensorflow/lite/profiling/profile_summary_formatter.cc | tensorflow/lite/profiling/profile_summary_formatter_test.cc | #include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include <fstream>
#include <iomanip>
#include <ios>
#include <map>
#include <memory>
#include <ostream>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace profiling {
std::string ProfileSummaryDefaultFormatter::GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
return GenerateReport("profile", true,
stats_calculator_map, delegate_stats_calculator,
subgraph_name_map);
}
std::string ProfileSummaryDefaultFormatter::GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
return GenerateReport("summary", false,
stats_calculator_map, delegate_stats_calculator,
subgraph_name_map);
}
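// Shared body for GetOutputString() and GetShortSummary(): emits one section
// per subgraph (optionally with the full per-op table), plus a section for
// delegate-internal stats when the delegate recorded any runs.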
std::string ProfileSummaryDefaultFormatter::GenerateReport(
const std::string& tag, bool include_output_string,
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
std::stringstream stream;
bool has_non_primary_graph =
(stats_calculator_map.size() - stats_calculator_map.count(0)) > 0;
for (const auto& stats_calc : stats_calculator_map) {
auto subgraph_index = stats_calc.first;
auto subgraph_stats = stats_calc.second.get();
std::string subgraph_name = "";
if (subgraph_name_map.find(subgraph_index) != subgraph_name_map.end()) {
subgraph_name = subgraph_name_map.at(subgraph_index);
}
if (has_non_primary_graph) {
if (subgraph_index == 0) {
stream << "Primary graph (name: " << subgraph_name << ") " << tag << ":"
<< std::endl;
} else {
stream << "Subgraph (index: " << subgraph_index
<< ", name: " << subgraph_name << ") " << tag << ":"
<< std::endl;
}
}
if (include_output_string) {
stream << subgraph_stats->GetOutputString();
}
if (subgraph_index != 0) {
stream << "Subgraph (index: " << subgraph_index
<< ", name: " << subgraph_name << ") ";
}
stream << subgraph_stats->GetShortSummary() << std::endl;
}
if (delegate_stats_calculator.num_runs() > 0) {
stream << "Delegate internal: " << std::endl;
if (include_output_string) {
stream << delegate_stats_calculator.GetOutputString();
}
stream << delegate_stats_calculator.GetShortSummary() << std::endl;
}
return stream.str();
}
void ProfileSummaryDefaultFormatter::HandleOutput(
const std::string& init_output, const std::string& run_output,
std::string output_file_path) const {
std::ofstream output_file(output_file_path);
std::ostream* output_stream = nullptr;
if (output_file.good()) {
output_stream = &output_file;
}
if (!init_output.empty()) {
WriteOutput("Profiling Info for Benchmark Initialization:", init_output,
output_stream == nullptr ? &TFLITE_LOG(INFO) : output_stream);
}
if (!run_output.empty()) {
WriteOutput(
"Operator-wise Profiling Info for Regular Benchmark Runs:", run_output,
output_stream == nullptr ? &TFLITE_LOG(INFO) : output_stream);
}
}
tensorflow::StatSummarizerOptions
ProfileSummaryDefaultFormatter::GetStatSummarizerOptions() const {
auto options = tensorflow::StatSummarizerOptions();
options.show_summary = false;
options.show_memory = false;
return options;
}
tensorflow::StatSummarizerOptions
ProfileSummaryCSVFormatter::GetStatSummarizerOptions() const {
auto options = ProfileSummaryDefaultFormatter::GetStatSummarizerOptions();
options.format_as_csv = true;
return options;
}
std::vector<tensorflow::StatsCalculator::Detail>
ProfileSummaryProtoFormatter::GetDetailsSortedByRunOrder(
const tensorflow::StatsCalculator* stats_calculator) const {
std::vector<tensorflow::StatsCalculator::Detail> details;
std::map<std::string, tensorflow::StatsCalculator::Detail> unsorted_details =
stats_calculator->GetDetails();
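  // Max-heap keyed on right-aligned (num_nodes - run_order) strings, so that
  // popping yields the details in ascending run order.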
std::priority_queue<
std::pair<std::string, const tensorflow::StatsCalculator::Detail*>>
sorted_list;
const int num_nodes = unsorted_details.size();
for (const auto& det : unsorted_details) {
const tensorflow::StatsCalculator::Detail* detail = &(det.second);
std::stringstream stream_for_sort;
stream_for_sort << std::setw(20) << std::right << std::setprecision(10)
<< std::fixed;
stream_for_sort << num_nodes - detail->run_order;
sorted_list.emplace(stream_for_sort.str(), detail);
}
while (!sorted_list.empty()) {
auto entry = sorted_list.top();
sorted_list.pop();
details.push_back(*entry.second);
}
return details;
}
void ProfileSummaryProtoFormatter::GenerateOpProfileDataFromDetail(
const tensorflow::StatsCalculator::Detail* detail,
const tensorflow::StatsCalculator* stats_calculator,
OpProfileData* const op_profile_data) const {
if (detail == nullptr) {
return;
}
op_profile_data->set_node_type(detail->type);
OpProfilingStat* inference_stat =
op_profile_data->mutable_inference_microseconds();
inference_stat->set_first(detail->elapsed_time.first());
inference_stat->set_last(detail->elapsed_time.newest());
inference_stat->set_avg(detail->elapsed_time.avg());
inference_stat->set_stddev(detail->elapsed_time.std_deviation());
inference_stat->set_variance(detail->elapsed_time.variance());
inference_stat->set_min(detail->elapsed_time.min());
inference_stat->set_max(detail->elapsed_time.max());
inference_stat->set_sum(detail->elapsed_time.sum());
inference_stat->set_count(detail->elapsed_time.count());
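  // Memory is tracked in bytes; report it in KB (variance scales by 1000^2).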
OpProfilingStat* memory_stat = op_profile_data->mutable_mem_kb();
memory_stat->set_first(detail->mem_used.first() / 1000.0);
memory_stat->set_last(detail->mem_used.newest() / 1000.0);
memory_stat->set_avg(detail->mem_used.avg() / 1000.0);
memory_stat->set_stddev(detail->mem_used.std_deviation() / 1000.0);
memory_stat->set_variance(detail->mem_used.variance() / 1000000.0);
memory_stat->set_min(detail->mem_used.min() / 1000.0);
memory_stat->set_max(detail->mem_used.max() / 1000.0);
memory_stat->set_sum(detail->mem_used.sum() / 1000.0);
memory_stat->set_count(detail->mem_used.count());
op_profile_data->set_times_called(detail->times_called /
stats_calculator->num_runs());
op_profile_data->set_name(detail->name);
op_profile_data->set_run_order(detail->run_order);
}
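// Converts one subgraph's StatsCalculator into a SubGraphProfilingData proto,
// emitting per-op profiles in run order.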
void ProfileSummaryProtoFormatter::GenerateSubGraphProfilingData(
const tensorflow::StatsCalculator* stats_calculator, int subgraph_index,
const std::map<uint32_t, std::string>& subgraph_name_map,
SubGraphProfilingData* const sub_graph_profiling_data) const {
sub_graph_profiling_data->set_subgraph_index(subgraph_index);
std::string subgraph_name = "";
if (subgraph_name_map.find(subgraph_index) != subgraph_name_map.end()) {
subgraph_name = subgraph_name_map.at(subgraph_index);
}
sub_graph_profiling_data->set_subgraph_name(subgraph_name);
for (tensorflow::StatsCalculator::Detail& detail :
GetDetailsSortedByRunOrder(stats_calculator)) {
OpProfileData* const op_profile_data =
sub_graph_profiling_data->add_per_op_profiles();
GenerateOpProfileDataFromDetail(&detail, stats_calculator, op_profile_data);
}
}
void ProfileSummaryProtoFormatter::GenerateDelegateProfilingData(
const tensorflow::StatsCalculator* stats_calculator,
DelegateProfilingData* const delegate_profiling_data) const {
for (const tensorflow::StatsCalculator::Detail& detail :
GetDetailsSortedByRunOrder(stats_calculator)) {
OpProfileData* const op_profile_data =
delegate_profiling_data->add_per_op_profiles();
GenerateOpProfileDataFromDetail(&detail, stats_calculator, op_profile_data);
}
}
std::string ProfileSummaryProtoFormatter::GetShortSummary(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
TFLITE_LOG(ERROR) << "GetShortSummary is not supported for proto formatter.";
return "";
}
std::string ProfileSummaryProtoFormatter::GetOutputString(
const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
stats_calculator_map,
const tensorflow::StatsCalculator& delegate_stats_calculator,
const std::map<uint32_t, std::string>& subgraph_name_map) const {
ModelProfilingData model_profiling_data;
for (const auto& stats_calc : stats_calculator_map) {
auto subgraph_index = stats_calc.first;
tensorflow::StatsCalculator* subgraph_stats = stats_calc.second.get();
SubGraphProfilingData* const sub_graph_profiling_data =
model_profiling_data.add_subgraph_profiles();
GenerateSubGraphProfilingData(subgraph_stats, subgraph_index,
subgraph_name_map, sub_graph_profiling_data);
}
if (delegate_stats_calculator.num_runs() > 0) {
DelegateProfilingData* const delegate_profiling_data =
model_profiling_data.add_delegate_profiles();
GenerateDelegateProfilingData(&delegate_stats_calculator,
delegate_profiling_data);
}
return model_profiling_data.SerializeAsString();
}
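// Illustrative (hypothetical) consumer of the serialized result:
//   ModelProfilingData data;
//   data.ParseFromString(formatter.GetOutputString(...));
//   for (const auto& subgraph : data.subgraph_profiles()) { /* inspect */ }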
tensorflow::StatSummarizerOptions
ProfileSummaryProtoFormatter::GetStatSummarizerOptions() const {
auto options = tensorflow::StatSummarizerOptions();
options.show_summary = false;
options.show_memory = false;
return options;
}
void ProfileSummaryProtoFormatter::HandleOutput(
const std::string& init_output, const std::string& run_output,
std::string output_file_path) const {
std::ofstream output_file(output_file_path, std::ios_base::binary);
std::ostream* output_stream = nullptr;
if (output_file.good()) {
output_stream = &output_file;
}
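  // init_output / run_output are serialized ModelProfilingData blobs produced
  // by GetOutputString(); wrap them in a single BenchmarkProfilingData.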
BenchmarkProfilingData benchmark_profiling_data;
if (!init_output.empty()) {
benchmark_profiling_data.mutable_init_profile()->ParseFromString(
init_output);
}
if (!run_output.empty()) {
benchmark_profiling_data.mutable_runtime_profile()->ParseFromString(
run_output);
}
if (output_stream == nullptr) {
TFLITE_LOG(INFO) << benchmark_profiling_data.DebugString();
} else {
benchmark_profiling_data.SerializeToOstream(output_stream);
}
}
}  // namespace profiling
}  // namespace tflite | #include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include <cstddef>
#include <fstream>
#include <ios>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/profiling/proto/profiling_info.pb.h"
namespace tflite {
namespace profiling {
namespace {
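// Field-by-field comparators for the profiling protos exercised below.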
bool AreOpProfilingStatEqual(const OpProfilingStat& op_profiling_stat_1,
const OpProfilingStat& op_profiling_stat_2) {
auto proto_to_tuple = [](const OpProfilingStat& op_profiling_stat) {
return std::make_tuple(op_profiling_stat.first(), op_profiling_stat.last(),
op_profiling_stat.avg(), op_profiling_stat.stddev(),
op_profiling_stat.variance(),
op_profiling_stat.min(), op_profiling_stat.max(),
op_profiling_stat.sum(), op_profiling_stat.count());
};
return proto_to_tuple(op_profiling_stat_1) ==
proto_to_tuple(op_profiling_stat_2);
}
bool AreOpProfileDataEqual(const OpProfileData& op_profile_data_1,
const OpProfileData& op_profile_data_2) {
auto proto_to_tuple = [](const OpProfileData& op_profile_data) {
return std::make_tuple(op_profile_data.node_type(),
op_profile_data.times_called(),
op_profile_data.name(), op_profile_data.run_order());
};
return (proto_to_tuple(op_profile_data_1) ==
proto_to_tuple(op_profile_data_2)) &&
AreOpProfilingStatEqual(op_profile_data_1.inference_microseconds(),
op_profile_data_2.inference_microseconds()) &&
(AreOpProfilingStatEqual(op_profile_data_1.mem_kb(),
op_profile_data_2.mem_kb()));
}
bool AreSubGraphProfilingDataEqual(
const SubGraphProfilingData& subgraph_profiling_data_1,
const SubGraphProfilingData& subgraph_profiling_data_2) {
auto proto_to_tuple =
[](const SubGraphProfilingData& subgraph_profiling_data) {
return std::make_tuple(
subgraph_profiling_data.subgraph_name(),
subgraph_profiling_data.per_op_profiles().size());
};
if (proto_to_tuple(subgraph_profiling_data_1) ==
proto_to_tuple(subgraph_profiling_data_2)) {
for (size_t i = 0; i < subgraph_profiling_data_1.per_op_profiles().size();
++i) {
auto op_profile_data_1 = subgraph_profiling_data_1.per_op_profiles(i);
auto op_profile_data_2 = subgraph_profiling_data_2.per_op_profiles(i);
if (!AreOpProfileDataEqual(op_profile_data_1, op_profile_data_2)) {
return false;
}
}
return true;
}
return false;
}
bool AreDelegateProfilingDataEqual(
const DelegateProfilingData& delegate_profiling_data_1,
const DelegateProfilingData& delegate_profiling_data_2) {
auto proto_to_tuple =
[](const DelegateProfilingData& delegate_profiling_data) {
return std::make_tuple(
delegate_profiling_data.delegate_name(),
delegate_profiling_data.per_op_profiles().size());
};
if (proto_to_tuple(delegate_profiling_data_1) ==
proto_to_tuple(delegate_profiling_data_2)) {
for (size_t i = 0; i < delegate_profiling_data_1.per_op_profiles().size();
++i) {
auto op_profile_data_1 = delegate_profiling_data_1.per_op_profiles(i);
auto op_profile_data_2 = delegate_profiling_data_2.per_op_profiles(i);
if (!AreOpProfileDataEqual(op_profile_data_1, op_profile_data_2)) {
return false;
}
}
return true;
}
return false;
}
bool AreModelProfilingDataEqual(
const ModelProfilingData& model_profiling_data_1,
const ModelProfilingData& model_profiling_data_2) {
if (model_profiling_data_1.subgraph_profiles().size() !=
model_profiling_data_2.subgraph_profiles().size()) {
return false;
}
for (size_t i = 0; i < model_profiling_data_1.subgraph_profiles().size();
++i) {
auto subgraph_profile_1 = model_profiling_data_1.subgraph_profiles(i);
auto subgraph_profile_2 = model_profiling_data_2.subgraph_profiles(i);
if (!AreSubGraphProfilingDataEqual(subgraph_profile_1,
subgraph_profile_2)) {
return false;
}
}
if (model_profiling_data_1.delegate_profiles().size() !=
model_profiling_data_2.delegate_profiles().size()) {
return false;
}
for (size_t i = 0; i < model_profiling_data_1.delegate_profiles().size();
++i) {
auto delegate_profile_1 = model_profiling_data_1.delegate_profiles(i);
auto delegate_profile_2 = model_profiling_data_2.delegate_profiles(i);
if (!AreDelegateProfilingDataEqual(delegate_profile_1,
delegate_profile_2)) {
return false;
}
}
return true;
}
TEST(SummaryWriterTest, SummaryOptionStdOut) {
ProfileSummaryDefaultFormatter writer;
tensorflow::StatSummarizerOptions options = writer.GetStatSummarizerOptions();
EXPECT_EQ(options.show_summary, false);
EXPECT_EQ(options.show_memory, false);
EXPECT_EQ(options.format_as_csv, false);
}
TEST(SummaryWriterTest, SummaryOptionCSV) {
ProfileSummaryCSVFormatter writer;
tensorflow::StatSummarizerOptions options = writer.GetStatSummarizerOptions();
EXPECT_EQ(options.show_summary, false);
EXPECT_EQ(options.show_memory, false);
EXPECT_EQ(options.format_as_csv, true);
}
TEST(SummaryWriterTest, EmptyOutputString) {
ProfileSummaryDefaultFormatter writer;
std::string output = writer.GetOutputString(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
EXPECT_EQ(output.size(), 0);
}
TEST(SummaryWriterTest, EmptyShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::string output = writer.GetShortSummary(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
EXPECT_EQ(output.size(), 0);
}
TEST(SummaryWriterTest, SingleSubgraphOutputString) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()), {});
ASSERT_TRUE(absl::StrContains(output, "Run Order"));
ASSERT_TRUE(absl::StrContains(output, "Top by Computation Time"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Memory Use"));
ASSERT_TRUE(absl::StrContains(output, "Summary by node type"));
ASSERT_TRUE(absl::StrContains(output, "nodes observed"));
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, SingleSubgraphShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetShortSummary(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}});
ASSERT_TRUE(!absl::StrContains(output, "Run Order"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Computation Time"));
ASSERT_TRUE(!absl::StrContains(output, "Top by Memory Use"));
ASSERT_TRUE(!absl::StrContains(output, "Summary by node type"));
ASSERT_TRUE(absl::StrContains(output, "nodes observed"));
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, MultiSubgraphOutputString) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, MultiSubgraphOutputStringForProto) {
ProfileSummaryProtoFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string kernel_name_1 = "Kernel 1";
std::string kernel_name_2 = "Kernel 2";
std::string kernel_name_3 = "Kernel 3";
std::string op_name_1 = "Convolution";
std::string op_name_2 = "Reshape";
std::string op_name_3 = "Convolution";
stats_calculator_map[0]->AddNodeStats(kernel_name_1, op_name_1, 1, 10, 10000);
stats_calculator_map[0]->AddNodeStats(kernel_name_1, op_name_1, 1, 20, 20000);
stats_calculator_map[0]->AddNodeStats(kernel_name_2, op_name_2, 2, 15, 10000);
stats_calculator_map[0]->UpdateRunTotalUs(25);
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1]->AddNodeStats(kernel_name_3, op_name_3, 3, 10, 10000);
stats_calculator_map[1]->UpdateRunTotalUs(10);
std::string output = writer.GetOutputString(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ModelProfilingData model_profiling_data;
model_profiling_data.ParseFromString(output);
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
ASSERT_EQ(model_profiling_data.subgraph_profiles().size(), 2);
ASSERT_EQ(model_profiling_data.subgraph_profiles(0).subgraph_name(),
"Primary graph");
ASSERT_EQ(model_profiling_data.subgraph_profiles(0).per_op_profiles().size(),
2);
OpProfileData op_profile_data_1;
op_profile_data_1.set_node_type(op_name_1);
OpProfilingStat* inference_microseconds_stat_1 =
op_profile_data_1.mutable_inference_microseconds();
inference_microseconds_stat_1->set_first(10);
inference_microseconds_stat_1->set_last(20);
inference_microseconds_stat_1->set_max(20);
inference_microseconds_stat_1->set_min(10);
inference_microseconds_stat_1->set_avg(15);
inference_microseconds_stat_1->set_stddev(5);
inference_microseconds_stat_1->set_variance(25);
inference_microseconds_stat_1->set_sum(30);
inference_microseconds_stat_1->set_count(2);
OpProfilingStat* memory_stat_1 = op_profile_data_1.mutable_mem_kb();
memory_stat_1->set_first(10);
memory_stat_1->set_last(20);
memory_stat_1->set_max(20);
memory_stat_1->set_min(10);
memory_stat_1->set_avg(15);
memory_stat_1->set_stddev(5);
memory_stat_1->set_variance(25);
memory_stat_1->set_sum(30);
memory_stat_1->set_count(2);
op_profile_data_1.set_name(kernel_name_1);
op_profile_data_1.set_run_order(1);
op_profile_data_1.set_times_called(2);
EXPECT_TRUE(AreOpProfileDataEqual(
model_profiling_data.subgraph_profiles(0).per_op_profiles(0),
op_profile_data_1));
OpProfileData op_profile_data_2;
op_profile_data_2.set_node_type(op_name_2);
OpProfilingStat* inference_microseconds_stat_2 =
op_profile_data_2.mutable_inference_microseconds();
inference_microseconds_stat_2->set_first(15);
inference_microseconds_stat_2->set_last(15);
inference_microseconds_stat_2->set_max(15);
inference_microseconds_stat_2->set_min(15);
inference_microseconds_stat_2->set_avg(15);
inference_microseconds_stat_2->set_stddev(0);
inference_microseconds_stat_2->set_variance(0);
inference_microseconds_stat_2->set_sum(15);
inference_microseconds_stat_2->set_count(1);
OpProfilingStat* memory_stat_2 = op_profile_data_2.mutable_mem_kb();
memory_stat_2->set_first(10);
memory_stat_2->set_last(10);
memory_stat_2->set_max(10);
memory_stat_2->set_min(10);
memory_stat_2->set_avg(10);
memory_stat_2->set_stddev(0);
memory_stat_2->set_variance(0);
memory_stat_2->set_sum(10);
memory_stat_2->set_count(1);
op_profile_data_2.set_times_called(1);
op_profile_data_2.set_name(kernel_name_2);
op_profile_data_2.set_run_order(2);
EXPECT_TRUE(AreOpProfileDataEqual(
model_profiling_data.subgraph_profiles(0).per_op_profiles(1),
op_profile_data_2));
ASSERT_EQ(model_profiling_data.subgraph_profiles(1).subgraph_name(),
"Subgraph 1");
ASSERT_EQ(model_profiling_data.subgraph_profiles(1).per_op_profiles().size(),
1);
OpProfileData op_profile_data_3;
op_profile_data_3.set_node_type(op_name_3);
OpProfilingStat* inference_microseconds_stat_3 =
op_profile_data_3.mutable_inference_microseconds();
inference_microseconds_stat_3->set_first(10);
inference_microseconds_stat_3->set_last(10);
inference_microseconds_stat_3->set_max(10);
inference_microseconds_stat_3->set_min(10);
inference_microseconds_stat_3->set_avg(10);
inference_microseconds_stat_3->set_stddev(0);
inference_microseconds_stat_3->set_variance(0);
inference_microseconds_stat_3->set_sum(10);
inference_microseconds_stat_3->set_count(1);
OpProfilingStat* memory_stat_3 = op_profile_data_3.mutable_mem_kb();
memory_stat_3->set_first(10);
memory_stat_3->set_last(10);
memory_stat_3->set_max(10);
memory_stat_3->set_min(10);
memory_stat_3->set_avg(10);
memory_stat_3->set_stddev(0);
memory_stat_3->set_variance(0);
memory_stat_3->set_sum(10);
memory_stat_3->set_count(1);
op_profile_data_3.set_times_called(1);
op_profile_data_3.set_name(kernel_name_3);
op_profile_data_3.set_run_order(3);
EXPECT_TRUE(AreOpProfileDataEqual(
model_profiling_data.subgraph_profiles(1).per_op_profiles(0),
op_profile_data_3));
}
TEST(SummaryWriterTest, MultiSubgraphHandleOutputForProto) {
ProfileSummaryProtoFormatter writer;
ModelProfilingData model_profiling_data_run;
SubGraphProfilingData* subgraph_profiling_data =
model_profiling_data_run.add_subgraph_profiles();
subgraph_profiling_data->set_subgraph_name("Primary graph");
OpProfileData* op_profile_data_1 =
subgraph_profiling_data->add_per_op_profiles();
op_profile_data_1->set_node_type("Convolution");
OpProfilingStat* inference_stat_1 =
op_profile_data_1->mutable_inference_microseconds();
inference_stat_1->set_first(10);
inference_stat_1->set_avg(10);
OpProfilingStat* mem_stat_1 = op_profile_data_1->mutable_mem_kb();
mem_stat_1->set_first(10);
mem_stat_1->set_avg(10);
op_profile_data_1->set_times_called(1);
op_profile_data_1->set_name("Kernel 1");
op_profile_data_1->set_run_order(1);
OpProfileData* op_profile_data_2 =
subgraph_profiling_data->add_per_op_profiles();
op_profile_data_2->set_node_type("Reshape");
OpProfilingStat* inference_stat_2 =
op_profile_data_2->mutable_inference_microseconds();
inference_stat_2->set_first(15);
inference_stat_2->set_avg(15);
OpProfilingStat* mem_stat_2 = op_profile_data_2->mutable_mem_kb();
mem_stat_2->set_first(10);
mem_stat_2->set_avg(10);
op_profile_data_2->set_times_called(1);
op_profile_data_2->set_name("Kernel 2");
op_profile_data_2->set_run_order(2);
SubGraphProfilingData* subgraph_profiling_data_1 =
model_profiling_data_run.add_subgraph_profiles();
subgraph_profiling_data_1->set_subgraph_name("Subgraph 1");
OpProfileData* op_profile_data_3 =
subgraph_profiling_data_1->add_per_op_profiles();
op_profile_data_3->set_node_type("Convolution");
OpProfilingStat* inference_stat_3 =
op_profile_data_3->mutable_inference_microseconds();
inference_stat_3->set_first(10);
inference_stat_3->set_avg(10);
OpProfilingStat* mem_stat_3 = op_profile_data_3->mutable_mem_kb();
mem_stat_3->set_first(10);
mem_stat_3->set_avg(10);
op_profile_data_3->set_times_called(1);
op_profile_data_3->set_name("Kernel 3");
op_profile_data_3->set_run_order(3);
DelegateProfilingData* delegate_profiling_data =
model_profiling_data_run.add_delegate_profiles();
OpProfileData* op_profile_data_4 =
delegate_profiling_data->add_per_op_profiles();
op_profile_data_4->set_node_type("Convolution");
OpProfilingStat* inference_stat_4 =
op_profile_data_4->mutable_inference_microseconds();
inference_stat_4->set_first(10);
inference_stat_4->set_avg(10);
OpProfilingStat* mem_stat_4 = op_profile_data_4->mutable_mem_kb();
mem_stat_4->set_first(10);
mem_stat_4->set_avg(10);
op_profile_data_4->set_times_called(1);
op_profile_data_4->set_name("Kernel 4");
op_profile_data_4->set_run_order(4);
ModelProfilingData model_profiling_data_init;
SubGraphProfilingData* subgraph_profiling_data_init =
model_profiling_data_init.add_subgraph_profiles();
subgraph_profiling_data_init->set_subgraph_name("Primary graph");
OpProfileData* op_profile_data_init_1 =
subgraph_profiling_data_init->add_per_op_profiles();
op_profile_data_init_1->set_node_type("Convolution");
OpProfilingStat* inference_stat_init_1 =
op_profile_data_init_1->mutable_inference_microseconds();
inference_stat_init_1->set_first(10);
inference_stat_init_1->set_avg(10);
op_profile_data_init_1->set_times_called(1);
OpProfilingStat* mem_stat_init_1 = op_profile_data_init_1->mutable_mem_kb();
mem_stat_init_1->set_first(10);
mem_stat_init_1->set_avg(10);
op_profile_data_init_1->set_name("ModifyGraphWithDelegate");
op_profile_data_init_1->set_run_order(1);
#ifdef __ANDROID__
std::string file_name = "/data/local/tmp/test_file.proto";
#else
std::string file_name = "/tmp/test_file.proto";
#endif
writer.HandleOutput(model_profiling_data_init.SerializeAsString(),
model_profiling_data_run.SerializeAsString(), file_name);
std::ifstream file(file_name, std::ios::binary);
ASSERT_TRUE(file.good());
BenchmarkProfilingData benchmark_profiling_data;
benchmark_profiling_data.ParseFromIstream(&file);
file.close();
ASSERT_TRUE(benchmark_profiling_data.model_name().empty());
EXPECT_TRUE(AreModelProfilingDataEqual(
benchmark_profiling_data.init_profile(), model_profiling_data_init));
EXPECT_TRUE(AreModelProfilingDataEqual(
benchmark_profiling_data.runtime_profile(), model_profiling_data_run));
}
TEST(SummaryWriterTest, MultiSubgraphShortSummary) {
ProfileSummaryDefaultFormatter writer;
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>
stats_calculator_map;
stats_calculator_map[0] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
stats_calculator_map[1] = std::make_unique<tensorflow::StatsCalculator>(
writer.GetStatSummarizerOptions());
std::string output = writer.GetShortSummary(
stats_calculator_map,
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions()),
{{0, "Primary graph"}, {1, "Subgraph 1"}});
ASSERT_TRUE(absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(!absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, DelegationOutputString) {
ProfileSummaryDefaultFormatter writer;
auto delegate_stats_calculator =
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions());
delegate_stats_calculator.UpdateRunTotalUs(1);
std::string output = writer.GetOutputString(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
delegate_stats_calculator, {});
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(absl::StrContains(output, "Delegate internal"));
}
TEST(SummaryWriterTest, DelegationShortSummary) {
ProfileSummaryDefaultFormatter writer;
auto delegate_stats_calculator =
tensorflow::StatsCalculator(writer.GetStatSummarizerOptions());
delegate_stats_calculator.UpdateRunTotalUs(1);
std::string output = writer.GetShortSummary(
std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>(),
delegate_stats_calculator, {});
ASSERT_TRUE(!absl::StrContains(output, "Primary graph"));
ASSERT_TRUE(!absl::StrContains(output, "Subgraph"));
ASSERT_TRUE(absl::StrContains(output, "Delegate internal"));
}
}  // namespace
}  // namespace profiling
}  // namespace tflite | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_summary_formatter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_summary_formatter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea