Unnamed: 0
int64 0
409
| Code
stringlengths 131
27.3k
| Unit Test
stringlengths 89
30.5k
|
---|---|---|
200 | #ifndef TENSORSTORE_INTERNAL_JSON_SAME_H_
#define TENSORSTORE_INTERNAL_JSON_SAME_H_
#include <nlohmann/json_fwd.hpp>
namespace tensorstore {
namespace internal_json {
bool JsonSame(const ::nlohmann::json& a, const ::nlohmann::json& b);
}
}
#endif
#include "tensorstore/internal/json/same.h"
#include <variant>
#include "absl/container/inlined_vector.h"
#include <nlohmann/json.hpp>
namespace tensorstore {
namespace internal_json {
/// Returns true if `a` and `b` are structurally identical JSON values.
///
/// Unlike `::nlohmann::json::operator==`, two `discarded` values compare
/// equal here (see the discarded/null case below).  Nested arrays/objects
/// are compared iteratively via an explicit stack rather than recursively,
/// so deeply nested documents cannot overflow the call stack.
bool JsonSame(const ::nlohmann::json& a, const ::nlohmann::json& b) {
  using value_t = ::nlohmann::json::value_t;
  using array_t = ::nlohmann::json::array_t;
  using object_t = ::nlohmann::json::object_t;
  // Cursors into two arrays being compared element-wise.  A `b_end` is not
  // needed: the sizes were verified equal before the entry was pushed.
  struct ArrayIterators {
    array_t::const_iterator a_cur, a_end, b_cur;
  };
  // Same as ArrayIterators, for two objects compared entry-wise.
  struct ObjectIterators {
    object_t::const_iterator a_cur, a_end, b_cur;
  };
  using StackEntry = std::variant<ArrayIterators, ObjectIterators>;
  // Explicit DFS stack of containers whose children remain to be compared.
  absl::InlinedVector<StackEntry, 64> stack;
  // Compares scalar values immediately; for arrays/objects of equal size it
  // defers element comparison by pushing iterators onto `stack`.  Returns
  // false as soon as a mismatch is certain.
  const auto compare_or_defer_values = [&](const ::nlohmann::json& a_value,
                                           const ::nlohmann::json& b_value) {
    const auto t = a_value.type();
    switch (t) {
      case value_t::discarded:
      case value_t::null:
        // `operator==` returns false for two discarded values; JsonSame
        // deliberately treats them as equal.
        return b_value.type() == t;
      case value_t::array: {
        if (b_value.type() != t) return false;
        const auto& a_arr = a_value.get_ref<const array_t&>();
        const auto& b_arr = b_value.get_ref<const array_t&>();
        if (a_arr.size() != b_arr.size()) return false;
        if (a_arr.empty()) return true;
        stack.emplace_back(
            ArrayIterators{a_arr.begin(), a_arr.end(), b_arr.begin()});
        return true;
      }
      case value_t::object: {
        if (b_value.type() != t) return false;
        const auto& a_obj = a_value.get_ref<const object_t&>();
        const auto& b_obj = b_value.get_ref<const object_t&>();
        if (a_obj.size() != b_obj.size()) return false;
        if (a_obj.empty()) return true;
        stack.emplace_back(
            ObjectIterators{a_obj.begin(), a_obj.end(), b_obj.begin()});
        return true;
      }
      default:
        // Numbers/strings/booleans: delegate to nlohmann's equality.
        return a_value == b_value;
    }
  };
  if (!compare_or_defer_values(a, b)) return false;
  while (!stack.empty()) {
    auto& e = stack.back();
    if (auto* array_iterators = std::get_if<ArrayIterators>(&e)) {
      auto& a_v = *array_iterators->a_cur;
      auto& b_v = *array_iterators->b_cur;
      // Advance (and possibly pop) *before* comparing: the comparison below
      // may push new stack entries, which can reallocate `stack` and
      // invalidate `e`/`array_iterators`.  The element references
      // `a_v`/`b_v` point into the JSON containers and stay valid.
      if (++array_iterators->a_cur == array_iterators->a_end) {
        stack.pop_back();
      } else {
        ++array_iterators->b_cur;
      }
      if (!compare_or_defer_values(a_v, b_v)) {
        return false;
      }
    } else {
      auto* object_iterators = std::get_if<ObjectIterators>(&e);
      auto& a_kv = *object_iterators->a_cur;
      auto& b_kv = *object_iterators->b_cur;
      // Same advance-then-compare pattern as the array branch above.
      if (++object_iterators->a_cur == object_iterators->a_end) {
        stack.pop_back();
      } else {
        ++object_iterators->b_cur;
      }
      // `object_t` keeps entries ordered by key, so two equal-size objects
      // are equal iff their k-th entries match pairwise.
      if (a_kv.first != b_kv.first ||
          !compare_or_defer_values(a_kv.second, b_kv.second)) {
        return false;
      }
    }
  }
  return true;
}
}
} | #include "tensorstore/internal/json/same.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
// Exercises JsonSame across scalars, discarded values, arrays, objects,
// mismatches, and a deeply nested document (stack-overflow regression).
TEST(JsonSame, Basic) {
  // Numeric equality follows nlohmann semantics: 1.0 == 1.
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(1.0, 1));
  EXPECT_FALSE(tensorstore::internal_json::JsonSame(
      ::nlohmann::json::value_t::discarded, ::nlohmann::json::value_t::null));
  // Two discarded values compare equal (unlike operator==).
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(
      ::nlohmann::json::value_t::discarded,
      ::nlohmann::json::value_t::discarded));
  EXPECT_TRUE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2, 3}));
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(
      {1, {1, 2, 3, {{"a", 5}, {"b", 7}}}, 3},
      {1, {1, 2, 3, {{"a", 5}, {"b", 7}}}, 3}));
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(
      ::nlohmann::json::array_t{}, ::nlohmann::json::array_t{}));
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(
      ::nlohmann::json::object_t{}, ::nlohmann::json::object_t{}));
  // Element mismatch and length mismatch.
  EXPECT_FALSE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2, 4}));
  EXPECT_FALSE(tensorstore::internal_json::JsonSame({1, 2, 3}, {1, 2}));
  // Discarded values nested inside arrays/objects.
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(
      {1, ::nlohmann::json::value_t::discarded, 3},
      {1, ::nlohmann::json::value_t::discarded, 3}));
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}}));
  EXPECT_FALSE(tensorstore::internal_json::JsonSame(
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 4}}));
  EXPECT_FALSE(tensorstore::internal_json::JsonSame(
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
      {{"a", ::nlohmann::json::value_t::discarded}, {"c", 3}}));
  EXPECT_FALSE(tensorstore::internal_json::JsonSame(
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}},
      {{"a", ::nlohmann::json::value_t::discarded}, {"b", 3}, {"d", 4}}));
  // Builds an object nested `depth` levels deep: {"a": {"a": {...}}}.
  const auto make_nested = [](int depth) {
    ::nlohmann::json value;
    ::nlohmann::json* tail = &value;
    for (int i = 0; i < depth; ++i) {
      *tail = ::nlohmann::json::object_t();
      auto& obj = tail->get_ref<::nlohmann::json::object_t&>();
      tail = &obj["a"];
    }
    return value;
  };
  // 10000 levels would overflow the call stack with a recursive compare;
  // verifies the iterative implementation handles it.
  auto nested = make_nested(10000);
  EXPECT_TRUE(tensorstore::internal_json::JsonSame(nested, nested));
}
} |
201 | #ifndef AROLLA_QTYPE_TYPED_SLOT_H_
#define AROLLA_QTYPE_TYPED_SLOT_H_
#include <array>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/status.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
// A type-erased handle to a value slot within a FrameLayout: pairs a QType
// with a byte offset.  Copyable, cheap value type.  "Unsafe" members skip
// runtime type verification (debug builds still DCHECK).
class TypedSlot {
 public:
  template <typename T>
  using Slot = FrameLayout::Slot<T>;
  TypedSlot(const TypedSlot&) = default;
  TypedSlot& operator=(const TypedSlot&) = default;
  // Creates a TypedSlot from a raw byte offset.  The caller is responsible
  // for the offset actually holding a value of `type`.
  static TypedSlot UnsafeFromOffset(QTypePtr type, size_t byte_offset) {
    return TypedSlot(type, byte_offset);
  }
  // Creates a TypedSlot from a typed slot and an explicit QType; DCHECKs
  // that the QType is backed by T.
  template <typename T>
  static TypedSlot FromSlot(Slot<T> slot, QTypePtr type) {
    DCHECK(type->type_info() == typeid(T)) << "Type mismatch";
    return TypedSlot(type, slot.byte_offset());
  }
  // Creates a TypedSlot from a typed slot, deriving the QType from T.
  template <typename T>
  static TypedSlot FromSlot(Slot<T> slot) {
    return TypedSlot(GetQType<T>(), slot.byte_offset());
  }
  // Converts back to a typed slot; returns InvalidArgumentError if this
  // slot's QType is not backed by T.
  template <class T>
  absl::StatusOr<Slot<T>> ToSlot() const {
    RETURN_IF_ERROR(VerifyType(typeid(T)));
    return Slot<T>::UnsafeSlotFromOffset(byte_offset_);
  }
  // Converts back to a typed slot without runtime verification (DCHECK
  // only).  Use when the type is statically known to match.
  template <class T>
  Slot<T> UnsafeToSlot() const {
    DCHECK(GetType()->type_info() == typeid(T));
    return Slot<T>::UnsafeSlotFromOffset(byte_offset_);
  }
  // Number of sub-fields exposed by this slot's QType (e.g. presence/value
  // of an optional).
  int64_t SubSlotCount() const { return type_->type_fields().size(); }
  // Returns the TypedSlot of the index-th sub-field; offset is relative to
  // this slot's own offset.
  TypedSlot SubSlot(int64_t index) const {
    DCHECK_GE(index, 0);
    DCHECK_LT(index, SubSlotCount());
    const auto& field = type_->type_fields()[index];
    return TypedSlot(field.GetType(), byte_offset_ + field.byte_offset());
  }
  QTypePtr GetType() const { return type_; }
  size_t byte_offset() const { return byte_offset_; }
  // Copies this slot's value from `source_frame` into `destination_slot` of
  // `destination_frame`.  Both slots must have the same QType.
  void CopyTo(ConstFramePtr source_frame, TypedSlot destination_slot,
              FramePtr destination_frame) const {
    DCHECK_EQ(type_, destination_slot.type_) << "Type mismatch";
    source_frame.DCheckFieldType(byte_offset_, type_->type_info());
    destination_frame.DCheckFieldType(destination_slot.byte_offset_,
                                      destination_slot.type_->type_info());
    type_->UnsafeCopy(
        source_frame.GetRawPointer(byte_offset_),
        destination_frame.GetRawPointer(destination_slot.byte_offset_));
  }
  // Destroys the current value and re-initializes the slot to the type's
  // default-constructed state.
  void Reset(FramePtr frame) const {
    frame.DCheckFieldType(byte_offset_, type_->type_info());
    const auto& layout = type_->type_layout();
    void* ptr = frame.GetRawPointer(byte_offset_);
    layout.DestroyAlloc(ptr);
    layout.InitializeAlignedAlloc(ptr);
  }
  friend bool operator==(const TypedSlot& a, const TypedSlot& b) {
    return a.type_ == b.type_ && a.byte_offset_ == b.byte_offset_;
  }
  friend bool operator!=(const TypedSlot& a, const TypedSlot& b) {
    return !(a == b);
  }
  template <typename H>
  friend H AbslHashValue(H h, const TypedSlot& a) {
    return H::combine(std::move(h), a.type_, a.byte_offset_);
  }
  // Debug representation, e.g. "TypedSlot<INT32>@0".
  friend std::ostream& operator<<(std::ostream& out, const TypedSlot& ts) {
    return out << "TypedSlot<" << ts.GetType()->name() << ">@"
               << ts.byte_offset_;
  }
  // Converts a span of TypedSlots to a tuple of typed slots of the given
  // types.  Fails if the count or any slot type does not match.
  template <typename... Ts>
  static absl::StatusOr<std::tuple<FrameLayout::Slot<Ts>...>> ToSlots(
      absl::Span<const TypedSlot> slots) {
    if (slots.size() != sizeof...(Ts)) {
      return absl::Status(
          absl::StatusCode::kInvalidArgument,
          absl::StrFormat("wrong number of slots: expected %d, got %d",
                          sizeof...(Ts), slots.size()));
    }
    return ToSlotsImpl<std::tuple<FrameLayout::Slot<Ts>...>>(
        slots, std::index_sequence_for<Ts...>{});
  }

 private:
  TypedSlot(const QType* type, size_t byte_offset)
      : type_(type), byte_offset_(byte_offset) {}
  // Returns an error if this slot's QType is not backed by `tpe`.
  absl::Status VerifyType(const std::type_info& tpe) const;
  template <typename ResultTuple, std::size_t... is>
  static absl::StatusOr<ResultTuple> ToSlotsImpl(
      absl::Span<const TypedSlot> slots, std::index_sequence<is...>) {
    (void)slots;  // silences "unused" warning when the pack is empty
    return LiftStatusUp(slots[is]
                            .ToSlot<typename std::tuple_element_t<
                                is, ResultTuple>::value_type>()...);
  }
  QTypePtr type_;
  size_t byte_offset_;
};
// Type-erases a pack of FrameLayout::Slot<T> values into an array of
// TypedSlots, preserving argument order.
template <typename... Slots>
std::array<TypedSlot, sizeof...(Slots)> ToTypedSlots(Slots... slots) {
  std::array<TypedSlot, sizeof...(Slots)> result = {
      TypedSlot::FromSlot(slots)...};
  return result;
}
std::vector<QTypePtr> SlotsToTypes(absl::Span<const TypedSlot> slots);
absl::flat_hash_map<std::string, QTypePtr> SlotsToTypes(
const absl::flat_hash_map<std::string, TypedSlot>& slots);
// Allocates storage for one value of `type` in `layout_builder` and returns
// a TypedSlot referring to it.
inline TypedSlot AddSlot(QTypePtr type, FrameLayout::Builder* layout_builder) {
  const auto subframe = layout_builder->AddSubFrame(type->type_layout());
  return TypedSlot::UnsafeFromOffset(type, subframe.byte_offset());
}
// Allocates one slot per type, returned in the same order as `types`.
std::vector<TypedSlot> AddSlots(absl::Span<const QTypePtr> types,
                                FrameLayout::Builder* layout_builder);
// Allocates one slot per (name, type) pair, preserving input order.
std::vector<std::pair<std::string, TypedSlot>> AddNamedSlots(
    absl::Span<const std::pair<std::string, QTypePtr>> types,
    FrameLayout::Builder* layout_builder);
// Allocates one slot per map entry, returned keyed by name.
absl::flat_hash_map<std::string, TypedSlot> AddSlotsMap(
    const absl::flat_hash_map<std::string, QTypePtr>& types,
    FrameLayout::Builder* layout_builder);
// Registers externally-created slots with the builder (enables debug-build
// field-type checking).  Fails on conflicting registrations.
absl::Status RegisterUnsafeSlots(absl::Span<const TypedSlot> slots,
                                 FrameLayout::Builder* layout_builder);
// Map-keyed variant of RegisterUnsafeSlots.
absl::Status RegisterUnsafeSlotsMap(
    const absl::flat_hash_map<std::string, TypedSlot>& slots,
    FrameLayout::Builder* layout_builder);
// Looks up each name in `slots`; missing names yield std::nullopt.  Fails
// if any found slot's QType differs from the requested one.
absl::StatusOr<std::vector<std::optional<TypedSlot>>>
MaybeFindSlotsAndVerifyTypes(
    absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
    const absl::flat_hash_map<std::string, TypedSlot>& slots);
// Like MaybeFindSlotsAndVerifyTypes, but missing names are an error.
absl::StatusOr<std::vector<TypedSlot>> FindSlotsAndVerifyTypes(
    absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
    const absl::flat_hash_map<std::string, TypedSlot>& slots);
// Verifies that `slots` covers `types` with matching QTypes.  The two flags
// control whether extra slots / missing slots are reported as errors.
absl::Status VerifySlotTypes(
    const absl::flat_hash_map<std::string, QTypePtr>& types,
    const absl::flat_hash_map<std::string, TypedSlot>& slots,
    bool verify_unwanted_slots = true, bool verify_missed_slots = true);
}
#endif
#include "arolla/qtype/typed_slot.h"
#include <algorithm>
#include <optional>
#include <string>
#include <typeinfo>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/demangle.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
// Renders a single slot-type mismatch as "name{expected:X, actual:Y}".
std::string TypeMismatchError(absl::string_view name, QTypePtr expected_type,
                              QTypePtr actual_type) {
  std::string message =
      absl::StrFormat("%s{expected:%s, actual:%s}", name,
                      expected_type->name(), actual_type->name());
  return message;
}
// Builds a FailedPreconditionError summarizing all slot/type problems, or
// OkStatus when all three lists are empty.  Each list is sorted so the
// message is deterministic regardless of hash-map iteration order.
absl::Status SlotTypesError(std::vector<std::string> missed_slots,
                            std::vector<std::string> type_mismatch,
                            std::vector<std::string> unwanted_slots) {
  if (missed_slots.empty() && type_mismatch.empty() && unwanted_slots.empty()) {
    return absl::OkStatus();
  }
  std::string msg = "slots/types match errors:";
  // Sorts `entries` and appends them to `msg` as "<label>: a,b,c;".
  const auto append_section = [&msg](absl::string_view label,
                                     std::vector<std::string>& entries) {
    if (entries.empty()) return;
    std::sort(entries.begin(), entries.end());
    // A leading space separates sections; previously they ran together
    // ("errors:missed slots..." / "...a;slot types mismatch...").
    msg += absl::StrFormat(" %s: %s;", label, absl::StrJoin(entries, ","));
  };
  append_section("missed slots", missed_slots);
  append_section("slot types mismatch", type_mismatch);
  append_section("unwanted slots", unwanted_slots);
  return absl::FailedPreconditionError(msg);
}
}
// Returns the QType of each slot, in the same order as `slots`.
std::vector<QTypePtr> SlotsToTypes(absl::Span<const TypedSlot> slots) {
  std::vector<QTypePtr> result;
  result.reserve(slots.size());
  for (const TypedSlot& slot : slots) {
    result.push_back(slot.GetType());
  }
  return result;
}
// Returns OkStatus if this slot's QType is backed by the C++ type `tpe`;
// otherwise an InvalidArgumentError naming both types.
absl::Status TypedSlot::VerifyType(const std::type_info& tpe) const {
  const std::type_info& actual = GetType()->type_info();
  if (actual == tpe) {
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError(absl::StrFormat(
      "slot type does not match C++ type: expected %s, got %s", TypeName(tpe),
      TypeName(actual)));
}
// Map-keyed variant: returns the QType of each named slot.
absl::flat_hash_map<std::string, QTypePtr> SlotsToTypes(
    const absl::flat_hash_map<std::string, TypedSlot>& slots) {
  absl::flat_hash_map<std::string, QTypePtr> types;
  types.reserve(slots.size());
  for (const auto& [name, slot] : slots) {
    types.emplace(name, slot.GetType());
  }
  return types;
}
// Allocates one slot per type in `types`, returned in the same order.
std::vector<TypedSlot> AddSlots(absl::Span<const QTypePtr> types,
                                FrameLayout::Builder* layout_builder) {
  std::vector<TypedSlot> result;
  result.reserve(types.size());
  for (QTypePtr type : types) {
    result.push_back(AddSlot(type, layout_builder));
  }
  return result;
}
// Allocates one slot per (name, type) pair, preserving input order.
std::vector<std::pair<std::string, TypedSlot>> AddNamedSlots(
    absl::Span<const std::pair<std::string, QTypePtr>> types,
    FrameLayout::Builder* layout_builder) {
  std::vector<std::pair<std::string, TypedSlot>> result;
  result.reserve(types.size());
  for (const auto& name_and_type : types) {
    result.emplace_back(name_and_type.first,
                        AddSlot(name_and_type.second, layout_builder));
  }
  return result;
}
// Allocates one slot per map entry and returns them keyed by name.
//
// Consistency fix: the parameter type is spelled `QTypePtr` to match the
// header declaration and the rest of this file (it was `const QType*`).
absl::flat_hash_map<std::string, TypedSlot> AddSlotsMap(
    const absl::flat_hash_map<std::string, QTypePtr>& types,
    FrameLayout::Builder* layout_builder) {
  absl::flat_hash_map<std::string, TypedSlot> slots;
  slots.reserve(types.size());
  for (const auto& [name, type] : types) {
    slots.emplace(name, AddSlot(type, layout_builder));
  }
  return slots;
}
// Registers each slot with the layout builder (enables debug-build field
// type checking).  Returns the first registration error, if any.
absl::Status RegisterUnsafeSlots(absl::Span<const TypedSlot> slots,
                                 FrameLayout::Builder* layout_builder) {
  for (const TypedSlot& slot : slots) {
    const QTypePtr type = slot.GetType();
    RETURN_IF_ERROR(layout_builder->RegisterUnsafeSlot(
        slot.byte_offset(), type->type_layout().AllocSize(),
        type->type_info()));
  }
  return absl::OkStatus();
}
// Map-keyed variant of RegisterUnsafeSlots; the names are ignored, only the
// slots themselves are registered.
absl::Status RegisterUnsafeSlotsMap(
    const absl::flat_hash_map<std::string, TypedSlot>& slots,
    FrameLayout::Builder* layout_builder) {
  for (const auto& name_and_slot : slots) {
    const TypedSlot& slot = name_and_slot.second;
    const QTypePtr type = slot.GetType();
    RETURN_IF_ERROR(layout_builder->RegisterUnsafeSlot(
        slot.byte_offset(), type->type_layout().AllocSize(),
        type->type_info()));
  }
  return absl::OkStatus();
}
// Looks up each requested name in `slots`.  Missing names yield
// std::nullopt; found slots are returned in request order and their QTypes
// are verified.  Fails with FailedPreconditionError listing all mismatches.
absl::StatusOr<std::vector<std::optional<TypedSlot>>>
MaybeFindSlotsAndVerifyTypes(
    absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
    const absl::flat_hash_map<std::string, TypedSlot>& slots) {
  std::vector<std::string> mismatches;
  std::vector<std::optional<TypedSlot>> result;
  result.reserve(types_in_order.size());
  for (const auto& [name, expected_type] : types_in_order) {
    const auto it = slots.find(name);
    if (it == slots.end()) {
      result.emplace_back(std::nullopt);
      continue;
    }
    const TypedSlot& found = it->second;
    result.emplace_back(found);
    if (found.GetType() != expected_type) {
      mismatches.push_back(
          TypeMismatchError(name, expected_type, found.GetType()));
    }
  }
  RETURN_IF_ERROR(SlotTypesError({}, std::move(mismatches),
                                 {}));
  return {std::move(result)};
}
// Like MaybeFindSlotsAndVerifyTypes, but every requested name must be
// present; missing names and type mismatches are reported together.
absl::StatusOr<std::vector<TypedSlot>> FindSlotsAndVerifyTypes(
    absl::Span<const std::pair<std::string, QTypePtr>> types_in_order,
    const absl::flat_hash_map<std::string, TypedSlot>& slots) {
  std::vector<std::string> missing;
  std::vector<std::string> mismatches;
  std::vector<TypedSlot> result;
  result.reserve(types_in_order.size());
  for (const auto& [name, expected_type] : types_in_order) {
    const auto it = slots.find(name);
    if (it == slots.end()) {
      missing.push_back(name);
      continue;
    }
    const TypedSlot& found = it->second;
    result.push_back(found);
    if (found.GetType() != expected_type) {
      mismatches.push_back(
          TypeMismatchError(name, expected_type, found.GetType()));
    }
  }
  RETURN_IF_ERROR(SlotTypesError(std::move(missing), std::move(mismatches),
                                 {}));
  return {std::move(result)};
}
// Checks that `slots` matches the expected `types`:
//  - every expected name is present (skipped if !verify_missed_slots),
//  - each present slot has the expected QType,
//  - no extra slots exist (skipped if !verify_unwanted_slots).
// All problems are collected and reported in a single status.
absl::Status VerifySlotTypes(
    const absl::flat_hash_map<std::string, QTypePtr>& types,
    const absl::flat_hash_map<std::string, TypedSlot>& slots,
    bool verify_unwanted_slots, bool verify_missed_slots) {
  std::vector<std::string> missing;
  std::vector<std::string> mismatches;
  std::vector<std::string> extras;
  for (const auto& [name, expected_type] : types) {
    const auto it = slots.find(name);
    if (it == slots.end()) {
      if (verify_missed_slots) {
        missing.push_back(name);
      }
    } else if (it->second.GetType() != expected_type) {
      mismatches.push_back(
          TypeMismatchError(name, expected_type, it->second.GetType()));
    }
  }
  if (verify_unwanted_slots) {
    for (const auto& name_and_slot : slots) {
      if (!types.contains(name_and_slot.first)) {
        extras.push_back(name_and_slot.first);
      }
    }
  }
  return SlotTypesError(std::move(missing), std::move(mismatches),
                        std::move(extras));
}
} | #include "arolla/qtype/typed_slot.h"
#include <cstdint>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
using ::testing::Pair;
using ::testing::StrEq;
using ::testing::UnorderedElementsAre;
// TypedSlot is a cheap value type: copies compare equal and keep the QType.
TEST(TypedSlotTest, Copy) {
  FrameLayout::Builder layout_builder;
  auto slot = layout_builder.AddSlot<int>();
  auto slot2 = layout_builder.AddSlot<float>();
  auto typed_slot = TypedSlot::FromSlot(slot);
  auto typed_slot2 = TypedSlot::FromSlot(slot2);
  auto typed_slot_copy = typed_slot;
  EXPECT_EQ(typed_slot.GetType(), typed_slot_copy.GetType());
  EXPECT_EQ(typed_slot, typed_slot_copy);
  typed_slot_copy = typed_slot2;
  EXPECT_EQ(typed_slot2.GetType(), typed_slot_copy.GetType());
  EXPECT_EQ(typed_slot2, typed_slot_copy);
}

// Round-trips a slot through TypedSlot/ToSlot and checks that converting to
// the wrong C++ type fails with InvalidArgument.
TEST(TypedSlotTest, PrimitiveTypes) {
  FrameLayout::Builder layout_builder;
  auto slot = layout_builder.AddSlot<int32_t>();
  auto typed_slot = TypedSlot::FromSlot(slot);
  EXPECT_EQ(typed_slot.GetType(), GetQType<int32_t>());
  FrameLayout::Slot<int32_t> new_slot = typed_slot.ToSlot<int32_t>().value();
  EXPECT_EQ(slot.byte_offset(), new_slot.byte_offset());
  EXPECT_THAT(typed_slot.ToSlot<int64_t>().status(),
              StatusIs(absl::StatusCode::kInvalidArgument));
}

// Covers both SlotsToTypes overloads (vector-ordered and map-keyed).
TEST(TypedSlotTest, SlotsToTypes) {
  FrameLayout::Builder layout_builder;
  auto slot1 = layout_builder.AddSlot<int32_t>();
  auto slot2 = layout_builder.AddSlot<float>();
  auto typed_slot1 = TypedSlot::FromSlot(slot1);
  auto typed_slot2 = TypedSlot::FromSlot(slot2);
  EXPECT_THAT(SlotsToTypes(std::vector<TypedSlot>{typed_slot1, typed_slot2}),
              ElementsAre(GetQType<int32_t>(), GetQType<float>()));
  EXPECT_THAT(SlotsToTypes(absl::flat_hash_map<std::string, TypedSlot>{
                  {"X", typed_slot1}, {"Y", typed_slot2}}),
              UnorderedElementsAre(Pair("X", GetQType<int32_t>()),
                                   Pair("Y", GetQType<float>())));
}

// UnsafeFromOffset stores the given offset and type verbatim.
TEST(TypedSlotTest, UnsafeFromOffset) {
  const QType* i32 = GetQType<int32_t>();
  auto typed_slot = TypedSlot::UnsafeFromOffset(i32, 10);
  EXPECT_EQ(typed_slot.byte_offset(), 10);
  EXPECT_EQ(typed_slot.GetType(), i32);
}

// AddSlots returns one slot per requested type, in order.
TEST(TypedSlotTest, AddSlots) {
  FrameLayout::Builder layout_builder;
  const QType* i32 = GetQType<int32_t>();
  const QType* i64 = GetQType<int64_t>();
  std::vector<TypedSlot> slots = AddSlots({i32, i64}, &layout_builder);
  ASSERT_EQ(slots.size(), 2);
  EXPECT_EQ(i32, slots[0].GetType());
  EXPECT_EQ(i64, slots[1].GetType());
}

// AddNamedSlots preserves the input (name, type) order.
TEST(TypedSlotTest, AddNamedSlots) {
  FrameLayout::Builder layout_builder;
  const QType* i32 = GetQType<int32_t>();
  const QType* i64 = GetQType<int64_t>();
  std::vector<std::pair<std::string, TypedSlot>> slots =
      AddNamedSlots({{"c", i32}, {"b", i64}}, &layout_builder);
  ASSERT_EQ(slots.size(), 2);
  EXPECT_EQ("c", slots[0].first);
  EXPECT_EQ(i32, slots[0].second.GetType());
  EXPECT_EQ("b", slots[1].first);
  EXPECT_EQ(i64, slots[1].second.GetType());
}

// AddSlotsMap returns slots keyed by the input names.
TEST(TypedSlotTest, AddSlotsMap) {
  FrameLayout::Builder layout_builder;
  const QType* i32 = GetQType<int32_t>();
  const QType* i64 = GetQType<int64_t>();
  absl::flat_hash_map<std::string, TypedSlot> slots =
      AddSlotsMap({{"a", i32}, {"b", i64}}, &layout_builder);
  ASSERT_EQ(slots.size(), 2);
  EXPECT_EQ(i32, slots.at("a").GetType());
  EXPECT_EQ(i64, slots.at("b").GetType());
}
// Registering slots makes the layout aware of their fields; re-registering
// the same offset is rejected in debug builds.
TEST(TypedSlotTest, RegisterUnsafeSlots) {
  FrameLayout::Builder layout_builder;
  layout_builder.AddSlot<int64_t>();
  const QType* i32 = GetQType<int32_t>();
  const QType* f32 = GetQType<float>();
  auto slot_i32 = TypedSlot::UnsafeFromOffset(i32, 0);
  auto slot_f32 = TypedSlot::UnsafeFromOffset(f32, 4);
  ASSERT_OK(RegisterUnsafeSlots({slot_i32, slot_f32}, &layout_builder));
#ifndef NDEBUG
  // Duplicate registration is only detected in debug builds.
  ASSERT_FALSE(RegisterUnsafeSlots({slot_i32}, &layout_builder).ok());
#endif
  auto layout = std::move(layout_builder).Build();
  layout.HasField(0, typeid(int32_t));
  layout.HasField(4, typeid(float));
}

// Same as RegisterUnsafeSlots but via the map-keyed API.
TEST(TypedSlotTest, RegisterUnsafeSlotsMap) {
  FrameLayout::Builder layout_builder;
  layout_builder.AddSlot<int64_t>();
  const QType* i32 = GetQType<int32_t>();
  const QType* f32 = GetQType<float>();
  auto slot_i32 = TypedSlot::UnsafeFromOffset(i32, 0);
  auto slot_f32 = TypedSlot::UnsafeFromOffset(f32, 4);
  ASSERT_OK(RegisterUnsafeSlotsMap({{"a", slot_i32}, {"b", slot_f32}},
                                   &layout_builder));
#ifndef NDEBUG
  ASSERT_FALSE(RegisterUnsafeSlotsMap({{"a", slot_i32}}, &layout_builder).ok());
#endif
  auto layout = std::move(layout_builder).Build();
  layout.HasField(0, typeid(int32_t));
  layout.HasField(4, typeid(float));
}

// Optionals expose two sub-slots (presence, value); primitives expose none.
// Sub-slots read/write the same memory as the typed accessors.
TEST(TypedSlotTest, GetSubslots) {
  FrameLayout::Builder layout_builder;
  auto opt_float_slot = layout_builder.AddSlot<OptionalValue<float>>();
  auto opt_int32_slot = layout_builder.AddSlot<OptionalValue<int32_t>>();
  auto float64_slot = layout_builder.AddSlot<double>();
  FrameLayout layout = std::move(layout_builder).Build();
  TypedSlot opt_float_tslot = TypedSlot::FromSlot(opt_float_slot);
  TypedSlot opt_int32_tslot = TypedSlot::FromSlot(opt_int32_slot);
  TypedSlot float64_tslot = TypedSlot::FromSlot(float64_slot);
  EXPECT_EQ(opt_float_tslot.SubSlotCount(), 2);
  EXPECT_EQ(opt_int32_tslot.SubSlotCount(), 2);
  EXPECT_EQ(float64_tslot.SubSlotCount(), 0);
  EXPECT_EQ(opt_float_tslot.SubSlot(0),
            TypedSlot::FromSlot(opt_float_slot.GetSubslot<0>()));
  EXPECT_EQ(opt_float_tslot.SubSlot(1),
            TypedSlot::FromSlot(opt_float_slot.GetSubslot<1>()));
  EXPECT_EQ(opt_int32_tslot.SubSlot(0),
            TypedSlot::FromSlot(opt_int32_slot.GetSubslot<0>()));
  EXPECT_EQ(opt_int32_tslot.SubSlot(1),
            TypedSlot::FromSlot(opt_int32_slot.GetSubslot<1>()));
  MemoryAllocation alloc_holder(&layout);
  FramePtr frame = alloc_holder.frame();
  frame.Set(opt_float_slot, OptionalValue<float>(1.0));
  frame.Set(opt_int32_slot, OptionalValue<int32_t>());
  // Sub-slot 0 is the presence flag of an OptionalValue.
  auto float_present_slot = opt_float_tslot.SubSlot(0).ToSlot<bool>().value();
  auto int32_present_slot = opt_int32_tslot.SubSlot(0).ToSlot<bool>().value();
  EXPECT_EQ(frame.Get(float_present_slot), true);
  EXPECT_EQ(frame.Get(int32_present_slot), false);
  // Writing presence + value through the sub-slots is observable through
  // the typed optional slot.
  auto int32_value_slot = opt_int32_tslot.SubSlot(1).ToSlot<int32_t>().value();
  frame.Set(int32_present_slot, true);
  frame.Set(int32_value_slot, 2);
  EXPECT_EQ(frame.Get(opt_int32_slot), OptionalValue<int32_t>(2));
}
// operator<< prints "TypedSlot<TYPE>@offset".
TEST(TypedSlotTest, DebugPrintTypedSlot) {
  FrameLayout::Builder layout_builder;
  auto slot1 = layout_builder.AddSlot<int32_t>();
  auto slot2 = layout_builder.AddSlot<float>();
  auto slot3 = layout_builder.AddSlot<Bytes>();
  auto typed_slot1 = TypedSlot::FromSlot(slot1);
  auto typed_slot2 = TypedSlot::FromSlot(slot2);
  auto typed_slot3 = TypedSlot::FromSlot(slot3);
  std::stringstream buffer;
  buffer << "typed_slot1 is: " << typed_slot1 << ", ";
  buffer << "typed_slot2 is: " << typed_slot2 << ", ";
  buffer << "typed_slot3 is: " << typed_slot3 << ".";
  EXPECT_THAT(buffer.str(), StrEq("typed_slot1 is: TypedSlot<INT32>@0, "
                                  "typed_slot2 is: TypedSlot<FLOAT32>@4, "
                                  "typed_slot3 is: TypedSlot<BYTES>@8."));
}

// ToSlots succeeds only when both the count and each slot's type match.
TEST(TypedSlotTest, ToSlots) {
  FrameLayout::Builder layout_builder;
  auto slot1 = layout_builder.AddSlot<int32_t>();
  auto slot2 = layout_builder.AddSlot<float>();
  ASSERT_OK_AND_ASSIGN(
      auto slots_tuple,
      (TypedSlot::ToSlots<int32_t, float>(
          {TypedSlot::FromSlot(slot1), TypedSlot::FromSlot(slot2)})));
  EXPECT_THAT(std::get<0>(slots_tuple).byte_offset(), Eq(slot1.byte_offset()));
  EXPECT_THAT(std::get<1>(slots_tuple).byte_offset(), Eq(slot2.byte_offset()));
  EXPECT_THAT(TypedSlot::ToSlots<float>({TypedSlot::FromSlot(slot1)}),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("slot type does not match C++ type")));
  EXPECT_THAT(
      (TypedSlot::ToSlots<int32_t, float>({TypedSlot::FromSlot(slot1)})),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr("wrong number of slots: expected 2, got 1")));
}

// A present name with a mismatched QType yields FailedPrecondition.
TEST(TypedSlotTest, MaybeFindSlotsAndVerifyTypesErrors) {
  FrameLayout::Builder layout_builder;
  auto float_slot = layout_builder.AddSlot<float>();
  EXPECT_THAT(
      MaybeFindSlotsAndVerifyTypes({{"a", GetQType<int>()}},
                                   {{"a", TypedSlot::FromSlot(float_slot)}}),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               MatchesRegex(".*slot types "
                            "mismatch.*a.*expected:INT32.*actual:FLOAT32.*")));
}

// Missing names are tolerated and reported as std::nullopt, in request
// order; extra slots in the map are ignored.
TEST(TypedSlotTest, MaybeFindSlotsAndVerifyTypes) {
  FrameLayout::Builder layout_builder;
  auto int_slot = layout_builder.AddSlot<int>();
  auto float_slot = layout_builder.AddSlot<float>();
  EXPECT_THAT(
      MaybeFindSlotsAndVerifyTypes(
          {{"a", GetQType<int>()}, {"c", GetQType<float>()}},
          {{"b", TypedSlot::FromSlot(float_slot)},
           {"a", TypedSlot::FromSlot(int_slot)}}),
      IsOkAndHolds(ElementsAre(TypedSlot::FromSlot(int_slot), std::nullopt)));
}
// FindSlotsAndVerifyTypes reports type mismatches, missing names, and both
// combined in one status message.
TEST(TypedSlotTest, FindSlotsAndVerifyTypesErrors) {
  FrameLayout::Builder layout_builder;
  auto float_slot = layout_builder.AddSlot<float>();
  EXPECT_THAT(
      FindSlotsAndVerifyTypes({{"NAME", GetQType<int>()}},
                              {{"NAME", TypedSlot::FromSlot(float_slot)}}),
      StatusIs(
          absl::StatusCode::kFailedPrecondition,
          MatchesRegex(".*slot types "
                       "mismatch.*NAME.*expected:INT32.*actual:FLOAT32.*")));
  EXPECT_THAT(FindSlotsAndVerifyTypes({{"FAKE", GetQType<int>()}},
                                      {{"b", TypedSlot::FromSlot(float_slot)}}),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       MatchesRegex(".*missed slots:.*FAKE.*")));
  EXPECT_THAT(
      FindSlotsAndVerifyTypes(
          {{"NAME", GetQType<int>()}, {"FAKE", GetQType<int>()}},
          {{"NAME", TypedSlot::FromSlot(float_slot)}}),
      StatusIs(
          absl::StatusCode::kFailedPrecondition,
          MatchesRegex(".*missed slots:.*FAKE.*slot types mismatch:.*NAME.*")));
}

// Found slots come back in request order; extra entries in the map are
// ignored.
TEST(TypedSlotTest, FindSlotsAndVerifyTypes) {
  FrameLayout::Builder layout_builder;
  auto int_slot = layout_builder.AddSlot<int>();
  auto float_slot = layout_builder.AddSlot<float>();
  auto int8_slot = layout_builder.AddSlot<int32_t>();
  EXPECT_THAT(FindSlotsAndVerifyTypes(
                  {{"c", GetQType<float>()}, {"a", GetQType<int>()}},
                  {{"c", TypedSlot::FromSlot(float_slot)},
                   {"b", TypedSlot::FromSlot(int8_slot)},
                   {"a", TypedSlot::FromSlot(int_slot)}}),
              IsOkAndHolds(ElementsAre(TypedSlot::FromSlot(float_slot),
                                       TypedSlot::FromSlot(int_slot))));
}

// VerifySlotTypes: exact match passes; the two flags independently disable
// the missed-slot and unwanted-slot checks; violations are combined.
TEST(TypedSlotTest, VerifySlotTypes) {
  FrameLayout::Builder layout_builder;
  auto int_slot = layout_builder.AddSlot<int>();
  auto float_slot = layout_builder.AddSlot<float>();
  EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
                            {{"c", TypedSlot::FromSlot(float_slot)},
                             {"a", TypedSlot::FromSlot(int_slot)}}));
  EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
                            {{"c", TypedSlot::FromSlot(float_slot)}},
                            true,
                            false));
  EXPECT_OK(VerifySlotTypes({{"a", GetQType<int>()}},
                            {{"c", TypedSlot::FromSlot(float_slot)},
                             {"a", TypedSlot::FromSlot(int_slot)}},
                            false));
  EXPECT_THAT(
      VerifySlotTypes({{"a", GetQType<int>()}},
                      {{"a", TypedSlot::FromSlot(float_slot)}}),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               MatchesRegex(".*slot types "
                            "mismatch.*a.*expected:INT32.*actual:FLOAT32.*")));
  EXPECT_THAT(
      VerifySlotTypes({{"a", GetQType<int>()}, {"c", GetQType<float>()}},
                      {{"a", TypedSlot::FromSlot(float_slot)}}),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               MatchesRegex(".*missed slots:.*c.*slot types mismatch:.*a.*")));
  EXPECT_THAT(
      VerifySlotTypes({{"d", GetQType<int>()}},
                      {{"a", TypedSlot::FromSlot(float_slot)}}),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               MatchesRegex(".*missed slots:.*d.*unwanted slots:.*a.*")));
}
}
} |
202 | #ifndef AROLLA_QTYPE_UNSPECIFIED_QTYPE_H_
#define AROLLA_QTYPE_UNSPECIFIED_QTYPE_H_
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
namespace arolla {
QTypePtr GetUnspecifiedQType();
const TypedValue& GetUnspecifiedQValue();
}
#endif
#include "arolla/qtype/unspecified_qtype.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
// Empty payload type backing the UNSPECIFIED qtype; carries no data.
struct Unspecified {};

// Singleton QType for "no value was specified".  All instances are
// indistinguishable: copy is a no-op and the fingerprint is a constant.
class UnspecifiedQType final : public QType {
 public:
  UnspecifiedQType()
      : QType(ConstructorArgs{.name = "UNSPECIFIED",
                              .type_info = typeid(Unspecified),
                              .type_layout = MakeTypeLayout<Unspecified>()}) {}
  // Always renders as "unspecified"; the source value carries no state.
  ReprToken UnsafeReprToken(const void* source) const override {
    return ReprToken{"unspecified"};
  }
  // Nothing to copy: Unspecified is stateless.
  void UnsafeCopy(const void* /*source*/,
                  void* /*destination*/) const override {}
  // All unspecified values hash identically via a fixed salt.
  void UnsafeCombineToFingerprintHasher(
      const void* /*source*/, FingerprintHasher* hasher) const override {
    hasher->Combine(absl::string_view("::arolla::UnspecifiedQValue"));
  }
};
}
// Returns the process-wide UNSPECIFIED qtype singleton.  Indestructible
// avoids static-destruction-order issues at shutdown.
QTypePtr GetUnspecifiedQType() {
  static const Indestructible<UnspecifiedQType> result;
  return result.get();
}
// Returns the canonical (and only meaningful) value of the UNSPECIFIED
// qtype, constructed once on first use.
const TypedValue& GetUnspecifiedQValue() {
  static const Indestructible<TypedValue> result(
      TypedValue::UnsafeFromTypeDefaultConstructed(GetUnspecifiedQType()));
  return *result;
}
} | #include "arolla/qtype/unspecified_qtype.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
// Fixture ensures the arolla runtime is initialized before each test.
class UnspecifiedQTypeTest : public ::testing::Test {
  void SetUp() override { ASSERT_OK(InitArolla()); }
};

// The UNSPECIFIED qtype is a 1-byte, alignment-1, field-less scalar type.
TEST_F(UnspecifiedQTypeTest, UnspecifiedQType) {
  const auto unspecified_qtype = GetUnspecifiedQType();
  EXPECT_EQ(unspecified_qtype->name(), "UNSPECIFIED");
  EXPECT_EQ(unspecified_qtype->type_layout().AllocSize(), 1);
  EXPECT_EQ(unspecified_qtype->type_layout().AllocAlignment().value, 1);
  EXPECT_TRUE(unspecified_qtype->type_fields().empty());
  EXPECT_EQ(unspecified_qtype->value_qtype(), nullptr);
}

// The canonical unspecified value has the right type and repr.
TEST_F(UnspecifiedQTypeTest, UnspecifiedQValue) {
  const auto unspecified_qvalue = GetUnspecifiedQValue();
  EXPECT_EQ(unspecified_qvalue.GetType(), GetUnspecifiedQType());
  EXPECT_THAT(unspecified_qvalue.GenReprToken(), ReprTokenEq("unspecified"));
}
}
} |
203 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_ANDROID_HARDWARE_BUFFER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_ANDROID_HARDWARE_BUFFER_H_
#include <stdint.h>
#ifdef __ANDROID__
#include <android/hardware_buffer.h>
#else
// Non-Android builds: provide just enough of the NDK AHardwareBuffer API
// surface (opaque handle + descriptor struct) for this header to compile.
// Field layout mirrors <android/hardware_buffer.h>.
extern "C" {
typedef struct AHardwareBuffer AHardwareBuffer;
typedef struct AHardwareBuffer_Desc AHardwareBuffer_Desc;
struct AHardwareBuffer_Desc {
  uint32_t width;
  uint32_t height;
  uint32_t layers;
  uint32_t format;
  uint64_t usage;
  uint32_t stride;
  uint32_t rfu0;  // reserved for future use
  uint64_t rfu1;  // reserved for future use
};
}
#endif
namespace tflite::gpu {
// Lazy, dlopen-based access to the NDK AHardwareBuffer API, so binaries
// also load on systems where libnativewindow.so is absent.
// Process-wide singleton.
class OptionalAndroidHardwareBuffer {
 public:
  static OptionalAndroidHardwareBuffer& Instance() {
    static OptionalAndroidHardwareBuffer instance;
    return instance;
  }
  // True iff all required AHardwareBuffer symbols were resolved.  Callers
  // MUST check this before calling any of the methods below: they dispatch
  // through dlsym-resolved function pointers that are only valid when
  // supported.
  bool Supported() { return supported_; }
  // Thin wrappers over the corresponding AHardwareBuffer_* NDK functions;
  // integer returns follow the NDK convention (0 on success).
  int IsSupported(const AHardwareBuffer_Desc* description) {
    return is_supported_(description);
  }
  int Allocate(const AHardwareBuffer_Desc* description,
               AHardwareBuffer** buffer) {
    return allocate_(description, buffer);
  }
  void Acquire(AHardwareBuffer* buffer) { return acquire_(buffer); }
  void Release(AHardwareBuffer* buffer) { return release_(buffer); }
  void Describe(AHardwareBuffer* buffer, AHardwareBuffer_Desc* desc) {
    return describe_(buffer, desc);
  }

 private:
  // Handle returned by dlopen("libnativewindow.so"); owned for the process
  // lifetime (the destructor is defaulted and never dlclose()s it).
  void* dlopen_handle_;
  int (*is_supported_)(const AHardwareBuffer_Desc* desc);
  int (*allocate_)(const AHardwareBuffer_Desc* desc, AHardwareBuffer** buffer);
  void (*acquire_)(AHardwareBuffer* buffer);
  void (*release_)(AHardwareBuffer* buffer);
  void (*describe_)(AHardwareBuffer* buffer, AHardwareBuffer_Desc* desc);
  bool supported_;
  OptionalAndroidHardwareBuffer();
  OptionalAndroidHardwareBuffer(const OptionalAndroidHardwareBuffer&) = delete;
  ~OptionalAndroidHardwareBuffer() = default;
};
}
#endif
#include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include <dlfcn.h>
namespace tflite::gpu {
// Resolves the AHardwareBuffer entry points from libnativewindow.so at
// runtime.  On any failure, supported_ is false and every function pointer
// is null (previously the pointers were left uninitialized when dlopen
// failed, and the library handle stayed pinned when only some symbols
// resolved).
OptionalAndroidHardwareBuffer::OptionalAndroidHardwareBuffer() {
#ifdef __ANDROID__
  dlopen_handle_ = dlopen("libnativewindow.so", RTLD_NOW);
  if (dlopen_handle_ == nullptr) {
    supported_ = false;
    allocate_ = nullptr;
    acquire_ = nullptr;
    release_ = nullptr;
    describe_ = nullptr;
    is_supported_ = nullptr;
    return;
  }
  allocate_ = reinterpret_cast<decltype(allocate_)>(
      dlsym(dlopen_handle_, "AHardwareBuffer_allocate"));
  acquire_ = reinterpret_cast<decltype(acquire_)>(
      dlsym(dlopen_handle_, "AHardwareBuffer_acquire"));
  release_ = reinterpret_cast<decltype(release_)>(
      dlsym(dlopen_handle_, "AHardwareBuffer_release"));
  describe_ = reinterpret_cast<decltype(describe_)>(
      dlsym(dlopen_handle_, "AHardwareBuffer_describe"));
  is_supported_ = reinterpret_cast<decltype(is_supported_)>(
      dlsym(dlopen_handle_, "AHardwareBuffer_isSupported"));
  supported_ =
      (allocate_ != nullptr && acquire_ != nullptr && release_ != nullptr &&
       describe_ != nullptr && is_supported_ != nullptr);
  if (!supported_) {
    // A partial symbol set is unusable; release the handle so the library
    // is not pinned for the process lifetime, and null out the pointers so
    // a misuse crashes deterministically.
    dlclose(dlopen_handle_);
    dlopen_handle_ = nullptr;
    allocate_ = nullptr;
    acquire_ = nullptr;
    release_ = nullptr;
    describe_ = nullptr;
    is_supported_ = nullptr;
  }
#else
  // Non-Android build: the API is never available.
  dlopen_handle_ = nullptr;
  allocate_ = nullptr;
  acquire_ = nullptr;
  release_ = nullptr;
  describe_ = nullptr;
  is_supported_ = nullptr;
  supported_ = false;
#endif
}
} | #include "tensorflow/lite/delegates/gpu/android_hardware_buffer.h"
#include <gtest/gtest.h>
using tflite::gpu::OptionalAndroidHardwareBuffer;
auto Instance = OptionalAndroidHardwareBuffer::Instance;
namespace {
#ifndef __ANDROID__
// On non-Android builds the API must report unsupported.
TEST(OptionalAndroidHardwareBufferTest, NotSupportedOnNonAndroid) {
  EXPECT_EQ(Instance().Supported(), false);
}
#else
// On Android test devices libnativewindow.so is expected to be present.
TEST(OptionalAndroidHardwareBufferTest, SupportedOnAndroid) {
  EXPECT_EQ(Instance().Supported(), true);
}
// Allocate (refcount 1) then Release (refcount 0) a BLOB buffer.
TEST(OptionalAndroidHardwareBufferTest, CanAllocateAndReleaseOnAndroid) {
  EXPECT_EQ(Instance().Supported(), true);
  AHardwareBuffer* buffer;
  AHardwareBuffer_Desc description{};
  description.width = 1600;
  description.height = 1;
  description.layers = 1;
  description.rfu0 = 0;
  description.rfu1 = 0;
  description.stride = 1;
  description.format = AHARDWAREBUFFER_FORMAT_BLOB;
  description.usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
  EXPECT_TRUE(Instance().IsSupported(&description));
  EXPECT_EQ(Instance().Allocate(&description, &buffer), 0);
  Instance().Release(buffer);
}
// Allocate (1) + Acquire (2), then two Releases to drop the count to 0.
TEST(OptionalAndroidHardwareBufferTest, CanAcquireAndReleaseOnAndroid) {
  EXPECT_EQ(Instance().Supported(), true);
  AHardwareBuffer* buffer;
  AHardwareBuffer_Desc description{};
  description.width = 1600;
  description.height = 1;
  description.layers = 1;
  description.rfu0 = 0;
  description.rfu1 = 0;
  description.stride = 1;
  description.format = AHARDWAREBUFFER_FORMAT_BLOB;
  description.usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
  EXPECT_TRUE(Instance().IsSupported(&description));
  EXPECT_EQ(Instance().Allocate(&description, &buffer), 0);
  Instance().Acquire(buffer);
  Instance().Release(buffer);
  Instance().Release(buffer);
}
#endif
} |
204 | #ifndef QUICHE_QUIC_TOOLS_SIMPLE_TICKET_CRYPTER_H_
#define QUICHE_QUIC_TOOLS_SIMPLE_TICKET_CRYPTER_H_
#include "openssl/aead.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/core/quic_clock.h"
#include "quiche/quic/core/quic_time.h"
namespace quic {
// TLS session-ticket crypter using AES-128-GCM under self-managed random
// keys rotated weekly.  Ticket layout: 1-byte key epoch || 16-byte IV ||
// AEAD ciphertext+tag.
class QUIC_NO_EXPORT SimpleTicketCrypter
    : public quic::ProofSource::TicketCrypter {
 public:
  explicit SimpleTicketCrypter(QuicClock* clock);
  ~SimpleTicketCrypter() override;
  // Bytes added to the plaintext: epoch + IV + AEAD tag.
  size_t MaxOverhead() override;
  // Encrypts `in`.  `encryption_key` is unused (keys are self-managed) and
  // is DCHECKed to be empty.  Returns an empty vector on failure.
  std::vector<uint8_t> Encrypt(absl::string_view in,
                               absl::string_view encryption_key) override;
  // Decrypts synchronously and invokes `callback` with the plaintext, or
  // with an empty vector on failure.
  void Decrypt(
      absl::string_view in,
      std::shared_ptr<quic::ProofSource::DecryptCallback> callback) override;

 private:
  std::vector<uint8_t> Decrypt(absl::string_view in);
  // Rotates current_key_ into previous_key_ once it expires.
  void MaybeRotateKeys();
  static constexpr size_t kKeySize = 16;
  // One AES-128-GCM key with its AEAD context and expiration time.
  struct Key {
    uint8_t key[kKeySize];
    bssl::ScopedEVP_AEAD_CTX aead_ctx;
    QuicTime expiration = QuicTime::Zero();
  };
  std::unique_ptr<Key> NewKey();
  std::unique_ptr<Key> current_key_;
  std::unique_ptr<Key> previous_key_;
  // Epoch byte stored as ticket byte 0; wraps mod 256 on rotation.
  uint8_t key_epoch_ = 0;
  QuicClock* clock_;
};
}
#endif
#include "quiche/quic/tools/simple_ticket_crypter.h"
#include <memory>
#include <utility>
#include <vector>
#include "openssl/aead.h"
#include "openssl/rand.h"
namespace quic {
namespace {
constexpr QuicTime::Delta kTicketKeyLifetime =
QuicTime::Delta::FromSeconds(60 * 60 * 24 * 7);
constexpr size_t kEpochSize = 1;
constexpr size_t kIVSize = 16;
constexpr size_t kAuthTagSize = 16;
constexpr size_t kIVOffset = kEpochSize;
constexpr size_t kMessageOffset = kIVOffset + kIVSize;
}
// Seeds a random starting epoch (so epochs are unpredictable across server
// restarts) and creates the initial key.
SimpleTicketCrypter::SimpleTicketCrypter(QuicClock* clock) : clock_(clock) {
  RAND_bytes(&key_epoch_, 1);
  current_key_ = NewKey();
}
SimpleTicketCrypter::~SimpleTicketCrypter() = default;
// 1 (epoch) + 16 (IV) + 16 (GCM tag) bytes.
size_t SimpleTicketCrypter::MaxOverhead() {
  return kEpochSize + kIVSize + kAuthTagSize;
}
// Seals `in` with the current key after an expiry check.  Output layout:
// key_epoch || fresh random IV || ciphertext+tag.  Empty vector on failure.
std::vector<uint8_t> SimpleTicketCrypter::Encrypt(
    absl::string_view in, absl::string_view encryption_key) {
  // This crypter manages its own keys; callers must not supply one.
  QUICHE_DCHECK(encryption_key.empty());
  MaybeRotateKeys();
  std::vector<uint8_t> out(in.size() + MaxOverhead());
  out[0] = key_epoch_;
  RAND_bytes(out.data() + kIVOffset, kIVSize);
  size_t out_len;
  const EVP_AEAD_CTX* ctx = current_key_->aead_ctx.get();
  if (!EVP_AEAD_CTX_seal(ctx, out.data() + kMessageOffset, &out_len,
                         out.size() - kMessageOffset, out.data() + kIVOffset,
                         kIVSize, reinterpret_cast<const uint8_t*>(in.data()),
                         in.size(), nullptr, 0)) {
    return std::vector<uint8_t>();
  }
  out.resize(out_len + kMessageOffset);
  return out;
}
// Opens a ticket produced by Encrypt().  The epoch byte selects the current
// key or, if it matches epoch-1 and a previous key exists, that key —
// keeping recently issued tickets valid across one rotation.  Any failure
// (too short, unknown epoch, AEAD authentication) yields an empty vector.
std::vector<uint8_t> SimpleTicketCrypter::Decrypt(absl::string_view in) {
  MaybeRotateKeys();
  if (in.size() < kMessageOffset) {
    return std::vector<uint8_t>();
  }
  const uint8_t* input = reinterpret_cast<const uint8_t*>(in.data());
  std::vector<uint8_t> out(in.size() - kMessageOffset);
  size_t out_len;
  const EVP_AEAD_CTX* ctx = current_key_->aead_ctx.get();
  if (input[0] != key_epoch_) {
    if (input[0] == static_cast<uint8_t>(key_epoch_ - 1) && previous_key_) {
      ctx = previous_key_->aead_ctx.get();
    } else {
      return std::vector<uint8_t>();
    }
  }
  if (!EVP_AEAD_CTX_open(ctx, out.data(), &out_len, out.size(),
                         input + kIVOffset, kIVSize, input + kMessageOffset,
                         in.size() - kMessageOffset, nullptr, 0)) {
    return std::vector<uint8_t>();
  }
  out.resize(out_len);
  return out;
}
// Asynchronous-interface adapter: decryption here is synchronous, so the
// callback is invoked immediately with the result.
void SimpleTicketCrypter::Decrypt(
    absl::string_view in,
    std::shared_ptr<quic::ProofSource::DecryptCallback> callback) {
  std::vector<uint8_t> plaintext = Decrypt(in);
  callback->Run(std::move(plaintext));
}
// Rotates the active key once it has expired, keeping exactly one previous
// key so outstanding tickets survive a single rotation.  The epoch wraps
// mod 256.
void SimpleTicketCrypter::MaybeRotateKeys() {
  QuicTime now = clock_->ApproximateNow();
  if (current_key_->expiration < now) {
    previous_key_ = std::move(current_key_);
    current_key_ = NewKey();
    key_epoch_++;
  }
}
// Creates a fresh random AES-128-GCM key valid for kTicketKeyLifetime.
std::unique_ptr<SimpleTicketCrypter::Key> SimpleTicketCrypter::NewKey() {
  auto key = std::make_unique<SimpleTicketCrypter::Key>();
  RAND_bytes(key->key, kKeySize);
  EVP_AEAD_CTX_init(key->aead_ctx.get(), EVP_aead_aes_128_gcm(), key->key,
                    kKeySize, EVP_AEAD_DEFAULT_TAG_LENGTH, nullptr);
  key->expiration = clock_->ApproximateNow() + kTicketKeyLifetime;
  return key;
}
} | #include "quiche/quic/tools/simple_ticket_crypter.h"
#include <memory>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
namespace {
constexpr QuicTime::Delta kOneDay = QuicTime::Delta::FromSeconds(60 * 60 * 24);
}
// Test callback that copies the decrypted plaintext into a caller-owned
// vector.
class DecryptCallback : public quic::ProofSource::DecryptCallback {
 public:
  explicit DecryptCallback(std::vector<uint8_t>* out) : out_(out) {}
  void Run(std::vector<uint8_t> plaintext) override { *out_ = plaintext; }

 private:
  std::vector<uint8_t>* out_;
};
// Views a byte vector as a string_view (no copy; `in` must outlive it).
absl::string_view StringPiece(const std::vector<uint8_t>& in) {
  return absl::string_view(reinterpret_cast<const char*>(in.data()), in.size());
}
// Fixture wiring the crypter to a controllable mock clock.
class SimpleTicketCrypterTest : public QuicTest {
 public:
  SimpleTicketCrypterTest() : ticket_crypter_(&mock_clock_) {}

 protected:
  MockClock mock_clock_;
  SimpleTicketCrypter ticket_crypter_;
};
// Round-trip: decrypting an encrypted ticket restores the plaintext.
TEST_F(SimpleTicketCrypterTest, EncryptDecrypt) {
  std::vector<uint8_t> plaintext = {1, 2, 3, 4, 5};
  std::vector<uint8_t> ciphertext =
      ticket_crypter_.Encrypt(StringPiece(plaintext), {});
  EXPECT_NE(plaintext, ciphertext);
  std::vector<uint8_t> out_plaintext;
  ticket_crypter_.Decrypt(StringPiece(ciphertext),
                          std::make_unique<DecryptCallback>(&out_plaintext));
  EXPECT_EQ(out_plaintext, plaintext);
}
// Random IVs make repeated encryptions of the same plaintext differ.
TEST_F(SimpleTicketCrypterTest, CiphertextsDiffer) {
  std::vector<uint8_t> plaintext = {1, 2, 3, 4, 5};
  std::vector<uint8_t> ciphertext1 =
      ticket_crypter_.Encrypt(StringPiece(plaintext), {});
  std::vector<uint8_t> ciphertext2 =
      ticket_crypter_.Encrypt(StringPiece(plaintext), {});
  EXPECT_NE(ciphertext1, ciphertext2);
}
// Flipping any single bit of the ticket must break AEAD authentication.
TEST_F(SimpleTicketCrypterTest, DecryptionFailureWithModifiedCiphertext) {
  std::vector<uint8_t> plaintext = {1, 2, 3, 4, 5};
  std::vector<uint8_t> ciphertext =
      ticket_crypter_.Encrypt(StringPiece(plaintext), {});
  EXPECT_NE(plaintext, ciphertext);
  for (size_t i = 0; i < ciphertext.size(); i++) {
    SCOPED_TRACE(i);
    std::vector<uint8_t> munged_ciphertext = ciphertext;
    munged_ciphertext[i] ^= 1;
    std::vector<uint8_t> out_plaintext;
    ticket_crypter_.Decrypt(StringPiece(munged_ciphertext),
                            std::make_unique<DecryptCallback>(&out_plaintext));
    EXPECT_TRUE(out_plaintext.empty());
  }
}
// Too-short (empty) input fails cleanly.
TEST_F(SimpleTicketCrypterTest, DecryptionFailureWithEmptyCiphertext) {
  std::vector<uint8_t> out_plaintext;
  ticket_crypter_.Decrypt(absl::string_view(),
                          std::make_unique<DecryptCallback>(&out_plaintext));
  EXPECT_TRUE(out_plaintext.empty());
}
// A ticket survives one key rotation (previous key kept) but not two.
TEST_F(SimpleTicketCrypterTest, KeyRotation) {
  std::vector<uint8_t> plaintext = {1, 2, 3};
  std::vector<uint8_t> ciphertext =
      ticket_crypter_.Encrypt(StringPiece(plaintext), {});
  EXPECT_FALSE(ciphertext.empty());
  mock_clock_.AdvanceTime(kOneDay * 8);
  std::vector<uint8_t> out_plaintext;
  ticket_crypter_.Decrypt(StringPiece(ciphertext),
                          std::make_unique<DecryptCallback>(&out_plaintext));
  EXPECT_EQ(out_plaintext, plaintext);
  mock_clock_.AdvanceTime(kOneDay * 8);
  ticket_crypter_.Decrypt(StringPiece(ciphertext),
                          std::make_unique<DecryptCallback>(&out_plaintext));
  EXPECT_TRUE(out_plaintext.empty());
}
}
} |
205 | #ifndef AROLLA_QTYPE_ANY_QTYPE_H_
#define AROLLA_QTYPE_ANY_QTYPE_H_
#include <any>
#include <cstdint>
#include <functional>
#include <type_traits>
#include <typeinfo>
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
// Type-erased value wrapper with identity-based fingerprinting: every
// non-empty Any gets a random 128-bit fingerprint at construction, so two
// independently constructed instances never compare fingerprint-equal,
// while copies share the original's fingerprint.
class Any {
 public:
  // Empty Any.  The fingerprint fields now have default member
  // initializers: the previous defaulted constructor left them
  // uninitialized, so ArollaFingerprint() on an empty Any read
  // indeterminate values (undefined behavior).
  Any() = default;
  Any(Any&&) = default;
  Any(const Any&) = default;
  Any& operator=(Any&&) = default;
  Any& operator=(const Any&) = default;

  // Wraps `v` and draws a fresh random fingerprint.
  template <typename T,
            typename = std::enable_if_t<!std::is_same_v<Any, std::decay_t<T>>>>
  explicit Any(T&& v) : value_(std::forward<T>(v)) {
    absl::BitGen b;
    fingerprint_val1_ = absl::Uniform<uint64_t>(b);
    fingerprint_val2_ = absl::Uniform<uint64_t>(b);
  }

  // Returns a const reference to the stored value if it has exactly type T,
  // otherwise a FailedPrecondition error describing the mismatch.
  template <typename T>
  absl::StatusOr<std::reference_wrapper<const T>> As() const {
    const T* v = std::any_cast<T>(&value_);
    if (v) {
      return *v;
    } else {
      return InvalidCast(typeid(T));
    }
  }

  // Combines the (random) fingerprint into `hasher`.
  void ArollaFingerprint(FingerprintHasher* hasher) const {
    hasher->Combine(fingerprint_val1_, fingerprint_val2_);
  }

 private:
  std::any value_;
  // Zero-initialized so an empty Any has a deterministic fingerprint.
  uint64_t fingerprint_val1_ = 0;
  uint64_t fingerprint_val2_ = 0;
  absl::Status InvalidCast(const std::type_info& t) const;
};
AROLLA_DECLARE_SIMPLE_QTYPE(ANY, Any);
}
#endif
#include "arolla/qtype/any_qtype.h"
#include <typeinfo>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/demangle.h"
namespace arolla {
// Builds the FailedPrecondition error returned by As<T>() on a type
// mismatch, naming the stored (demangled) type when a value is held.
absl::Status Any::InvalidCast(const std::type_info& t) const {
  if (value_.has_value()) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "can not cast Any(%s) to %s", TypeName(value_.type()), TypeName(t)));
  } else {
    return absl::FailedPreconditionError("can not cast an empty ::arolla::Any");
  }
}
AROLLA_DEFINE_SIMPLE_QTYPE(ANY, Any);
} | #include "arolla/qtype/any_qtype.h"
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
// Regression: all special members of Any must be usable.
TEST(AnyQType, AnyConstructorRegression) {
  Any any;
  Any copy_1 = any;
  Any copy_2(any);
  Any copy_3 = std::move(any);
  Any copy_4(std::move(copy_2));
}
// As<T>() succeeds for the exact stored type and fails otherwise; an empty
// Any reports a dedicated error message.
TEST(AnyQType, Any) {
  int v1 = 5;
  std::string v2 = "string";
  TypedValue tv1 = TypedValue::FromValue(Any(v1));
  TypedValue tv2 = TypedValue::FromValue(Any(v2));
  TypedValue tv3 = TypedValue::FromValue(Any());
  ASSERT_OK_AND_ASSIGN(const Any& a1, tv1.As<Any>());
  ASSERT_OK_AND_ASSIGN(const Any& a2, tv2.As<Any>());
  ASSERT_OK_AND_ASSIGN(const Any& a3, tv3.As<Any>());
  EXPECT_THAT(a1.As<int>(), IsOkAndHolds(v1));
  EXPECT_THAT(a1.As<double>(), StatusIs(absl::StatusCode::kFailedPrecondition,
                                        HasSubstr("can not cast Any")));
  ASSERT_OK_AND_ASSIGN(const std::string& v2_res, a2.As<std::string>());
  EXPECT_EQ(v2, v2_res);
  EXPECT_THAT(a2.As<double>(), StatusIs(absl::StatusCode::kFailedPrecondition,
                                        HasSubstr("can not cast Any")));
  EXPECT_THAT(a3.As<double>(),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("can not cast an empty ::arolla::Any")));
}
// Independently constructed equal values fingerprint differently; a copy
// shares the original's fingerprint.
TEST(AnyQType, Fingerprint) {
  Any a = Any(1);
  Any b = Any(1);
  Any a_copy = a;
  EXPECT_NE(TypedValue::FromValue(a).GetFingerprint(),
            TypedValue::FromValue(b).GetFingerprint());
  EXPECT_EQ(TypedValue::FromValue(a).GetFingerprint(),
            TypedValue::FromValue(a_copy).GetFingerprint());
}
}
} |
206 | #ifndef XLA_SERVICE_GPU_RUNTIME_KERNEL_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_KERNEL_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
namespace xla {
namespace gpu {
class GpuExecutable;
// Thunk that launches a device kernel compiled from the module.  The kernel
// is loaded lazily per StreamExecutor in Initialize() and cached under
// mutex_; ExecuteOnStream() requires a prior Initialize() for its executor.
class KernelThunk : public Thunk {
 public:
  KernelThunk(const HloInstruction* instr, std::string kernel_name,
              absl::Span<const KernelArgument> kernel_arguments,
              LaunchDimensions launch_dimensions,
              std::optional<se::ClusterDim> cluster_dim, int64_t shmem_bytes);
  KernelThunk(const KernelThunk&) = delete;
  KernelThunk& operator=(const KernelThunk&) = delete;
  ~KernelThunk() override = default;
  std::string ToString(int indent) const override;
  absl::Status Initialize(const InitializeParams& params) override;
  absl::Status ExecuteOnStream(const ExecuteParams& params) override;
  // Deduplicated buffer slices passed as kernel arguments, in order.
  const std::vector<BufferAllocation::Slice>& arguments() const {
    return args_;
  }
  // Parallel to arguments(): whether each buffer is written by the kernel.
  const std::vector<bool>& written() const { return written_; }
  const std::string& kernel_name() const { return kernel_name_; }
  const LaunchDimensions& launch_dimensions() const {
    return launch_dimensions_;
  }
  int64_t shmem_bytes() const { return shmem_bytes_; }

 private:
  std::vector<BufferAllocation::Slice> args_;
  std::vector<bool> written_;
  const std::string kernel_name_;
  const LaunchDimensions launch_dimensions_;
  const std::optional<se::ClusterDim> cluster_dim_;
  int64_t shmem_bytes_;
  // Guards kernel_cache_; one loaded kernel per StreamExecutor.
  mutable absl::Mutex mutex_;
  absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
      kernel_cache_ ABSL_GUARDED_BY(mutex_);
};
// Thunk wrapping a pre-built CustomKernel.  Mirrors KernelThunk, but launch
// dimensions and shared-memory size come from the custom kernel itself.
class CustomKernelThunk : public Thunk {
 public:
  CustomKernelThunk(const HloInstruction* inst, CustomKernel custom_kernel,
                    absl::Span<const KernelArgument> kernel_arguments);
  std::string ToString(int indent) const override;
  absl::Status Initialize(const InitializeParams& params) override;
  absl::Status ExecuteOnStream(const ExecuteParams& params) override;
  const CustomKernel& custom_kernel() const { return custom_kernel_; }
  const std::vector<BufferAllocation::Slice>& arguments() const {
    return args_;
  }
  std::string_view custom_kernel_name() const { return custom_kernel_.name(); }
  const std::vector<bool>& written() const { return written_; }
  LaunchDimensions launch_dimensions() const {
    return LaunchDimensions(custom_kernel_.block_dims(),
                            custom_kernel_.thread_dims());
  }
  int64_t shmem_bytes() const { return custom_kernel_.shared_memory_bytes(); }

 private:
  std::vector<BufferAllocation::Slice> args_;
  std::vector<bool> written_;
  CustomKernel custom_kernel_;
  // Guards kernel_cache_; one loaded kernel per StreamExecutor.
  mutable absl::Mutex mutex_;
  absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
      kernel_cache_ ABSL_GUARDED_BY(mutex_);
};
}
}
#endif
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
// Builds a kernel thunk for `instr`.  Arguments that alias a slice already
// seen (first_with_same_slice set) are dropped so each buffer is passed to
// the kernel exactly once.
KernelThunk::KernelThunk(const HloInstruction* instr, std::string kernel_name,
                         absl::Span<const KernelArgument> kernel_arguments,
                         LaunchDimensions launch_dimensions,
                         std::optional<se::ClusterDim> cluster_dim,
                         int64_t shmem_bytes)
    : Thunk(Kind::kKernel, Thunk::ThunkInfo::WithProfileAnnotation(instr)),
      kernel_name_(std::move(kernel_name)),
      launch_dimensions_(std::move(launch_dimensions)),
      cluster_dim_(std::move(cluster_dim)),
      shmem_bytes_(shmem_bytes) {
  args_.reserve(kernel_arguments.size());
  written_.reserve(kernel_arguments.size());
  for (const auto& kernel_argument : kernel_arguments) {
    // Keep only the first argument for each distinct slice.
    if (!kernel_argument.first_with_same_slice().has_value()) {
      args_.push_back(kernel_argument.slice());
      written_.push_back(kernel_argument.written());
    }
  }
}
// NOTE: `indent` is unused; the fragment (leading comma intentional) is
// appended to the generic Thunk description.
std::string KernelThunk::ToString(int indent) const {
  return absl::StrFormat(
      ", kernel = %s, launch dimensions = %s, cluster_dim = %s", kernel_name_,
      launch_dimensions_.ToString(),
      cluster_dim_.has_value() ? cluster_dim_->ToString() : "nullopt");
}
// Loads (or reuses) the kernel for params.executor.  Must be called before
// ExecuteOnStream() for that executor.  Thread-safe.
absl::Status KernelThunk::Initialize(const InitializeParams& params) {
  absl::MutexLock lock(&mutex_);
  auto it = kernel_cache_.find(params.executor);
  if (kernel_cache_.end() == it) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<se::Kernel> kernel,
        CreateKernel(kernel_name_, args_.size(), params.src.text,
                     params.src.binary, params.executor, shmem_bytes_));
    kernel_cache_.emplace(params.executor, std::move(kernel));
  }
  return absl::OkStatus();
}
// Debug helper (VLOG level 100): copies each device buffer to the host and
// logs its bytes in hex.  Synchronizes the stream per buffer — debugging
// only, very slow.
static void PrintBufferContents(
    se::Stream* stream, absl::Span<const se::DeviceMemoryBase> buffer_args) {
  int input_idx = 0;
  for (const se::DeviceMemoryBase& buf : buffer_args) {
    auto host_buffer = std::make_unique<char[]>(buf.size());
    CHECK_OK(stream->Memcpy(host_buffer.get(), buf, buf.size()));
    CHECK_OK(stream->BlockHostUntilDone());
    std::string buffer_contents;
    for (size_t i = 0; i < buf.size(); i++) {
      // Convert through unsigned char first: casting a (possibly signed)
      // char straight to unsigned sign-extends, printing bytes >= 0x80 as
      // "ffffff80" etc.
      absl::StrAppendFormat(
          &buffer_contents, "%x ",
          static_cast<unsigned>(static_cast<unsigned char>(host_buffer[i])));
    }
    VLOG(100) << "BUF(" << input_idx++ << ") = " << buffer_contents;
  }
}
// Launches the cached kernel on the thunk's execution stream with the
// resolved buffer arguments.  CHECK-fails if Initialize() was not called
// for this executor.
absl::Status KernelThunk::ExecuteOnStream(const ExecuteParams& params) {
  se::StreamExecutor* executor = params.stream->parent();
  LaunchDimensions launch_dimensions;
  std::optional<se::ClusterDim> cluster_dim;
  const se::Kernel* kernel = nullptr;
  TF_ASSIGN_OR_RETURN(
      se::Stream * stream,
      GetStreamForExecution(Thunk::execution_stream_id(), params));
  {
    // Copy what we need out of the cache while holding the lock, then
    // launch without it.
    absl::MutexLock lock(&mutex_);
    auto it = kernel_cache_.find(executor);
    CHECK(it != kernel_cache_.end())
        << "Initialize() not called for StreamExecutor " << executor;
    launch_dimensions = launch_dimensions_;
    cluster_dim = cluster_dim_;
    kernel = it->second.get();
  }
  VLOG(3) << "Launching " << kernel->name();
  absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
  for (const BufferAllocation::Slice& arg : args_) {
    se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
    VLOG(3) << "  Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
            << ": " << buf.opaque() << " (" << buf.size() << "B)";
    buffer_args.push_back(buf);
  }
  if (VLOG_IS_ON(100)) {
    PrintBufferContents(stream, buffer_args);
  }
  if (cluster_dim.has_value()) {
    return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
                                 cluster_dim.value(), stream);
  } else {
    return ExecuteKernelOnStream(*kernel, buffer_args, launch_dimensions,
                                 stream);
  }
}
// Builds a thunk around a pre-built custom kernel; arguments sharing a
// slice with an earlier argument are deduplicated, mirroring KernelThunk.
CustomKernelThunk::CustomKernelThunk(
    const HloInstruction* instr, CustomKernel custom_kernel,
    absl::Span<const KernelArgument> kernel_arguments)
    : Thunk(Kind::kCustomKernel,
            Thunk::ThunkInfo::WithProfileAnnotation(instr)),
      custom_kernel_(std::move(custom_kernel)) {
  args_.reserve(kernel_arguments.size());
  written_.reserve(kernel_arguments.size());
  for (const auto& kernel_argument : kernel_arguments) {
    // Keep only the first argument for each distinct slice.
    if (!kernel_argument.first_with_same_slice().has_value()) {
      args_.push_back(kernel_argument.slice());
      written_.push_back(kernel_argument.written());
    }
  }
}
// Delegates to the custom kernel's own description; `indent` is unused.
std::string CustomKernelThunk::ToString(int indent) const {
  return custom_kernel_.ToString();
}
// Loads (or reuses) the kernel for params.executor from its kernel spec.
absl::Status CustomKernelThunk::Initialize(const InitializeParams& params) {
  absl::MutexLock lock(&mutex_);
  auto it = kernel_cache_.find(params.executor);
  if (kernel_cache_.end() == it) {
    TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel,
                        se::KernelFactory::Create(
                            params.executor, custom_kernel_.kernel_spec()));
    kernel_cache_.emplace(params.executor, std::move(kernel));
  }
  return absl::OkStatus();
}
// Launches the cached custom kernel on params.stream using the kernel's own
// thread/block (and optional cluster) dimensions.
absl::Status CustomKernelThunk::ExecuteOnStream(const ExecuteParams& params) {
  se::StreamExecutor* executor = params.stream->parent();
  // NOTE: operator[] default-inserts a null entry if Initialize() was never
  // called for this executor; kernel->name() below would then crash.
  const se::Kernel* kernel = [&] {
    absl::MutexLock lock(&mutex_);
    return kernel_cache_[executor].get();
  }();
  VLOG(3) << "Launching " << custom_kernel_.ToString() << " as device kernel "
          << kernel->name();
  absl::InlinedVector<se::DeviceMemoryBase, 4> buffer_args;
  for (const BufferAllocation::Slice& arg : args_) {
    se::DeviceMemoryBase buf = params.buffer_allocations->GetDeviceAddress(arg);
    VLOG(3) << "  Arg: alloc #" << arg.index() << ", offset: " << arg.offset()
            << ": " << buf.opaque() << " (" << buf.size() << "B)";
    buffer_args.push_back(buf);
  }
  if (VLOG_IS_ON(100)) {
    PrintBufferContents(params.stream, buffer_args);
  }
  se::KernelArgsDeviceMemoryArray args(buffer_args,
                                       custom_kernel_.shared_memory_bytes());
  if (auto cluster = custom_kernel_.cluster_dims(); cluster.has_value()) {
    return params.stream->Launch(custom_kernel_.thread_dims(),
                                 custom_kernel_.block_dims(), *cluster, *kernel,
                                 args);
  } else {
    return params.stream->Launch(custom_kernel_.thread_dims(),
                                 custom_kernel_.block_dims(), *kernel, args);
  }
}
}
} | #include "xla/service/cpu/runtime/kernel_thunk.h"
#include <cstddef>
#include <cstdint>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/cpu/runtime/buffer_allocations.h"
#include "xla/service/cpu/runtime/thunk.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/host/host_kernel_c_api.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
// Host-kernel registry stub: always resolves to an element-wise
// out[i] = in[i] + in[i] kernel, indexed by the thread's x coordinate.
class AddF32HostKernels : public Thunk::HostKernels {
 public:
  absl::StatusOr<SE_HOST_Kernel*> Find(std::string_view name) override {
    return +[](const SE_HOST_KernelCallFrame* call_frame) {
      const SE_HOST_KernelArg& in = call_frame->args[0];
      const SE_HOST_KernelArg& out = call_frame->args[1];
      float* in_ptr = reinterpret_cast<float*>(in.data);
      float* out_ptr = reinterpret_cast<float*>(out.data);
      uint64_t i = call_frame->thread->x;
      *(out_ptr + i) = *(in_ptr + i) + *(in_ptr + i);
      return static_cast<SE_HOST_KernelError*>(nullptr);
    };
  }
};
// A non-power-of-two minimum alignment must be rejected at creation.
TEST(KernelThunkTest, CheckAlignment) {
  auto thunk = KernelThunk::Create({"test"}, {}, {}, "test", se::ThreadDim(),
                                   3);
  EXPECT_TRUE(absl::StrContains(thunk.status().message(),
                                "minimum alignment 3 is not a power of 2"));
}
// End-to-end: run the add kernel over 4 elements and check doubling.
TEST(KernelThunkTest, AddF32) {
  std::vector<MaybeOwningDeviceMemory> buffers;
  std::vector<float> in = {1.0, 2.0, 3.0, 4.0};
  std::vector<float> out(4, 0.0);
  size_t size_in_bytes = in.size() * sizeof(float);
  buffers.emplace_back(se::DeviceMemoryBase(in.data(), size_in_bytes));
  buffers.emplace_back(se::DeviceMemoryBase(out.data(), size_in_bytes));
  BufferAllocations allocations(buffers);
  BufferAllocation in_alloc(0, size_in_bytes, 0);
  BufferAllocation out_alloc(1, size_in_bytes, 0);
  BufferAllocation::Slice in_slice(&in_alloc, 0, size_in_bytes);
  BufferAllocation::Slice out_slice(&out_alloc, 0, size_in_bytes);
  TF_ASSERT_OK_AND_ASSIGN(
      auto thunk, KernelThunk::Create({"add_f32"}, {in_slice}, {out_slice},
                                      "add_f32", se::ThreadDim(4)));
  AddF32HostKernels host_kernels;
  Thunk::ExecuteParams params = {&host_kernels, &allocations};
  auto execute_event = thunk->Execute(params);
  tsl::BlockUntilReady(execute_event);
  ASSERT_FALSE(execute_event.IsError());
  std::vector<float> expected = {2.0, 4.0, 6.0, 8.0};
  EXPECT_EQ(out, expected);
}
}
} |
207 | #ifndef TENSORFLOW_TSL_PROFILER_UTILS_PARSE_ANNOTATION_H_
#define TENSORFLOW_TSL_PROFILER_UTILS_PARSE_ANNOTATION_H_
#include <vector>
#include "absl/strings/string_view.h"
namespace tsl {
namespace profiler {
// Parsed form of a profiler annotation: a name plus optional key/value
// metadata.  All string_views alias the original annotation buffer, which
// must outlive this struct.
struct Annotation {
  absl::string_view name;
  struct Metadata {
    absl::string_view key;
    absl::string_view value;
  };
  std::vector<Metadata> metadata;
};
Annotation ParseAnnotation(absl::string_view annotation);
// True if the annotation carries "#key=value,...#" metadata, which is
// signalled by a trailing '#' marker.
inline bool HasMetadata(absl::string_view annotation) {
  constexpr char kUserMetadataMarker = '#';
  if (annotation.empty()) return false;
  return annotation.back() == kUserMetadataMarker;
}
std::vector<Annotation> ParseAnnotationStack(
absl::string_view annotation_stack);
}
}
#endif
#include "tsl/profiler/utils/parse_annotation.h"
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
namespace tsl {
namespace profiler {
namespace {
// Splits "name#metadata#" into exactly two views: {name, metadata}.
// Without the trailing '#' marker the whole string is the name and the
// metadata element is empty.  Extra '#'-separated fields beyond the first
// two are discarded.
std::vector<absl::string_view> SplitNameAndMetadata(
    absl::string_view annotation) {
  std::vector<absl::string_view> parts;
  if (!HasMetadata(annotation)) {
    parts.emplace_back(annotation);
  } else {
    // Drop the trailing marker before splitting on '#'.
    annotation.remove_suffix(1);
    parts = absl::StrSplit(annotation, '#');
    if (parts.size() > 2) {
      parts.resize(2);
    }
  }
  // Pad with empty views so the result always has two elements.
  while (parts.size() < 2) {
    parts.emplace_back();
  }
  return parts;
}
// Splits metadata on commas that are not inside quotes ('...', "...") or
// brackets ({}, (), []), tracked with a stack so nesting is respected.
// NOTE: fragments of length <= 1 between commas are silently dropped
// (`end - start > 1`).  Mismatched closers are ignored.
std::vector<absl::string_view> SplitPairs(absl::string_view metadata) {
  std::vector<absl::string_view> key_value_pairs;
  std::stack<char> quotes;
  size_t start = 0, end = 0;
  for (; end < metadata.size(); ++end) {
    char ch = metadata[end];
    switch (ch) {
      case '\"':
      case '\'':
        // Quotes toggle: push on open, pop on the matching close.
        if (quotes.empty() || quotes.top() != ch) {
          quotes.push(ch);
        } else {
          quotes.pop();
        }
        break;
      case '{':
      case '(':
      case '[':
        quotes.push(ch);
        break;
      case '}':
        if (!quotes.empty() && quotes.top() == '{') {
          quotes.pop();
        }
        break;
      case ')':
        if (!quotes.empty() && quotes.top() == '(') {
          quotes.pop();
        }
        break;
      case ']':
        if (!quotes.empty() && quotes.top() == '[') {
          quotes.pop();
        }
        break;
      case ',':
        // Only a top-level comma terminates a pair.
        if (quotes.empty()) {
          if (end - start > 1) {
            key_value_pairs.emplace_back(metadata.data() + start, end - start);
          }
          start = end + 1;
        }
        break;
    }
  }
  // Emit the trailing fragment, if any.
  if (end - start > 1) {
    key_value_pairs.emplace_back(metadata.data() + start, end - start);
  }
  return key_value_pairs;
}
// Parses "k1=v1,k2=v2,..." into (key, value) pairs.  Fragments without '='
// or whose key/value is empty after whitespace stripping are skipped.
std::vector<std::pair<absl::string_view, absl::string_view>> ParseMetadata(
    absl::string_view metadata) {
  std::vector<std::pair<absl::string_view, absl::string_view>> result;
  for (absl::string_view fragment : SplitPairs(metadata)) {
    // Split on the first '=' only; values may themselves contain '='.
    std::vector<absl::string_view> split =
        absl::StrSplit(fragment, absl::MaxSplits('=', 1));
    if (split.size() != 2) continue;
    absl::string_view key = absl::StripAsciiWhitespace(split[0]);
    absl::string_view value = absl::StripAsciiWhitespace(split[1]);
    if (!key.empty() && !value.empty()) {
      result.emplace_back(key, value);
    }
  }
  return result;
}
}
// Parses one "name#k1=v1,k2=v2#" annotation.  The name is whitespace-
// stripped; metadata is absent unless the trailing '#' marker is present.
Annotation ParseAnnotation(absl::string_view annotation) {
  Annotation parsed;
  std::vector<absl::string_view> name_and_metadata =
      SplitNameAndMetadata(annotation);
  if (!name_and_metadata.empty()) {
    parsed.name = absl::StripAsciiWhitespace(name_and_metadata[0]);
    for (const auto& [key, value] : ParseMetadata(name_and_metadata[1])) {
      parsed.metadata.push_back({key, value});
    }
  }
  return parsed;
}
// Parses a "::"-separated stack of annotations, outermost first.  Empty
// frames (e.g. from leading/trailing delimiters) are skipped.
std::vector<Annotation> ParseAnnotationStack(
    absl::string_view annotation_stack) {
  std::vector<Annotation> annotations;
  for (absl::string_view frame :
       absl::StrSplit(annotation_stack, "::", absl::SkipEmpty())) {
    annotations.push_back(ParseAnnotation(frame));
  }
  return annotations;
}
}
} | #include "tsl/profiler/utils/parse_annotation.h"
#include <vector>
#include "absl/strings/string_view.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
// An empty stack parses to no annotations.
TEST(ParseAnnotationStackTest, EmptyAnnotationStackTest) {
  std::vector<Annotation> annotations = ParseAnnotationStack("");
  ASSERT_TRUE(annotations.empty());
}
// A single name with no "::" yields one metadata-free annotation.
TEST(ParseAnnotationStackTest, SingleAnnotationStackTest) {
  std::vector<Annotation> annotations = ParseAnnotationStack("name");
  ASSERT_FALSE(annotations.empty());
  EXPECT_EQ(annotations.back().name, "name");
  EXPECT_TRUE(annotations.back().metadata.empty());
}
// "outer::inner" yields two annotations, outermost first.
TEST(ParseAnnotationStackTest, MultiLevelAnnotationStackTest) {
  std::vector<Annotation> annotations = ParseAnnotationStack("outer::inner");
  ASSERT_EQ(annotations.size(), 2);
  EXPECT_EQ(annotations.front().name, "outer");
  EXPECT_TRUE(annotations.front().metadata.empty());
  EXPECT_EQ(annotations.back().name, "inner");
  EXPECT_TRUE(annotations.back().metadata.empty());
}
// Empty input parses to an empty name and no metadata.
TEST(ParseAnnotationTest, EmptyAnnotationTest) {
  Annotation annotation = ParseAnnotation("");
  EXPECT_TRUE(annotation.name.empty());
  EXPECT_TRUE(annotation.metadata.empty());
}
// A bare name with no '#' has no metadata.
TEST(ParseAnnotationTest, SimpleNameTest) {
  Annotation annotation = ParseAnnotation("name");
  EXPECT_EQ(annotation.name, "name");
  EXPECT_TRUE(annotation.metadata.empty());
}
// Trailing whitespace is stripped from the name.
TEST(ParseAnnotationTest, SimpleNameWithWhitespaceTest) {
  Annotation annotation = ParseAnnotation("name ");
  EXPECT_EQ(annotation.name, "name");
  EXPECT_TRUE(annotation.metadata.empty());
}
// '#' delimiters with nothing between them produce no metadata.
TEST(ParseAnnotationTest, EmptyMetadataTest) {
  Annotation annotation = ParseAnnotation("name#");
  EXPECT_EQ(annotation.name, "name");
  EXPECT_TRUE(annotation.metadata.empty());
  annotation = ParseAnnotation("name1##");
  EXPECT_EQ(annotation.name, "name1");
  EXPECT_TRUE(annotation.metadata.empty());
  annotation = ParseAnnotation("name2###");
  EXPECT_EQ(annotation.name, "name2");
  EXPECT_TRUE(annotation.metadata.empty());
}
// A single key=value pair between '#' markers is parsed.
TEST(ParseAnnotationTest, SingleMetadataTest) {
  Annotation annotation = ParseAnnotation("name#key=value#");
  EXPECT_EQ(annotation.name, "name");
  ASSERT_EQ(annotation.metadata.size(), 1);
  EXPECT_EQ(annotation.metadata.at(0).key, "key");
  EXPECT_EQ(annotation.metadata.at(0).value, "value");
}
// Comma-separated pairs are parsed in order.
TEST(ParseAnnotationTest, MultipleMetadataTest) {
  Annotation annotation = ParseAnnotation("name#k1=v1,k2=v2,k3=v3#");
  EXPECT_EQ(annotation.name, "name");
  ASSERT_EQ(annotation.metadata.size(), 3);
  EXPECT_EQ(annotation.metadata.at(0).key, "k1");
  EXPECT_EQ(annotation.metadata.at(0).value, "v1");
  EXPECT_EQ(annotation.metadata.at(1).key, "k2");
  EXPECT_EQ(annotation.metadata.at(1).value, "v2");
  EXPECT_EQ(annotation.metadata.at(2).key, "k3");
  EXPECT_EQ(annotation.metadata.at(2).value, "v3");
}
// Whitespace around keys/values is stripped; empty pairs are dropped.
TEST(ParseAnnotationTest, MultipleMetadataWithWhitespaceTest) {
  Annotation annotation = ParseAnnotation("name # k1 = v1, ,k2=v2 #");
  EXPECT_EQ(annotation.name, "name");
  ASSERT_EQ(annotation.metadata.size(), 2);
  EXPECT_EQ(annotation.metadata.at(0).key, "k1");
  EXPECT_EQ(annotation.metadata.at(0).value, "v1");
  EXPECT_EQ(annotation.metadata.at(1).key, "k2");
  EXPECT_EQ(annotation.metadata.at(1).value, "v2");
}
// Only the first '=' separates key from value; pairs missing a key or a
// value are dropped.
TEST(ParseAnnotationTest, KeyValueSeparatorTest) {
  Annotation annotation = ParseAnnotation("name#=v1,k2=,k3==v3,k4=v4=#");
  EXPECT_EQ(annotation.name, "name");
  ASSERT_EQ(annotation.metadata.size(), 2);
  EXPECT_EQ(annotation.metadata.at(0).key, "k3");
  EXPECT_EQ(annotation.metadata.at(0).value, "=v3");
  EXPECT_EQ(annotation.metadata.at(1).key, "k4");
  EXPECT_EQ(annotation.metadata.at(1).value, "v4=");
}
// More than two '#' markers invalidates the metadata section entirely.
TEST(ParseAnnotationTest, ExtraMetadataSeparatorTest) {
  Annotation annotation = ParseAnnotation("name##k1=v1#");
  EXPECT_EQ(annotation.name, "name");
  EXPECT_TRUE(annotation.metadata.empty());
}
// Commas inside (), [], {}, "" or '' do not split pairs.
TEST(ParseAnnotationTest, QuotedMetadata) {
  Annotation annotation = ParseAnnotation(
      "name#k1=(v11,v12),k2=[v21,v22,v23],k3={v31,v32}, k4=\"v41,v42\","
      "(k51,k52)='v51,v52'#");
  EXPECT_EQ(annotation.metadata.at(0).key, "k1");
  EXPECT_EQ(annotation.metadata.at(0).value, "(v11,v12)");
  EXPECT_EQ(annotation.metadata.at(1).key, "k2");
  EXPECT_EQ(annotation.metadata.at(1).value, "[v21,v22,v23]");
  EXPECT_EQ(annotation.metadata.at(2).key, "k3");
  EXPECT_EQ(annotation.metadata.at(2).value, "{v31,v32}");
  EXPECT_EQ(annotation.metadata.at(3).key, "k4");
  EXPECT_EQ(annotation.metadata.at(3).value, "\"v41,v42\"");
  EXPECT_EQ(annotation.metadata.at(4).key, "(k51,k52)");
  EXPECT_EQ(annotation.metadata.at(4).value, "'v51,v52'");
}
// An unmatched opening bracket swallows the rest of the string into the
// value.
TEST(ParseAnnotationTest, UnmatchedQuotedMetadata) {
  Annotation annotation = ParseAnnotation("name#k1=v1,k2=(v2,k3=v3#");
  EXPECT_EQ(annotation.metadata.at(0).key, "k1");
  EXPECT_EQ(annotation.metadata.at(0).value, "v1");
  EXPECT_EQ(annotation.metadata.at(1).key, "k2");
  EXPECT_EQ(annotation.metadata.at(1).value, "(v2,k3=v3");
}
}
}
} |
208 | #ifndef TENSORSTORE_INTERNAL_JSON_GTEST_H_
#define TENSORSTORE_INTERNAL_JSON_GTEST_H_
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <nlohmann/json.hpp>
namespace nlohmann {
// GoogleTest printer extension point: renders json values via dump() in
// failure messages.
inline void PrintTo(json const& j, std::ostream* os) { *os << j.dump(); }
}
namespace tensorstore {
// Returns a matcher that matches a json value exactly (compared with
// internal_json::JsonSame).
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j);
// Returns a matcher that resolves `json_pointer` (JSON pointer syntax,
// e.g. "/a/b") within the matched value and applies `value_matcher` to it.
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
    std::string json_pointer,
    ::testing::Matcher<::nlohmann::json> value_matcher);
// Overload: the resolved sub value must exactly equal `value_matcher`.
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
    std::string json_pointer, ::nlohmann::json value_matcher);
// Returns a matcher requiring every (pointer, expected value) pair to match.
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
    std::vector<std::pair<std::string, ::nlohmann::json>> matchers);
}
#endif
#include "tensorstore/internal/json_gtest.h"
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/same.h"
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/quote_string.h"
namespace tensorstore {
namespace {
// Matcher implementation backing MatchesJson: compares with
// internal_json::JsonSame and reports a structural diff on mismatch.
class JsonMatcherImpl : public ::testing::MatcherInterface<::nlohmann::json> {
 public:
  JsonMatcherImpl(::nlohmann::json value) : value_(std::move(value)) {}
  bool MatchAndExplain(
      ::nlohmann::json value_untyped,
      ::testing::MatchResultListener* listener) const override {
    if (!internal_json::JsonSame(value_, value_untyped)) {
      if (listener->IsInterested()) {
        // Include nlohmann's structural diff so failures are readable.
        *listener << "where the difference is:\n"
                  << ::nlohmann::json::diff(value_, value_untyped).dump(2);
      }
      return false;
    }
    return true;
  }
  void DescribeTo(std::ostream* os) const override {
    *os << "matches json " << value_;
  }
  void DescribeNegationTo(std::ostream* os) const override {
    *os << "does not match json " << value_;
  }
 private:
  // The expected json value.
  ::nlohmann::json value_;
};
}
// Returns a matcher that succeeds iff the argument is JsonSame as `j`.
::testing::Matcher<::nlohmann::json> MatchesJson(::nlohmann::json j) {
  return ::testing::MakeMatcher(new JsonMatcherImpl(std::move(j)));
}
namespace {
// Matcher implementation backing JsonSubValueMatches: dereferences
// `sub_value_pointer_` within the matched value and delegates to
// `sub_value_matcher_`.  Fails (with explanation) if the pointer cannot be
// resolved.
class JsonPointerMatcherImpl
    : public ::testing::MatcherInterface<::nlohmann::json> {
 public:
  JsonPointerMatcherImpl(std::string sub_value_pointer,
                         ::testing::Matcher<::nlohmann::json> sub_value_matcher)
      : sub_value_pointer_(std::move(sub_value_pointer)),
        sub_value_matcher_(std::move(sub_value_matcher)) {}
  bool MatchAndExplain(
      ::nlohmann::json value_untyped,
      ::testing::MatchResultListener* listener) const override {
    auto sub_value =
        json_pointer::Dereference(value_untyped, sub_value_pointer_);
    if (!sub_value.ok()) {
      if (listener->IsInterested()) {
        *listener << "where the pointer could not be resolved: "
                  << sub_value.status();
      }
      return false;
    }
    if (listener->IsInterested()) {
      // Capture the sub-matcher's own explanation so it can be forwarded.
      ::testing::StringMatchResultListener s;
      if (!sub_value_matcher_.MatchAndExplain(**sub_value, &s)) {
        *listener << "whose sub value doesn't match";
        auto str = s.str();
        if (!str.empty()) {
          *listener << ", " << str;
        }
        return false;
      }
      return true;
    }
    // Fast path: nobody is listening, so skip the explanation plumbing.
    return sub_value_matcher_.Matches(**sub_value);
  }
  void DescribeTo(std::ostream* os) const override {
    *os << "has sub value " << tensorstore::QuoteString(sub_value_pointer_)
        << " that ";
    sub_value_matcher_.DescribeTo(os);
  }
  void DescribeNegationTo(std::ostream* os) const override {
    *os << "does not have sub value "
        << tensorstore::QuoteString(sub_value_pointer_) << " that ";
    sub_value_matcher_.DescribeTo(os);
  }
 private:
  // JSON pointer identifying the sub value to test.
  std::string sub_value_pointer_;
  // Matcher applied to the dereferenced sub value.
  ::testing::Matcher<nlohmann::json> sub_value_matcher_;
};
}
// Returns a matcher that dereferences `json_pointer` in the matched value
// and applies `value_matcher` to the result.
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
    std::string json_pointer,
    ::testing::Matcher<::nlohmann::json> value_matcher) {
  auto* impl = new JsonPointerMatcherImpl(std::move(json_pointer),
                                          std::move(value_matcher));
  return ::testing::MakeMatcher(impl);
}
// Convenience overload: the sub value must exactly equal `value_matcher`
// (interpreted as a json value).
::testing::Matcher<::nlohmann::json> JsonSubValueMatches(
    std::string json_pointer, ::nlohmann::json value_matcher) {
  auto exact_matcher = MatchesJson(std::move(value_matcher));
  return JsonSubValueMatches(std::move(json_pointer), std::move(exact_matcher));
}
// Returns a matcher requiring every (pointer, expected value) pair in
// `matchers` to match simultaneously.
::testing::Matcher<::nlohmann::json> JsonSubValuesMatch(
    std::vector<std::pair<std::string, ::nlohmann::json>> matchers) {
  std::vector<::testing::Matcher<::nlohmann::json>> sub_matchers;
  sub_matchers.reserve(matchers.size());
  for (const auto& [pointer, value] : matchers) {
    sub_matchers.push_back(JsonSubValueMatches(pointer, value));
  }
  return ::testing::AllOfArray(sub_matchers);
}
} | #include "tensorstore/internal/json_gtest.h"
#include <sstream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
namespace {
using ::tensorstore::JsonSubValueMatches;
using ::tensorstore::JsonSubValuesMatch;
using ::tensorstore::MatchesJson;
// Returns the description a matcher produces via DescribeTo.
template <typename MatcherType>
std::string Describe(const MatcherType& m) {
  std::ostringstream ss;
  m.DescribeTo(&ss);
  return ss.str();
}
// Returns the explanation a matcher produces when applied to value `x`.
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
  testing::StringMatchResultListener listener;
  ExplainMatchResult(m, x, &listener);
  return listener.str();
}
// Exercises JsonSubValueMatches, including its description and explanation
// text.
TEST(JsonSubValueMatchesTest, Example) {
  ::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
  EXPECT_THAT(obj, JsonSubValueMatches("/a", 123));
  EXPECT_THAT(obj, JsonSubValueMatches("/b/c", "xyz"));
  EXPECT_THAT(obj,
              JsonSubValueMatches("/b/c", ::testing::Not(MatchesJson("xy"))));
  EXPECT_THAT(Describe(JsonSubValueMatches("/a", 123)),
              "has sub value \"/a\" that matches json 123");
  EXPECT_THAT(Explain(JsonSubValueMatches("/a", 124), obj),
              ::testing::StartsWith(
                  "whose sub value doesn't match, where the difference is:"));
}
// All (pointer, value) pairs must match simultaneously.
TEST(JsonSubValuesMatchTest, Example) {
  ::nlohmann::json obj{{"a", 123}, {"b", {{"c", "xyz"}}}};
  EXPECT_THAT(obj, JsonSubValuesMatch({{"/a", 123}, {"/b/c", "xyz"}}));
}
} |
209 | #ifndef TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_TRANSFORM_GRAPH_H_
#define TENSORFLOW_TOOLS_GRAPH_TRANSFORMS_TRANSFORM_GRAPH_H_
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
// Command-line entry point: parses flags, loads the graph, applies the
// requested transforms, and writes the result.  Returns a process exit
// code (0 on success, -1 on error).
int ParseFlagsAndTransformGraph(int argc, char* argv[], bool init_main);
// Ordered list of (transform name, parameters) to apply.
typedef std::vector<std::pair<string, TransformFuncParameters>>
    TransformParameters;
// Parses a spec such as "foo(a=1, b=2) bar" into `params_list`.
Status ParseTransformParameters(const string& transforms_string,
                                TransformParameters* params_list);
// Applies each transform in `transform_params` to `graph_def` in place,
// validating the graph after every step.
Status TransformGraph(const std::vector<string>& inputs,
                      const std::vector<string>& outputs,
                      const TransformParameters& transform_params,
                      GraphDef* graph_def);
}
}
#endif
#include "tensorflow/tools/graph_transforms/transform_graph.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/tools/graph_transforms/file_utils.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
#if !defined(PLATFORM_WINDOWS)
#include <pwd.h>
#include <unistd.h>
#endif
namespace tensorflow {
namespace graph_transforms {
using tensorflow::strings::Scanner;
// Parses a transform spec such as
//   "fold_constants(ignore_errors=true) strip_unused_nodes"
// into (name, parameter-map) pairs using a small three-state scanner.
Status ParseTransformParameters(const string& transforms_string,
                                TransformParameters* params_list) {
  params_list->clear();
  // Scanner states: expecting a transform name, a parameter name, or a
  // parameter value.
  enum {
    TRANSFORM_NAME,
    TRANSFORM_PARAM_NAME,
    TRANSFORM_PARAM_VALUE,
  } state = TRANSFORM_NAME;
  StringPiece remaining(transforms_string);
  StringPiece match;
  StringPiece transform_name;
  StringPiece parameter_name;
  StringPiece parameter_value;
  TransformFuncParameters func_parameters;
  while (!remaining.empty()) {
    if (state == TRANSFORM_NAME) {
      func_parameters.clear();
      // Skip leading whitespace; whitespace-only remainder ends parsing.
      Scanner(remaining).AnySpace().GetResult(&remaining, &match);
      if (remaining.empty()) {
        return OkStatus();
      }
      const bool found_transform_name =
          Scanner(remaining)
              .Many(Scanner::LETTER_DIGIT_UNDERSCORE)
              .GetResult(&remaining, &transform_name);
      if (!found_transform_name) {
        return errors::InvalidArgument("Looking for transform name, but found ",
                                       string(remaining).c_str());
      }
      // "(" introduces a parameter list; otherwise the transform has none.
      if (Scanner(remaining).OneLiteral("(").GetResult(&remaining, &match)) {
        state = TRANSFORM_PARAM_NAME;
      } else {
        params_list->push_back({string(transform_name), func_parameters});
        transform_name = "";
        state = TRANSFORM_NAME;
      }
    } else if (state == TRANSFORM_PARAM_NAME) {
      // ")" closes the parameter list and commits the transform.
      if (Scanner(remaining).OneLiteral(")").GetResult(&remaining, &match)) {
        params_list->push_back({string(transform_name), func_parameters});
        transform_name = "";
        state = TRANSFORM_NAME;
      } else {
        // Allow an optional comma and whitespace before the next name.
        Scanner(remaining).ZeroOrOneLiteral(",").GetResult(&remaining, &match);
        Scanner(remaining).AnySpace().GetResult(&remaining, &match);
        const bool found_parameter_name =
            Scanner(remaining)
                .Many(Scanner::LETTER_DIGIT_UNDERSCORE)
                .GetResult(&remaining, &parameter_name);
        if (!found_parameter_name) {
          return errors::InvalidArgument(
              "Looking for parameter name, but found ",
              string(remaining).c_str());
        }
        if (Scanner(remaining).OneLiteral("=").GetResult(&remaining, &match)) {
          state = TRANSFORM_PARAM_VALUE;
        } else {
          return errors::InvalidArgument("Looking for =, but found ",
                                         string(remaining).c_str());
        }
      }
    } else if (state == TRANSFORM_PARAM_VALUE) {
      bool found_parameter_value;
      // Values may be double-quoted (allowing commas and escapes) or bare.
      if (Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match)) {
        found_parameter_value =
            Scanner(remaining).ScanEscapedUntil('"').GetResult(
                &remaining, &parameter_value);
        if (found_parameter_value) {
          Scanner(remaining).OneLiteral("\"").GetResult(&remaining, &match);
        }
      } else {
        found_parameter_value =
            Scanner(remaining)
                .Many(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE)
                .GetResult(&remaining, &parameter_value);
      }
      if (!found_parameter_value) {
        return errors::InvalidArgument("Looking for parameter name, but found ",
                                       string(remaining).c_str());
      }
      // Repeated parameter names accumulate multiple values.
      func_parameters[string(parameter_name)].emplace_back(parameter_value);
      // Tolerate stray closing quote characters before the next parameter.
      Scanner(remaining).ZeroOrOneLiteral("\"").GetResult(&remaining, &match);
      Scanner(remaining).ZeroOrOneLiteral("'").GetResult(&remaining, &match);
      state = TRANSFORM_PARAM_NAME;
    }
  }
  return OkStatus();
}
// Expands a leading "~" or "~user" in `path_string` to the corresponding
// home directory (POSIX only; the path is returned unchanged on Windows).
// If the home directory cannot be resolved, the input is returned
// unmodified.
std::string ExpandPath(const std::string& path_string) {
#if defined(PLATFORM_WINDOWS)
  return path_string;
#else
  if (path_string.empty() || path_string[0] != '~') {
    return path_string;
  }
  const std::string::size_type slash = path_string.find_first_of('/');
  const char* home = nullptr;
  if (path_string.length() == 1 || slash == 1) {
    // "~" or "~/...": current user's home, from $HOME or the passwd entry.
    home = getenv("HOME");
    if (home == nullptr) {
      if (struct passwd* pw = getpwuid(getuid())) {
        home = pw->pw_dir;
      }
    }
  } else {
    // "~user" or "~user/...": look up the named user's home directory.
    const std::string user = path_string.substr(
        1, (slash == std::string::npos) ? std::string::npos : slash - 1);
    if (struct passwd* pw = getpwnam(user.c_str())) {
      home = pw->pw_dir;
    }
  }
  if (home == nullptr) {
    return path_string;
  }
  std::string expanded(home);
  if (slash == std::string::npos) {
    return expanded;
  }
  if (expanded.empty() || expanded.back() != '/') {
    expanded += '/';
  }
  expanded += path_string.substr(slash + 1);
  return expanded;
#endif
}
// Parses command-line flags, loads the input graph, applies the requested
// transforms, and writes the transformed graph.  Returns 0 on success and
// -1 on any usage or processing error.
int ParseFlagsAndTransformGraph(int argc, char* argv[], bool init_main) {
  string in_graph_string = "";
  string out_graph_string = "";
  string inputs_string = "";
  string outputs_string = "";
  string transforms_string = "";
  bool output_as_text = false;
  std::vector<Flag> flag_list = {
      Flag("in_graph", &in_graph_string, "input graph file name"),
      Flag("out_graph", &out_graph_string, "output graph file name"),
      Flag("inputs", &inputs_string, "inputs"),
      Flag("outputs", &outputs_string, "outputs"),
      Flag("transforms", &transforms_string, "list of transforms"),
      Flag("output_as_text", &output_as_text,
           "whether to write the graph in text protobuf format"),
  };
  // Build usage text that lists every registered transform.
  string usage = Flags::Usage(argv[0], flag_list);
  usage += "\nTransforms are:\n";
  TransformRegistry* transform_registry = GetTransformRegistry();
  for (const auto& pair : *transform_registry) {
    usage += pair.first + "\n";
  }
  const bool parse_result = Flags::Parse(&argc, argv, flag_list);
  // InitMain is optional so callers (e.g. tests) can skip re-initialization.
  if (init_main) {
    port::InitMain(argv[0], &argc, &argv);
  }
  if (!parse_result) {
    LOG(ERROR) << usage;
    return -1;
  }
  if (argc > 1) {
    LOG(ERROR) << "Unknown argument " << argv[1] << ".\n" << usage;
    return -1;
  }
  // --in_graph, --out_graph, and --transforms are all required.
  if (in_graph_string.empty()) {
    LOG(ERROR) << "in_graph graph can't be empty.\n" << usage;
    return -1;
  }
  if (out_graph_string.empty()) {
    LOG(ERROR) << "out_graph graph can't be empty.\n" << usage;
    return -1;
  }
  if (transforms_string.empty()) {
    LOG(ERROR) << "You must specify at least one transform.\n" << usage;
    return -1;
  }
  // Expand "~" in paths and split the comma-separated node name lists.
  string in_graph = ExpandPath(in_graph_string);
  string out_graph = ExpandPath(out_graph_string);
  std::vector<string> inputs = str_util::Split(inputs_string, ',');
  std::vector<string> outputs = str_util::Split(outputs_string, ',');
  TransformParameters transform_params;
  Status parse_status =
      ParseTransformParameters(transforms_string, &transform_params);
  if (!parse_status.ok()) {
    LOG(ERROR) << "Failed to parse --transform argument, error was "
               << parse_status.message();
    return -1;
  }
  if (transform_params.empty()) {
    LOG(ERROR) << "You must specify at least one transform.\n" << usage;
    return -1;
  }
  GraphDef graph_def;
  Status load_status = LoadTextOrBinaryGraphFile(in_graph, &graph_def);
  if (!load_status.ok()) {
    LOG(ERROR) << "Loading graph '" << in_graph_string << "' failed with "
               << load_status.message();
    LOG(ERROR) << usage;
    return -1;
  }
  Status transform_result =
      TransformGraph(inputs, outputs, transform_params, &graph_def);
  if (!transform_result.ok()) {
    LOG(ERROR) << transform_result.message();
    LOG(ERROR) << usage;
    return -1;
  }
  // Write either text or binary protobuf depending on --output_as_text.
  Status save_status;
  if (output_as_text) {
    save_status = WriteTextProto(Env::Default(), out_graph, graph_def);
  } else {
    save_status = WriteBinaryProto(Env::Default(), out_graph, graph_def);
  }
  if (!save_status.ok()) {
    LOG(ERROR) << "Saving graph '" << out_graph_string << "' failed with "
               << save_status.message();
    return -1;
  }
  return 0;
}
// Reads the optional "ignore_errors" transform parameter into
// *ignore_errors (default false).  Returns InvalidArgument when the value
// is present but is neither "true" nor "false" (case-insensitive).
Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params,
                          bool* ignore_errors) {
  *ignore_errors = false;
  const auto it = transform_params.find("ignore_errors");
  if (it == transform_params.end() || it->second.empty()) {
    return OkStatus();
  }
  const string value = absl::AsciiStrToLower(it->second.at(0));
  if (value == "true") {
    *ignore_errors = true;
    return OkStatus();
  }
  if (value == "false") {
    return OkStatus();
  }
  return errors::InvalidArgument(
      "ignore_errors should be true or false, found ", value);
}
// Applies each (name, params) entry of `transform_params` to `graph_def`
// in sequence.  Unknown transform names are an error; an individual
// transform failure is either fatal or logged-and-skipped depending on the
// per-transform "ignore_errors" parameter.  The graph is validated after
// each step.
Status TransformGraph(const std::vector<string>& inputs,
                      const std::vector<string>& outputs,
                      const TransformParameters& transform_params,
                      GraphDef* graph_def) {
  TransformRegistry* transform_registry = GetTransformRegistry();
  for (const auto& transform_info : transform_params) {
    const string& transform_name = transform_info.first;
    // Empty names can result from parsing; just skip them.
    if (transform_name.empty()) {
      continue;
    }
    if (!transform_registry->count(transform_name)) {
      return errors::InvalidArgument("Transform '", transform_name,
                                     "' not recognized.");
    }
    LOG(INFO) << "Applying " << transform_name;
    const TransformFunc& transform_func =
        transform_registry->at(transform_name);
    TransformFuncContext context;
    context.input_names = inputs;
    context.output_names = outputs;
    context.params = transform_info.second;
    bool ignore_errors;
    TF_RETURN_IF_ERROR(
        ShouldIgnoreErrors(transform_info.second, &ignore_errors));
    GraphDef transformed_graph_def;
    Status transform_result =
        transform_func(*graph_def, context, &transformed_graph_def);
    if (!transform_result.ok()) {
      if (ignore_errors) {
        // Best-effort mode: keep the previous graph and continue.
        LOG(ERROR) << transform_name << ": Ignoring error "
                   << transform_result.message();
        transformed_graph_def = *graph_def;
      } else {
        return transform_result;
      }
    }
    // The input graph's function library is carried forward, overwriting
    // whatever the transform left in the output.
    *transformed_graph_def.mutable_library() = graph_def->library();
    TF_RETURN_IF_ERROR(IsGraphValid(transformed_graph_def));
    *graph_def = transformed_graph_def;
  }
  return OkStatus();
}
}
} | #include "tensorflow/tools/graph_transforms/transform_graph.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
// Declared in transform_graph.cc (not the header) so the test can exercise
// it directly.
Status ShouldIgnoreErrors(const TransformFuncParameters& transform_params,
                          bool* ignore_errors);
namespace {
// Trivial transform used to verify registration/dispatch: clears the graph.
Status test_empty_graph_transform(const GraphDef& graph_def,
                                  const TransformFuncContext& context,
                                  GraphDef* result) {
  result->Clear();
  return OkStatus();
}
}
REGISTER_GRAPH_TRANSFORM("test_empty_graph_transform",
                         test_empty_graph_transform);
// Fixture exercising the transform-graph driver end to end, plus the
// parameter-parsing helpers.
class TransformGraphTest : public ::testing::Test {
 protected:
  // Runs the full binary flow (flags -> load -> fold_constants -> save) on
  // a small graph and checks which nodes survive constant folding.
  void TestConstantFolding() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    const int width = 100;
    Tensor a_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&a_data, 1.0f);
    Output a_const =
        Const(root.WithOpName("a_expect_removed"), Input::Initializer(a_data));
    Tensor b_data(DT_FLOAT, TensorShape({width}));
    test::FillIota<float>(&b_data, 1.0f);
    Output b_const =
        Const(root.WithOpName("b_expect_removed"), Input::Initializer(b_data));
    Output add = Add(root.WithOpName("add_expect_removed"), a_const, b_const);
    Output placeholder =
        Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT);
    Output mul =
        Mul(root.WithOpName("output_expect_remains"), add, placeholder);
    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    string graph_def_serialized;
    graph_def.SerializeToString(&graph_def_serialized);
    const string dir = testing::TmpDir();
    const string in_filename_pb = io::JoinPath(dir, "in_graphdef.pb");
    const string out_filename_pb = io::JoinPath(dir, "out_graphdef.pb");
    TF_ASSERT_OK(WriteStringToFile(Env::Default(), in_filename_pb,
                                   graph_def_serialized));
    // Build a fake argv to drive ParseFlagsAndTransformGraph.
    std::vector<string> args = {"some_binary",
                                "--in_graph=" + in_filename_pb,
                                "--out_graph=" + out_filename_pb,
                                "--inputs=placeholder_expect_remains",
                                "--outputs=output_expect_remains",
                                "--transforms=fold_constants"};
    const int argc = 6;
    EXPECT_EQ(argc, args.size());
    char* argv[argc];
    std::vector<char*> char_strings;
    for (int i = 0; i < argc; ++i) {
      string arg = args[i];
      char* char_string = new char[arg.size() + 1];
      std::copy_n(arg.c_str(), arg.size() + 1, char_string);
      argv[i] = char_string;
      char_strings.push_back(char_string);
    }
    // init_main=false: the test process must not re-run InitMain.
    ParseFlagsAndTransformGraph(argc, argv, false);
    for (char* char_string : char_strings) {
      delete[] char_string;
    }
    GraphDef out_graph_def;
    TF_EXPECT_OK(
        ReadBinaryProto(Env::Default(), out_filename_pb, &out_graph_def));
    std::map<string, const NodeDef*> out_node_map;
    graph_transforms::MapNamesToNodes(out_graph_def, &out_node_map);
    // Node names encode the expectation: *_expect_removed nodes should be
    // folded away, *_expect_remains nodes should survive.
    for (const NodeDef& node : out_graph_def.node()) {
      const int occurrence_count = out_node_map.count(node.name());
      if (str_util::EndsWith(node.name(), "expect_removed")) {
        EXPECT_EQ(0, occurrence_count) << "node.name()=" << node.name();
      }
      if (str_util::EndsWith(node.name(), "expect_remains")) {
        EXPECT_EQ(1, occurrence_count) << "node.name()=" << node.name();
      }
    }
  }
  // Verifies registered transforms are dispatched and unknown names are
  // rejected with "not recognized".
  void TestTransformRegistration() {
    auto root = tensorflow::Scope::NewRootScope();
    using namespace ::tensorflow::ops;
    Output placeholder =
        Placeholder(root.WithOpName("placeholder_expect_remains"), DT_FLOAT);
    GraphDef graph_def;
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    EXPECT_EQ(1, graph_def.node().size());
    TF_ASSERT_OK(TransformGraph({}, {}, {{"test_empty_graph_transform", {}}},
                                &graph_def));
    EXPECT_EQ(0, graph_def.node().size());
    TF_ASSERT_OK(root.ToGraphDef(&graph_def));
    Status no_such_status =
        TransformGraph({}, {}, {{"test_no_such_transform", {}}}, &graph_def);
    EXPECT_TRUE(absl::StrContains(no_such_status.ToString(), "not recognized"));
  }
  // Covers the transform-spec grammar: bare names, empty parens, single,
  // repeated, and quoted parameters.
  void TestParseTransformParameters() {
    TransformParameters params_list;
    TF_EXPECT_OK(ParseTransformParameters("foo", &params_list));
    EXPECT_EQ(1, params_list.size());
    EXPECT_EQ("foo", params_list[0].first);
    EXPECT_TRUE(params_list[0].second.empty());
    TF_EXPECT_OK(ParseTransformParameters("foo bar", &params_list));
    EXPECT_EQ(2, params_list.size());
    EXPECT_EQ("foo", params_list[0].first);
    EXPECT_TRUE(params_list[0].second.empty());
    EXPECT_EQ("bar", params_list[1].first);
    EXPECT_TRUE(params_list[1].second.empty());
    TF_EXPECT_OK(ParseTransformParameters("foo() bar()", &params_list));
    EXPECT_EQ(2, params_list.size());
    EXPECT_EQ("foo", params_list[0].first);
    EXPECT_TRUE(params_list[0].second.empty());
    EXPECT_EQ("bar", params_list[1].first);
    EXPECT_TRUE(params_list[1].second.empty());
    TF_EXPECT_OK(
        ParseTransformParameters("foo(bob_something=sue)", &params_list));
    EXPECT_EQ(1, params_list.size());
    EXPECT_EQ("foo", params_list[0].first);
    EXPECT_EQ(1, params_list[0].second.count("bob_something"));
    EXPECT_EQ(1, params_list[0].second["bob_something"].size());
    EXPECT_EQ("sue", params_list[0].second["bob_something"][0]);
    // A repeated parameter name accumulates multiple values in order.
    TF_EXPECT_OK(ParseTransformParameters("bar(a=1, b=2, a=3)", &params_list));
    EXPECT_EQ(1, params_list.size());
    EXPECT_EQ("bar", params_list[0].first);
    EXPECT_EQ(1, params_list[0].second.count("a"));
    EXPECT_EQ(2, params_list[0].second["a"].size());
    EXPECT_EQ("1", params_list[0].second["a"][0]);
    EXPECT_EQ("3", params_list[0].second["a"][1]);
    EXPECT_EQ(1, params_list[0].second.count("b"));
    EXPECT_EQ(1, params_list[0].second["b"].size());
    EXPECT_EQ("2", params_list[0].second["b"][0]);
    // Quoted values may contain commas.
    TF_EXPECT_OK(ParseTransformParameters("bar(a=\"1\", b=\"1,2,3\", a=3)",
                                          &params_list));
    EXPECT_EQ(1, params_list.size());
    EXPECT_EQ("bar", params_list[0].first);
    EXPECT_EQ(1, params_list[0].second.count("a"));
    EXPECT_EQ(2, params_list[0].second["a"].size());
    EXPECT_EQ("1", params_list[0].second["a"][0]);
    EXPECT_EQ("3", params_list[0].second["a"][1]);
    EXPECT_EQ(1, params_list[0].second.count("b"));
    EXPECT_EQ(1, params_list[0].second["b"].size());
    EXPECT_EQ("1,2,3", params_list[0].second["b"][0]);
  }
  // An escaped newline alone should produce no transforms (error ignored).
  void TestParseEscapedNewline() {
    TransformParameters params_list;
    ParseTransformParameters("\\\n", &params_list).IgnoreError();
    EXPECT_EQ(0, params_list.size());
  }
  // Extra whitespace around and between transform names is tolerated.
  void TestParseExtraSpaces() {
    TransformParameters params_list;
    ParseTransformParameters(" ", &params_list).IgnoreError();
    EXPECT_EQ(0, params_list.size());
    TF_EXPECT_OK(ParseTransformParameters(" foo bar \\\n", &params_list));
    EXPECT_EQ(2, params_list.size());
    EXPECT_EQ("foo", params_list[0].first);
    EXPECT_TRUE(params_list[0].second.empty());
    EXPECT_EQ("bar", params_list[1].first);
    EXPECT_TRUE(params_list[1].second.empty());
  }
  // "true"/"false" are parsed, absence defaults to false, anything else
  // is an error.
  void TestShouldIgnoreErrors() {
    bool ignore_errors;
    TF_EXPECT_OK(
        ShouldIgnoreErrors({{"ignore_errors", {"true"}}}, &ignore_errors));
    EXPECT_TRUE(ignore_errors);
    TF_EXPECT_OK(
        ShouldIgnoreErrors({{"ignore_errors", {"false"}}}, &ignore_errors));
    EXPECT_FALSE(ignore_errors);
    TF_EXPECT_OK(ShouldIgnoreErrors({}, &ignore_errors));
    EXPECT_FALSE(ignore_errors);
    EXPECT_FALSE(
        ShouldIgnoreErrors({{"ignore_errors", {"foo"}}}, &ignore_errors).ok());
  }
};
// Each TEST_F simply forwards to the corresponding fixture method above.
TEST_F(TransformGraphTest, TestConstantFolding) { TestConstantFolding(); }
TEST_F(TransformGraphTest, TestTransformRegistration) {
  TestTransformRegistration();
}
TEST_F(TransformGraphTest, TestParseTransformParameters) {
  TestParseTransformParameters();
}
TEST_F(TransformGraphTest, TestParseEscapedNewline) {
  TestParseEscapedNewline();
}
TEST_F(TransformGraphTest, TestShouldIgnoreErrors) { TestShouldIgnoreErrors(); }
}
} |
210 | #ifndef TENSORFLOW_CORE_DATA_COMPRESSION_UTILS_H_
#define TENSORFLOW_CORE_DATA_COMPRESSION_UTILS_H_
#include <vector>
#include "tensorflow/core/framework/dataset.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace data {
// Snappy-compresses the tensors of `element` into `out`, recording the
// per-component metadata needed to reconstruct them.
Status CompressElement(const std::vector<Tensor>& element,
                       CompressedElement* out);
// Inverse of CompressElement: rebuilds the tensor vector from `compressed`.
Status UncompressElement(const CompressedElement& compressed,
                         std::vector<Tensor>* out);
}
}
#endif
#include "tensorflow/core/data/compression_utils.h"
#include <limits>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/snappy.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
// Format version stamped into CompressedElement by CompressElement;
// UncompressElement rejects any other version.
constexpr int kCompressedElementVersion = 0;
}
// Fixed-capacity builder for an iovec scatter/gather array.  The capacity
// is fixed at construction; Add() fills the next unused slot while keeping
// a running total of the bytes covered.
class Iov {
 public:
  explicit Iov(size_t size) : iov_(size), next_slot_(0), total_bytes_(0) {}
  // Records [base, base + len) as the next piece.  The caller must not add
  // more pieces than the capacity given to the constructor.
  void Add(void* base, size_t len) {
    iov_[next_slot_].iov_base = base;
    iov_[next_slot_].iov_len = len;
    total_bytes_ += len;
    ++next_slot_;
  }
  // Pointer to the underlying iovec array.
  iovec* Data() { return iov_.data(); }
  // Total number of bytes across all pieces added so far.
  size_t NumBytes() const { return total_bytes_; }
  // Capacity (number of iovec slots), not the count of pieces added.
  size_t NumPieces() const { return iov_.size(); }
 private:
  std::vector<struct iovec> iov_;
  size_t next_slot_;
  size_t total_bytes_;
};
// Compresses the tensors of `element` into a single snappy-compressed blob
// in `out`, recording per-component dtype/shape/length metadata so that
// UncompressElement can reconstruct the tensors.  Returns OutOfRange if the
// combined uncompressed size exceeds the 4GB snappy limit, and Internal if
// compression itself fails.
Status CompressElement(const std::vector<Tensor>& element,
                       CompressedElement* out) {
  // First pass: count the iovec pieces needed and serialize the components
  // that are neither string nor memcpy-able into TensorProtos.
  size_t num_string_tensors = 0;
  size_t num_string_tensor_strings = 0;
  std::vector<TensorProto> nonmemcpyable_components;
  size_t total_nonmemcpyable_size = 0;
  for (const auto& component : element) {
    if (component.dtype() == DT_STRING) {
      ++num_string_tensors;
      num_string_tensor_strings += component.NumElements();
    } else if (!DataTypeCanUseMemcpy(component.dtype())) {
      nonmemcpyable_components.emplace_back();
      component.AsProtoTensorContent(&nonmemcpyable_components.back());
      total_nonmemcpyable_size +=
          nonmemcpyable_components.back().ByteSizeLong();
    }
  }
  // One iovec piece per component, except string tensors contribute one
  // piece per string element instead of one per tensor.
  Iov iov{element.size() + num_string_tensor_strings - num_string_tensors};
  tstring nonmemcpyable;
  nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
  char* nonmemcpyable_pos = nonmemcpyable.mdata();
  int nonmemcpyable_component_index = 0;
  for (int i = 0; i < element.size(); ++i) {
    const auto& component = element[i];
    CompressedComponentMetadata* metadata =
        out->mutable_component_metadata()->Add();
    metadata->set_dtype(component.dtype());
    component.shape().AsProto(metadata->mutable_tensor_shape());
    if (DataTypeCanUseMemcpy(component.dtype())) {
      // POD tensors are compressed directly from their backing buffer.
      const TensorBuffer* buffer = DMAHelper::buffer(&component);
      if (buffer) {
        iov.Add(buffer->data(), buffer->size());
        metadata->add_uncompressed_bytes(buffer->size());
      }
    } else if (component.dtype() == DT_STRING) {
      // One iovec piece (and one recorded length) per string element.
      // Use `j` for the inner index: the original shadowed the outer `i`.
      const auto& flats = component.unaligned_flat<tstring>();
      for (int j = 0; j < flats.size(); ++j) {
        iov.Add(const_cast<char*>(flats.data()[j].data()),
                flats.data()[j].size());
        metadata->add_uncompressed_bytes(flats.data()[j].size());
      }
    } else {
      // Serialized TensorProto bytes, packed into the scratch buffer.
      TensorProto& proto =
          nonmemcpyable_components[nonmemcpyable_component_index++];
      // Hoist the size: ByteSizeLong() was previously recomputed 3x here.
      const size_t proto_size = proto.ByteSizeLong();
      proto.SerializeToArray(nonmemcpyable_pos, proto_size);
      iov.Add(nonmemcpyable_pos, proto_size);
      nonmemcpyable_pos += proto_size;
      metadata->add_uncompressed_bytes(proto_size);
    }
  }
  if (iov.NumBytes() > kuint32max) {
    return errors::OutOfRange("Encountered dataset element of size ",
                              iov.NumBytes(),
                              ", exceeding the 4GB Snappy limit.");
  }
  if (!port::Snappy_CompressFromIOVec(iov.Data(), iov.NumBytes(),
                                      out->mutable_data())) {
    return errors::Internal("Failed to compress using snappy.");
  }
  out->set_version(kCompressedElementVersion);
  VLOG(3) << "Compressed element from " << iov.NumBytes() << " bytes to "
          << out->data().size() << " bytes";
  return absl::OkStatus();
}
// Reverses CompressElement(): snappy-decompresses `compressed` into `out`,
// reconstructing each component tensor from its recorded dtype/shape metadata.
// Decompression happens directly into destination buffers via an iovec:
//   - memcpy-able dtypes decompress straight into the new Tensor's buffer;
//   - DT_STRING tensors get one iovec entry per contained string;
//   - all other dtypes decompress into a scratch buffer holding a serialized
//     TensorProto, which is parsed in a final pass.
Status UncompressElement(const CompressedElement& compressed,
                         std::vector<Tensor>* out) {
  if (compressed.version() != kCompressedElementVersion) {
    return errors::Internal("Unsupported compressed element version: ",
                            compressed.version());
  }
  int num_components = compressed.component_metadata_size();
  out->clear();
  out->reserve(num_components);
  // Pass 1 over metadata: size the iovec and the non-memcpyable scratch space.
  size_t num_string_tensors = 0;
  size_t num_string_tensor_strings = 0;
  size_t total_nonmemcpyable_size = 0;
  for (const auto& metadata : compressed.component_metadata()) {
    if (metadata.dtype() == DT_STRING) {
      ++num_string_tensors;
      num_string_tensor_strings += metadata.uncompressed_bytes_size();
    } else if (!DataTypeCanUseMemcpy(metadata.dtype())) {
      total_nonmemcpyable_size += metadata.uncompressed_bytes(0);
    }
  }
  // One iovec entry per component, except each string tensor contributes one
  // entry per string instead of one for the tensor itself.
  Iov iov{num_components + num_string_tensor_strings - num_string_tensors};
  tstring nonmemcpyable;
  nonmemcpyable.resize_uninitialized(total_nonmemcpyable_size);
  char* nonmemcpyable_pos = nonmemcpyable.mdata();
  // Pass 2: allocate output tensors and point the iovec at their buffers, in
  // the same order CompressElement used so bytes land in the right place.
  for (const auto& metadata : compressed.component_metadata()) {
    if (DataTypeCanUseMemcpy(metadata.dtype())) {
      out->emplace_back(metadata.dtype(), metadata.tensor_shape());
      TensorBuffer* buffer = DMAHelper::buffer(&out->back());
      if (buffer) {
        iov.Add(buffer->data(), metadata.uncompressed_bytes(0));
      }
    } else if (metadata.dtype() == DT_STRING) {
      out->emplace_back(metadata.dtype(), metadata.tensor_shape());
      const auto& flats = out->back().unaligned_flat<tstring>();
      for (int i = 0; i < metadata.uncompressed_bytes_size(); ++i) {
        flats.data()[i].resize(metadata.uncompressed_bytes(i));
        iov.Add(flats.data()[i].mdata(), metadata.uncompressed_bytes(i));
      }
    } else {
      // Placeholder tensor; the real value is parsed from the scratch buffer
      // after decompression (see the final loop below).
      out->emplace_back();
      iov.Add(nonmemcpyable_pos, metadata.uncompressed_bytes(0));
      nonmemcpyable_pos += metadata.uncompressed_bytes(0);
    }
  }
  const std::string& compressed_data = compressed.data();
  size_t uncompressed_size;
  if (!port::Snappy_GetUncompressedLength(
          compressed_data.data(), compressed_data.size(), &uncompressed_size)) {
    return errors::Internal(
        "Could not get snappy uncompressed length. Compressed data size: ",
        compressed_data.size());
  }
  // Guard against corrupted or mismatched metadata before scattering bytes
  // into the prepared buffers.
  if (uncompressed_size != static_cast<size_t>(iov.NumBytes())) {
    return errors::Internal(
        "Uncompressed size mismatch. Snappy expects ", uncompressed_size,
        " whereas the tensor metadata suggests ", iov.NumBytes());
  }
  if (!port::Snappy_UncompressToIOVec(compressed_data.data(),
                                      compressed_data.size(), iov.Data(),
                                      iov.NumPieces())) {
    return errors::Internal("Failed to perform snappy decompression.");
  }
  // Pass 3: parse the non-memcpyable components out of the scratch buffer.
  nonmemcpyable_pos = nonmemcpyable.mdata();
  for (int i = 0; i < num_components; ++i) {
    const CompressedComponentMetadata& metadata =
        compressed.component_metadata(i);
    if (!DataTypeCanUseMemcpy(metadata.dtype()) &&
        metadata.dtype() != DT_STRING) {
      TensorProto tp;
      if (!tp.ParseFromString(
              {nonmemcpyable_pos,
               static_cast<size_t>(metadata.uncompressed_bytes(0))})) {
        return errors::Internal("Could not parse TensorProto");
      }
      if (!out->at(i).FromProto(tp)) {
        return errors::Internal("Could not parse Tensor");
      }
      nonmemcpyable_pos += metadata.uncompressed_bytes(0);
    }
  }
  return absl::OkStatus();
}
// Registers a variant decode function so CompressedElement values stored in
// DT_VARIANT tensors can be decoded by the variant framework.
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompressedElement,
                                       "tensorflow.data.CompressedElement");
}
} | #include "tensorflow/core/data/compression_utils.h"
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
// Elements whose total byte size exceeds the 4GB snappy limit must be
// rejected with OUT_OF_RANGE rather than silently truncated.
TEST(CompressionUtilsTest, Exceeds4GB) {
  std::vector<Tensor> element = {
      CreateTensor<int64_t>(TensorShape{1024, 1024, 513})};
  CompressedElement compressed;
  EXPECT_THAT(CompressElement(element, &compressed),
              StatusIs(error::OUT_OF_RANGE,
                       HasSubstr("exceeding the 4GB Snappy limit")));
}
// Representative elements: numeric, string, mixed, empty element, zero-sized
// tensor, multi-tensor, and variant tensors.
std::vector<std::vector<Tensor>> TestCases() {
  return {
      CreateTensors<int64_t>(TensorShape{1}, {{1}}),
      CreateTensors<int64_t>(TensorShape{1}, {{1}, {2}}),
      CreateTensors<tstring>(TensorShape{1}, {{"a"}, {"b"}}),
      {CreateTensor<tstring>(TensorShape{1, 2}, {"abc", "xyz"}),
       CreateTensor<tstring>(TensorShape{2, 1}, {"ijk", "mnk"})},
      {CreateTensor<tstring>(TensorShape{1}, {"a"}),
       CreateTensor<int64_t>(TensorShape{1}, {1})},
      {},
      {CreateTensor<int64_t>(TensorShape{1, 0})},
      {CreateTensor<int64_t>(TensorShape{128, 128}),
       CreateTensor<int64_t>(TensorShape{64, 2})},
      {
          DatasetOpsTestBase::CreateTestVariantTensor(
              {CreateTensor<int64_t>(TensorShape{3, 1}, {1, 2, 3}),
               CreateTensor<tstring>(TensorShape{}, {"abc"})}),
          DatasetOpsTestBase::CreateTestVariantTensor(
              {CreateTensor<int64_t>(TensorShape{3, 1}, {10, 11, 12}),
               CreateTensor<tstring>(TensorShape{}, {"xyz"})}),
      },
  };
}
// Runs every test below once per element produced by TestCases().
class ParameterizedCompressionUtilsTest
    : public DatasetOpsTestBase,
      public ::testing::WithParamInterface<std::vector<Tensor>> {};
// Compress followed by uncompress must reproduce the original element.
TEST_P(ParameterizedCompressionUtilsTest, RoundTrip) {
  std::vector<Tensor> element = GetParam();
  CompressedElement compressed;
  TF_ASSERT_OK(CompressElement(element, &compressed));
  std::vector<Tensor> round_trip_element;
  TF_ASSERT_OK(UncompressElement(compressed, &round_trip_element));
  TF_EXPECT_OK(
      ExpectEqual(element, round_trip_element, true));
}
// Freshly compressed elements carry version 0.
TEST_P(ParameterizedCompressionUtilsTest, CompressedElementVersion) {
  std::vector<Tensor> element = GetParam();
  CompressedElement compressed;
  TF_ASSERT_OK(CompressElement(element, &compressed));
  EXPECT_EQ(0, compressed.version());
}
// Elements with an unrecognized version must fail to uncompress.
TEST_P(ParameterizedCompressionUtilsTest, VersionMismatch) {
  std::vector<Tensor> element = GetParam();
  CompressedElement compressed;
  TF_ASSERT_OK(CompressElement(element, &compressed));
  compressed.set_version(1);
  std::vector<Tensor> round_trip_element;
  EXPECT_THAT(UncompressElement(compressed, &round_trip_element),
              StatusIs(error::INTERNAL));
}
INSTANTIATE_TEST_SUITE_P(Instantiation, ParameterizedCompressionUtilsTest,
                         ::testing::ValuesIn(TestCases()));
}
}
} |
211 | #ifndef THIRD_PARTY_CEL_CPP_EVAL_EVAL_UNKNOWNS_UTILITY_H_
#define THIRD_PARTY_CEL_CPP_EVAL_EVAL_UNKNOWNS_UTILITY_H_
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "base/attribute_set.h"
#include "base/function_descriptor.h"
#include "base/function_result_set.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "eval/eval/attribute_trail.h"
namespace google::api::expr::runtime {
// Helper for evaluation: tracks which attribute patterns are configured as
// "unknown" or "missing", and builds merged UnknownValue / error results
// from attribute trails and argument values.
class AttributeUtility {
 public:
  // Collects attributes and function results from several sources and then
  // builds a single UnknownValue. Obtain via CreateAccumulator(); the
  // accumulator holds a reference to its parent utility, so it must not
  // outlive it.
  class Accumulator {
   public:
    Accumulator(const Accumulator&) = delete;
    Accumulator& operator=(const Accumulator&) = delete;
    Accumulator(Accumulator&&) = delete;
    Accumulator& operator=(Accumulator&&) = delete;
    // Folds in an existing unknown (its attributes and function results).
    void Add(const cel::UnknownValue& v);
    // Folds in the attribute identified by a trail.
    void Add(const AttributeTrail& attr);
    // Adds `v` only if it is an UnknownValue; otherwise a no-op.
    void MaybeAdd(const cel::Value& v);
    bool IsEmpty() const;
    // Consumes the accumulated state into one UnknownValue.
    cel::UnknownValue Build() &&;
   private:
    explicit Accumulator(const AttributeUtility& parent)
        : parent_(parent), unknown_present_(false) {}
    friend class AttributeUtility;
    const AttributeUtility& parent_;
    cel::AttributeSet attribute_set_;
    cel::FunctionResultSet function_result_set_;
    // True once any UnknownValue has been added — even an empty one — so
    // IsEmpty() reports false in that case.
    bool unknown_present_;
  };
  // The spans and value manager are borrowed; callers must keep them alive
  // for the lifetime of this utility.
  AttributeUtility(
      absl::Span<const cel::AttributePattern> unknown_patterns,
      absl::Span<const cel::AttributePattern> missing_attribute_patterns,
      cel::ValueManager& value_factory)
      : unknown_patterns_(unknown_patterns),
        missing_attribute_patterns_(missing_attribute_patterns),
        value_factory_(value_factory) {}
  AttributeUtility(const AttributeUtility&) = delete;
  AttributeUtility& operator=(const AttributeUtility&) = delete;
  AttributeUtility(AttributeUtility&&) = delete;
  AttributeUtility& operator=(AttributeUtility&&) = delete;
  // True if `trail` fully matches a missing-attribute pattern.
  bool CheckForMissingAttribute(const AttributeTrail& trail) const;
  // True if `trail` matches an unknown pattern; PARTIAL matches count only
  // when `use_partial` is set.
  bool CheckForUnknown(const AttributeTrail& trail, bool use_partial) const;
  bool CheckForUnknownExact(const AttributeTrail& trail) const {
    return CheckForUnknown(trail, false);
  }
  bool CheckForUnknownPartial(const AttributeTrail& trail) const {
    return CheckForUnknown(trail, true);
  }
  // Returns the set of attributes among `args` matching unknown patterns.
  cel::AttributeSet CheckForUnknowns(absl::Span<const AttributeTrail> args,
                                     bool use_partial) const;
  // Merges UnknownValues found in `args`; nullopt if there are none.
  absl::optional<cel::UnknownValue> MergeUnknowns(
      absl::Span<const cel::Value> args) const;
  cel::UnknownValue MergeUnknownValues(const cel::UnknownValue& left,
                                       const cel::UnknownValue& right) const;
  // Combines pattern-matched unknowns from `attrs` with unknowns already
  // present in `args`; nullopt when neither source yields an unknown.
  absl::optional<cel::UnknownValue> IdentifyAndMergeUnknowns(
      absl::Span<const cel::Value> args, absl::Span<const AttributeTrail> attrs,
      bool use_partial) const;
  cel::UnknownValue CreateUnknownSet(cel::Attribute attr) const;
  absl::StatusOr<cel::ErrorValue> CreateMissingAttributeError(
      const cel::Attribute& attr) const;
  cel::UnknownValue CreateUnknownSet(
      const cel::FunctionDescriptor& fn_descriptor, int64_t expr_id,
      absl::Span<const cel::Value> args) const;
  Accumulator CreateAccumulator() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return Accumulator(*this);
  }
 private:
  cel::ValueManager& value_manager() const { return value_factory_; }
  void Add(Accumulator& a, const cel::UnknownValue& v) const;
  void Add(Accumulator& a, const AttributeTrail& attr) const;
  absl::Span<const cel::AttributePattern> unknown_patterns_;
  absl::Span<const cel::AttributePattern> missing_attribute_patterns_;
  cel::ValueManager& value_factory_;
};
}
#endif
#include "eval/eval/attribute_utility.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "base/attribute_set.h"
#include "base/function_descriptor.h"
#include "base/function_result.h"
#include "base/function_result_set.h"
#include "base/internal/unknown_set.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/internal/errors.h"
#include "internal/status_macros.h"
namespace google::api::expr::runtime {
using ::cel::AttributeSet;
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::FunctionResult;
using ::cel::FunctionResultSet;
using ::cel::InstanceOf;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::base_internal::UnknownSet;
using Accumulator = AttributeUtility::Accumulator;
bool AttributeUtility::CheckForMissingAttribute(
const AttributeTrail& trail) const {
if (trail.empty()) {
return false;
}
for (const auto& pattern : missing_attribute_patterns_) {
if (pattern.IsMatch(trail.attribute()) ==
cel::AttributePattern::MatchType::FULL) {
return true;
}
}
return false;
}
bool AttributeUtility::CheckForUnknown(const AttributeTrail& trail,
bool use_partial) const {
if (trail.empty()) {
return false;
}
for (const auto& pattern : unknown_patterns_) {
auto current_match = pattern.IsMatch(trail.attribute());
if (current_match == cel::AttributePattern::MatchType::FULL ||
(use_partial &&
current_match == cel::AttributePattern::MatchType::PARTIAL)) {
return true;
}
}
return false;
}
// Scans `args` for UnknownValues and merges them into a single UnknownValue.
// Returns nullopt when no argument is unknown, letting callers cheaply detect
// the common fully-known case.
absl::optional<UnknownValue> AttributeUtility::MergeUnknowns(
    absl::Span<const cel::Value> args) const {
  // The result set is allocated lazily so the all-known path does no work.
  absl::optional<UnknownSet> result_set;
  for (const auto& value : args) {
    if (!value->Is<cel::UnknownValue>()) continue;
    if (!result_set.has_value()) {
      result_set.emplace();
    }
    const auto& current_set = value.As<cel::UnknownValue>();
    cel::base_internal::UnknownSetAccess::Add(
        *result_set, UnknownSet(current_set.attribute_set(),
                                current_set.function_result_set()));
  }
  if (!result_set.has_value()) {
    return absl::nullopt;
  }
  return value_factory_.CreateUnknownValue(
      result_set->unknown_attributes(), result_set->unknown_function_results());
}
// Produces a new UnknownValue holding the union of the attributes and
// function results of `left` and `right`.
UnknownValue AttributeUtility::MergeUnknownValues(
    const UnknownValue& left, const UnknownValue& right) const {
  AttributeSet merged_attributes;
  FunctionResultSet merged_function_results;
  for (const UnknownValue* source : {&left, &right}) {
    merged_attributes.Add(source->attribute_set());
    merged_function_results.Add(source->function_result_set());
  }
  return value_factory_.CreateUnknownValue(std::move(merged_attributes),
                                           std::move(merged_function_results));
}
// Collects the attributes of every trail in `args` that matches an unknown
// pattern (per CheckForUnknown with the given `use_partial` mode).
AttributeSet AttributeUtility::CheckForUnknowns(
    absl::Span<const AttributeTrail> args, bool use_partial) const {
  AttributeSet matched;
  for (size_t i = 0; i < args.size(); ++i) {
    const AttributeTrail& trail = args[i];
    if (!CheckForUnknown(trail, use_partial)) {
      continue;
    }
    matched.Add(trail.attribute());
  }
  return matched;
}
// Combines unknowns from two sources: attribute trails matching the configured
// unknown patterns (`attrs`, honoring `use_partial`) and UnknownValues already
// present in `args`. Returns nullopt if neither source produced an unknown.
absl::optional<UnknownValue> AttributeUtility::IdentifyAndMergeUnknowns(
    absl::Span<const cel::Value> args, absl::Span<const AttributeTrail> attrs,
    bool use_partial) const {
  absl::optional<UnknownSet> result_set;
  // Source 1: newly identified unknowns from attribute patterns.
  cel::AttributeSet attr_set = CheckForUnknowns(attrs, use_partial);
  if (!attr_set.empty()) {
    result_set.emplace(std::move(attr_set));
  }
  // Source 2: unknowns carried by the argument values themselves.
  absl::optional<UnknownValue> arg_unknowns = MergeUnknowns(args);
  if (!result_set.has_value()) {
    // No pattern-matched unknowns; the merged argument unknowns (possibly
    // nullopt) are the whole answer.
    return arg_unknowns;
  }
  if (arg_unknowns.has_value()) {
    cel::base_internal::UnknownSetAccess::Add(
        *result_set, UnknownSet((*arg_unknowns).attribute_set(),
                                (*arg_unknowns).function_result_set()));
  }
  return value_factory_.CreateUnknownValue(
      result_set->unknown_attributes(), result_set->unknown_function_results());
}
// Wraps a single attribute in an UnknownValue.
UnknownValue AttributeUtility::CreateUnknownSet(cel::Attribute attr) const {
  AttributeSet single_attribute({std::move(attr)});
  return value_factory_.CreateUnknownValue(std::move(single_attribute));
}
// Builds the canonical "missing attribute" error value for `attr`. Fails
// (returns a non-OK status) only if the attribute cannot be stringified.
absl::StatusOr<ErrorValue> AttributeUtility::CreateMissingAttributeError(
    const cel::Attribute& attr) const {
  CEL_ASSIGN_OR_RETURN(std::string message, attr.AsString());
  return value_factory_.CreateErrorValue(
      cel::runtime_internal::CreateMissingAttributeError(message));
}
// Creates an unknown representing the unresolved result of invoking
// `fn_descriptor` at `expr_id`. NOTE(review): `args` is currently unused —
// the function-result identity here is descriptor + expression id only.
UnknownValue AttributeUtility::CreateUnknownSet(
    const cel::FunctionDescriptor& fn_descriptor, int64_t expr_id,
    absl::Span<const cel::Value> args) const {
  return value_factory_.CreateUnknownValue(
      FunctionResultSet(FunctionResult(fn_descriptor, expr_id)));
}
// Folds the unknown's attributes and function results into the accumulator.
// The two member sets are independent, so the order of the adds is free.
void AttributeUtility::Add(Accumulator& a, const cel::UnknownValue& v) const {
  a.function_result_set_.Add(v.function_result_set());
  a.attribute_set_.Add(v.attribute_set());
}
// Folds the attribute identified by `attr` into the accumulator's set.
void AttributeUtility::Add(Accumulator& a, const AttributeTrail& attr) const {
  const auto& attribute = attr.attribute();
  a.attribute_set_.Add(attribute);
}
// Records `value` and remembers that at least one unknown was seen, which
// keeps IsEmpty() false even for an empty unknown.
void Accumulator::Add(const UnknownValue& value) {
  parent_.Add(*this, value);
  unknown_present_ = true;
}
// Delegates attribute accumulation to the owning utility (friend access).
void Accumulator::Add(const AttributeTrail& attr) {
  parent_.Add(*this, attr);
}
// Adds `v` only when it is an UnknownValue; all other values are ignored.
void Accumulator::MaybeAdd(const Value& v) {
  if (!InstanceOf<UnknownValue>(v)) {
    return;
  }
  Add(Cast<UnknownValue>(v));
}
// Empty means: no unknown was ever added AND both accumulated sets are empty.
bool Accumulator::IsEmpty() const {
  if (unknown_present_) {
    return false;
  }
  if (!attribute_set_.empty()) {
    return false;
  }
  return function_result_set_.empty();
}
// Consumes the accumulated attribute/function-result sets and materializes a
// single UnknownValue via the parent utility's value manager. Rvalue-only:
// the accumulator is spent after this call.
cel::UnknownValue Accumulator::Build() && {
  return parent_.value_manager().CreateUnknownValue(
      std::move(attribute_set_), std::move(function_result_set_));
}
} | #include "eval/eval/attribute_utility.h"
#include <vector>
#include "base/attribute_set.h"
#include "base/type_provider.h"
#include "common/type_factory.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_value.h"
#include "eval/public/unknown_attribute_set.h"
#include "eval/public/unknown_set.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
using ::cel::AttributeSet;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::extensions::ProtoMemoryManagerRef;
using testing::Eq;
using testing::SizeIs;
using testing::UnorderedPointwise;
// Fixture: provides an arena-backed legacy value manager shared by all cases.
class AttributeUtilityTest : public ::testing::Test {
 public:
  AttributeUtilityTest()
      : value_factory_(ProtoMemoryManagerRef(&arena_),
                       cel::TypeProvider::Builtin()) {}
 protected:
  google::protobuf::Arena arena_;
  cel::common_internal::LegacyValueManager value_factory_;
};
// Exercises CheckForUnknown with exact vs. partial pattern matching.
TEST_F(AttributeUtilityTest, UnknownsUtilityCheckUnknowns) {
  std::vector<CelAttributePattern> unknown_patterns = {
      CelAttributePattern("unknown0", {CreateCelAttributeQualifierPattern(
                                          CelValue::CreateInt64(1))}),
      CelAttributePattern("unknown0", {CreateCelAttributeQualifierPattern(
                                          CelValue::CreateInt64(2))}),
      CelAttributePattern("unknown1", {}),
      CelAttributePattern("unknown2", {}),
  };
  std::vector<CelAttributePattern> missing_attribute_patterns;
  AttributeUtility utility(unknown_patterns, missing_attribute_patterns,
                           value_factory_);
  // An empty trail never matches, regardless of mode.
  ASSERT_FALSE(utility.CheckForUnknown(AttributeTrail(), true));
  ASSERT_FALSE(utility.CheckForUnknown(AttributeTrail(), false));
  AttributeTrail unknown_trail0("unknown0");
  // Bare "unknown0" is only a partial match for the qualified patterns.
  { ASSERT_FALSE(utility.CheckForUnknown(unknown_trail0, false)); }
  { ASSERT_TRUE(utility.CheckForUnknown(unknown_trail0, true)); }
  // "unknown0[1]" fully matches, so both modes report unknown.
  {
    ASSERT_TRUE(utility.CheckForUnknown(
        unknown_trail0.Step(
            CreateCelAttributeQualifier(CelValue::CreateInt64(1))),
        false));
  }
  {
    ASSERT_TRUE(utility.CheckForUnknown(
        unknown_trail0.Step(
            CreateCelAttributeQualifier(CelValue::CreateInt64(1))),
        true));
  }
}
// MergeUnknowns must collect attributes from every unknown argument and
// ignore non-unknown values.
TEST_F(AttributeUtilityTest, UnknownsUtilityMergeUnknownsFromValues) {
  std::vector<CelAttributePattern> unknown_patterns;
  std::vector<CelAttributePattern> missing_attribute_patterns;
  CelAttribute attribute0("unknown0", {});
  CelAttribute attribute1("unknown1", {});
  AttributeUtility utility(unknown_patterns, missing_attribute_patterns,
                           value_factory_);
  UnknownValue unknown_set0 =
      value_factory_.CreateUnknownValue(AttributeSet({attribute0}));
  UnknownValue unknown_set1 =
      value_factory_.CreateUnknownValue(AttributeSet({attribute1}));
  std::vector<cel::Value> values = {
      unknown_set0,
      unknown_set1,
      value_factory_.CreateBoolValue(true),
      value_factory_.CreateIntValue(1),
  };
  absl::optional<UnknownValue> unknown_set = utility.MergeUnknowns(values);
  ASSERT_TRUE(unknown_set.has_value());
  EXPECT_THAT((*unknown_set).attribute_set(),
              UnorderedPointwise(
                  Eq(), std::vector<CelAttribute>{attribute0, attribute1}));
}
// CheckForUnknowns must surface each trail matched by a wildcard pattern.
TEST_F(AttributeUtilityTest, UnknownsUtilityCheckForUnknownsFromAttributes) {
  std::vector<CelAttributePattern> unknown_patterns = {
      CelAttributePattern("unknown0",
                          {CelAttributeQualifierPattern::CreateWildcard()}),
  };
  std::vector<CelAttributePattern> missing_attribute_patterns;
  AttributeTrail trail0("unknown0");
  AttributeTrail trail1("unknown1");
  CelAttribute attribute1("unknown1", {});
  UnknownSet unknown_set1(UnknownAttributeSet({attribute1}));
  AttributeUtility utility(unknown_patterns, missing_attribute_patterns,
                           value_factory_);
  UnknownSet unknown_attr_set(utility.CheckForUnknowns(
      {
          AttributeTrail(),
          trail0.Step(CreateCelAttributeQualifier(CelValue::CreateInt64(1))),
          trail0.Step(CreateCelAttributeQualifier(CelValue::CreateInt64(2))),
      },
      false));
  // Union of the pre-existing unknown and the two matched trails.
  UnknownSet unknown_set(unknown_set1, unknown_attr_set);
  ASSERT_THAT(unknown_set.unknown_attributes(), SizeIs(3));
}
// CheckForMissingAttribute fires only once a matching pattern is configured.
TEST_F(AttributeUtilityTest, UnknownsUtilityCheckForMissingAttributes) {
  std::vector<CelAttributePattern> unknown_patterns;
  std::vector<CelAttributePattern> missing_attribute_patterns;
  AttributeTrail trail("destination");
  trail =
      trail.Step(CreateCelAttributeQualifier(CelValue::CreateStringView("ip")));
  AttributeUtility utility0(unknown_patterns, missing_attribute_patterns,
                            value_factory_);
  EXPECT_FALSE(utility0.CheckForMissingAttribute(trail));
  missing_attribute_patterns.push_back(CelAttributePattern(
      "destination",
      {CreateCelAttributeQualifierPattern(CelValue::CreateStringView("ip"))}));
  AttributeUtility utility1(unknown_patterns, missing_attribute_patterns,
                            value_factory_);
  EXPECT_TRUE(utility1.CheckForMissingAttribute(trail));
}
// CreateUnknownSet wraps one attribute and preserves its full dotted path.
TEST_F(AttributeUtilityTest, CreateUnknownSet) {
  AttributeTrail trail("destination");
  trail =
      trail.Step(CreateCelAttributeQualifier(CelValue::CreateStringView("ip")));
  std::vector<CelAttributePattern> empty_patterns;
  AttributeUtility utility(empty_patterns, empty_patterns, value_factory_);
  UnknownValue set = utility.CreateUnknownSet(trail.attribute());
  ASSERT_THAT(set.attribute_set(), SizeIs(1));
  ASSERT_OK_AND_ASSIGN(auto elem, set.attribute_set().begin()->AsString());
  EXPECT_EQ(elem, "destination.ip");
}
} |
212 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_FUSE_AUTO_INPUT_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_FUSE_AUTO_INPUT_H_
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
namespace tflite {
namespace gpu {
namespace gl {
// Graph transformation that fuses producer nodes whose generated code writes
// an AUTO output into a consumer node whose code reads AUTO inputs, merging
// their shader source into a single node (see ApplyToNode in the .cc).
class FuseAutoInput : public NodeTransformation {
 public:
  TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final;
};
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include <any>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/types/any.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Builds the rename pair mapping shader identifier "value_<n>" to
// "value_<k>" for source-code rewriting.
std::pair<std::string, std::string> MakeValueReplacement(int n, int k) {
  const std::string from = "value_" + std::to_string(n);
  const std::string to = "value_" + std::to_string(k);
  return {from, to};
}
// Builds the rename pair mapping shader identifier "input_data_<n>" to
// "input_data_<k>" for source-code rewriting.
std::pair<std::string, std::string> MakeDataReplacement(int n, int k) {
  const std::string from = "input_data_" + std::to_string(n);
  const std::string to = "input_data_" + std::to_string(k);
  return {from, to};
}
}
// Fuses eligible producer nodes (single consumer, single AUTO output,
// compatible workload/workgroup) into `node`, whose code must read AUTO
// inputs. On success the producers are deleted, their inputs re-attached to
// `node`, and their shader source spliced into `node`'s source.
// Returns SKIPPED when preconditions don't hold and INVALID on graph errors.
TransformResult FuseAutoInput::ApplyToNode(Node* node, GraphFloat32* graph) {
  auto& node_attr =
      std::any_cast<CompiledNodeAttributes&>(node->operation.attributes);
  auto& node_code = node_attr.code;
  if (node_code.input != IOStructure::AUTO) {
    return {TransformStatus::SKIPPED, ""};
  }
  uint3 workgroup = node_code.workgroup;
  auto node_outputs = graph->FindOutputs(node->id);
  // Producers to fuse, each paired with the index of the input it feeds.
  std::vector<std::pair<Node*, int>> nodes_to_fuse;
  // Inputs that remain real inputs of the fused node.
  std::vector<std::pair<ValueId, int>> input_values;
  int input_num = -1;
  for (auto input_value : graph->FindInputs(node->id)) {
    input_num++;
    const ValueId input_id = input_value->id;
    input_values.push_back({input_id, input_num});
    if (graph->FindConsumers(input_id).size() > 1) {
      continue;  // value feeds other nodes too; its producer can't be fused
    }
    Node* input_producer = graph->FindProducer(input_id);
    if (input_producer == nullptr) {
      continue;  // graph input, nothing to fuse
    }
    if (graph->FindOutputs(input_producer->id).size() != 1) {
      continue;  // multi-output producers are not fusable
    }
    auto& input_producer_attr = std::any_cast<const CompiledNodeAttributes&>(
        input_producer->operation.attributes);
    if (input_producer_attr.code.output != IOStructure::AUTO) {
      continue;
    }
    if (input_producer_attr.code.workload != node_code.workload &&
        uint3() != input_producer_attr.code.workload) {
      continue;  // incompatible explicit workload
    }
    if (input_producer_attr.code.workgroup != uint3()) {
      // The fused shader can carry at most one explicit workgroup size.
      if (workgroup != uint3()) {
        continue;
      }
      workgroup = input_producer_attr.code.workgroup;
    }
    nodes_to_fuse.push_back({input_producer, input_num});
    input_values.pop_back();  // this input is replaced by the fused producer
  }
  if (nodes_to_fuse.empty()) {
    return {TransformStatus::SKIPPED, ""};
  }
  // Skip fusion if any value would end up consumed twice by the fused node
  // (e.g. diamond/triangle topologies).
  {
    absl::flat_hash_set<ValueId> all_inputs;
    for (const auto& node_to_fuse : nodes_to_fuse) {
      for (const auto& input : graph->FindInputs(node_to_fuse.first->id)) {
        if (all_inputs.find(input->id) != all_inputs.end()) {
          return {TransformStatus::SKIPPED, ""};
        }
        all_inputs.insert(input->id);
      }
    }
    for (const auto& input : graph->FindInputs(node->id)) {
      if (all_inputs.find(input->id) != all_inputs.end()) {
        return {TransformStatus::SKIPPED, ""};
      }
      all_inputs.insert(input->id);
    }
  }
  // Detach current inputs; they are re-added below in fused order.
  for (auto value : graph->FindInputs(node->id)) {
    if (!graph->RemoveConsumer(node->id, value->id).ok()) {
      return {TransformStatus::INVALID, ""};
    }
  }
  std::string operation_type;
  std::string source_code;
  std::string values;
  std::swap(source_code, node_code.source_code);
  int extra_input_num = input_num;
  input_num = 0;
  // Fuse: remap each producer's value_* / input_data_* identifiers, merge its
  // parameters and code, and splice its source into this node.
  for (auto input_and_num : nodes_to_fuse) {
    auto& input = input_and_num.first;
    auto& attr =
        std::any_cast<CompiledNodeAttributes&>(input->operation.attributes);
    auto super_inputs = graph->FindInputs(input->id);
    std::vector<std::pair<std::string, std::string>> replacements;
    for (int i = 0; i < super_inputs.size(); ++i) {
      // The producer's first input takes over the consumer slot it fed;
      // additional inputs get fresh indices past the original input count.
      int value_index = i == 0 ? input_and_num.second : ++extra_input_num;
      replacements.push_back(MakeValueReplacement(i, value_index));
      replacements.push_back(MakeDataReplacement(i, input_num));
      if (attr.code.input == IOStructure::AUTO) {
        absl::StrAppend(&values, " value_", value_index, " = $input_data_",
                        input_num, "[gid.x, gid.y, gid.z]$;\n");
      }
      if (!graph->AddConsumer(node->id, super_inputs[i]->id).ok()) {
        return {TransformStatus::INVALID, ""};
      }
      input_num++;
    }
    for (auto& param : attr.code.parameters) {
      param.name = absl::StrReplaceAll(param.name, replacements);
    }
    attr.code.source_code =
        absl::StrReplaceAll(attr.code.source_code, replacements);
    if (!MergeCode(&attr, &node_attr).ok()) {
      return {TransformStatus::INVALID, "Unable to merge the code"};
    }
    absl::StrAppend(&node_attr.code.source_code, "{\n", attr.code.source_code,
                    "\n}");
    if (!operation_type.empty()) {
      operation_type += ",";
    }
    operation_type += input->operation.type;
    if (!graph->DeleteNode(input->id).ok()) {
      return {TransformStatus::INVALID, ""};
    }
  }
  // Re-attach the inputs that were not fused and emit their value loads.
  for (int i = 0; i < input_values.size(); i++) {
    if (node_code.input == IOStructure::AUTO) {
      absl::StrAppend(&values, " value_", input_values[i].second,
                      " = $input_data_", input_num,
                      "[gid.x, gid.y, gid.z]$;\n");
    }
    if (!graph->AddConsumer(node->id, input_values[i].first).ok()) {
      return {TransformStatus::INVALID, ""};
    }
    input_num++;
  }
  node_code.input = IOStructure::ONLY_DEFINITIONS;
  absl::StrAppend(&node->operation.type, "(", operation_type, ")");
  // BUG FIX: this literal was previously unterminated ("{ followed by a raw
  // newline), which does not compile. Restore the intended literal: open a
  // scope and tag it with a GLSL line comment naming the fused operation.
  node_code.source_code =
      absl::StrCat(values, node_code.source_code, "{//FUSED",
                   node->operation.type, "\n", source_code, "\n}");
  return {TransformStatus::APPLIED, ""};
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Diamond: v0 feeds both n1 and n2, whose outputs feed n3. Fusing both
// producers would make n3 consume v0 twice, so fusion must be SKIPPED.
TEST(FuseAutoInputTest, SkipsDiamond) {
  GraphFloat32 graph;
  auto* v0 = graph.NewValue();
  auto* v1 = graph.NewValue();
  auto* v2 = graph.NewValue();
  auto* v3 = graph.NewValue();
  auto* n1 = graph.NewNode();
  CompiledNodeAttributes a1;
  a1.code.output = IOStructure::AUTO;
  n1->operation.attributes = std::move(a1);
  ASSERT_OK(graph.AddConsumer(n1->id, v0->id));
  ASSERT_OK(graph.SetProducer(n1->id, v1->id));
  auto* n2 = graph.NewNode();
  CompiledNodeAttributes a2;
  a2.code.output = IOStructure::AUTO;
  n2->operation.attributes = std::move(a2);
  ASSERT_OK(graph.AddConsumer(n2->id, v0->id));
  ASSERT_OK(graph.SetProducer(n2->id, v2->id));
  auto* n3 = graph.NewNode();
  CompiledNodeAttributes a3;
  a3.code.input = IOStructure::AUTO;
  n3->operation.attributes = std::move(a3);
  ASSERT_OK(graph.AddConsumer(n3->id, v1->id));
  ASSERT_OK(graph.AddConsumer(n3->id, v2->id));
  ASSERT_OK(graph.SetProducer(n3->id, v3->id));
  FuseAutoInput fuse_auto_input;
  EXPECT_EQ(fuse_auto_input.ApplyToNode(n3, &graph).status,
            TransformStatus::SKIPPED);
}
// Triangle: n2 consumes both v0 and n1's output, while n1 also consumes v0.
// Fusing n1 would again make n2 consume v0 twice — fusion must be SKIPPED.
TEST(FuseAutoInputTest, SkipsTriangle) {
  GraphFloat32 graph;
  auto* v0 = graph.NewValue();
  auto* v1 = graph.NewValue();
  auto* v2 = graph.NewValue();
  auto* n1 = graph.NewNode();
  CompiledNodeAttributes a1;
  a1.code.output = IOStructure::AUTO;
  n1->operation.attributes = std::move(a1);
  ASSERT_OK(graph.AddConsumer(n1->id, v0->id));
  ASSERT_OK(graph.SetProducer(n1->id, v1->id));
  auto* n2 = graph.NewNode();
  CompiledNodeAttributes a2;
  a2.code.input = IOStructure::AUTO;
  n2->operation.attributes = std::move(a2);
  ASSERT_OK(graph.AddConsumer(n2->id, v0->id));
  ASSERT_OK(graph.AddConsumer(n2->id, v1->id));
  ASSERT_OK(graph.SetProducer(n2->id, v2->id));
  FuseAutoInput fuse_auto_input;
  EXPECT_EQ(fuse_auto_input.ApplyToNode(n2, &graph).status,
            TransformStatus::SKIPPED);
}
}
}
}
} |
213 | #ifndef QUICHE_QUIC_CORE_QUIC_PATH_VALIDATOR_H_
#define QUICHE_QUIC_CORE_QUIC_PATH_VALIDATOR_H_
#include <memory>
#include <ostream>
#include "absl/container/inlined_vector.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_alarm.h"
#include "quiche/quic/core/quic_alarm_factory.h"
#include "quiche/quic/core/quic_arena_scoped_ptr.h"
#include "quiche/quic/core/quic_clock.h"
#include "quiche/quic/core/quic_connection_context.h"
#include "quiche/quic/core/quic_one_block_arena.h"
#include "quiche/quic/core/quic_packet_writer.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
namespace quic {
namespace test {
class QuicPathValidatorPeer;
}
class QuicConnection;
// Why a path validation was started. Carried alongside the validation state
// (see QuicPathValidator::GetPathValidationReason()).
enum class PathValidationReason {
  kReasonUnknown,
  kMultiPort,
  kReversePathValidation,
  kServerPreferredAddressMigration,
  kPortMigration,
  kConnectionMigration,
  kMaxValue,  // Sentinel marking the enum upper bound; not a real reason.
};
// Bundles the addresses describing the path being validated. Subclasses also
// supply the packet writer bound to the path via WriterToUse().
class QUICHE_EXPORT QuicPathValidationContext {
 public:
  // Two-arg form: the effective peer address defaults to `peer_address`.
  QuicPathValidationContext(const QuicSocketAddress& self_address,
                            const QuicSocketAddress& peer_address)
      : self_address_(self_address),
        peer_address_(peer_address),
        effective_peer_address_(peer_address) {}
  QuicPathValidationContext(const QuicSocketAddress& self_address,
                            const QuicSocketAddress& peer_address,
                            const QuicSocketAddress& effective_peer_address)
      : self_address_(self_address),
        peer_address_(peer_address),
        effective_peer_address_(effective_peer_address) {}
  virtual ~QuicPathValidationContext() = default;
  // Writer used to send probes on this path; implemented by subclasses.
  virtual QuicPacketWriter* WriterToUse() = 0;
  const QuicSocketAddress& self_address() const { return self_address_; }
  const QuicSocketAddress& peer_address() const { return peer_address_; }
  const QuicSocketAddress& effective_peer_address() const {
    return effective_peer_address_;
  }
 private:
  QUICHE_EXPORT friend std::ostream& operator<<(
      std::ostream& os, const QuicPathValidationContext& context);
  QuicSocketAddress self_address_;
  QuicSocketAddress peer_address_;
  QuicSocketAddress effective_peer_address_;
};
// Drives QUIC path validation: sends PATH_CHALLENGE probes on a candidate
// path, retries on timeout up to kMaxRetryTimes, and reports the outcome
// through a ResultDelegate once a matching PATH_RESPONSE arrives or retries
// are exhausted. At most one validation is pending at a time.
class QUICHE_EXPORT QuicPathValidator {
 public:
  static const uint16_t kMaxRetryTimes = 2;
  // Sends probe packets on behalf of the validator; implemented by the
  // connection.
  class QUICHE_EXPORT SendDelegate {
   public:
    virtual ~SendDelegate() = default;
    virtual bool SendPathChallenge(
        const QuicPathFrameBuffer& data_buffer,
        const QuicSocketAddress& self_address,
        const QuicSocketAddress& peer_address,
        const QuicSocketAddress& effective_peer_address,
        QuicPacketWriter* writer) = 0;
    // Returns the deadline at which the retry alarm should fire for a probe
    // sent to `peer_address` via `writer`.
    virtual QuicTime GetRetryTimeout(const QuicSocketAddress& peer_address,
                                     QuicPacketWriter* writer) const = 0;
  };
  // Receives the outcome of one validation attempt; ownership of the path
  // context is handed back on either callback.
  class QUICHE_EXPORT ResultDelegate {
   public:
    virtual ~ResultDelegate() = default;
    virtual void OnPathValidationSuccess(
        std::unique_ptr<QuicPathValidationContext> context,
        QuicTime start_time) = 0;
    virtual void OnPathValidationFailure(
        std::unique_ptr<QuicPathValidationContext> context) = 0;
  };
  QuicPathValidator(QuicAlarmFactory* alarm_factory, QuicConnectionArena* arena,
                    SendDelegate* delegate, QuicRandom* random,
                    const QuicClock* clock, QuicConnectionContext* context);
  // Begins validating the path described by `context` for `reason`.
  void StartPathValidation(std::unique_ptr<QuicPathValidationContext> context,
                           std::unique_ptr<ResultDelegate> result_delegate,
                           PathValidationReason reason);
  // Processes a PATH_RESPONSE payload received on `self_address`.
  void OnPathResponse(const QuicPathFrameBuffer& probing_data,
                      QuicSocketAddress self_address);
  void CancelPathValidation();
  bool HasPendingPathValidation() const;
  QuicPathValidationContext* GetContext() const;
  std::unique_ptr<QuicPathValidationContext> ReleaseContext();
  PathValidationReason GetPathValidationReason() const { return reason_; }
  void OnRetryTimeout();
  bool IsValidatingPeerAddress(const QuicSocketAddress& effective_peer_address);
  void MaybeWritePacketToAddress(const char* buffer, size_t buf_len,
                                 const QuicSocketAddress& peer_address);
 private:
  friend class test::QuicPathValidatorPeer;
  // Produces the payload for the next PATH_CHALLENGE (see .cc for details).
  const QuicPathFrameBuffer& GeneratePathChallengePayload();
  void SendPathChallengeAndSetAlarm();
  void ResetPathValidation();
  // Payload and send time of one outstanding probe (initial send or retry).
  struct QUICHE_EXPORT ProbingData {
    explicit ProbingData(QuicTime send_time) : send_time(send_time) {}
    QuicPathFrameBuffer frame_buffer;
    QuicTime send_time;
  };
  absl::InlinedVector<ProbingData, 3> probing_data_;
  SendDelegate* send_delegate_;  // Unowned.
  QuicRandom* random_;           // Unowned.
  const QuicClock* clock_;       // Unowned.
  std::unique_ptr<QuicPathValidationContext> path_context_;
  std::unique_ptr<ResultDelegate> result_delegate_;
  QuicArenaScopedPtr<QuicAlarm> retry_timer_;
  size_t retry_count_;
  PathValidationReason reason_ = PathValidationReason::kReasonUnknown;
};
}
#endif
#include "quiche/quic/core/quic_path_validator.h"
#include <memory>
#include <ostream>
#include <utility>
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
namespace quic {
// Alarm delegate that forwards retry-timeout firings back to the validator
// so it can resend the PATH_CHALLENGE.
class RetryAlarmDelegate : public QuicAlarm::DelegateWithContext {
 public:
  explicit RetryAlarmDelegate(QuicPathValidator* path_validator,
                              QuicConnectionContext* context)
      : QuicAlarm::DelegateWithContext(context),
        path_validator_(path_validator) {}
  RetryAlarmDelegate(const RetryAlarmDelegate&) = delete;
  RetryAlarmDelegate& operator=(const RetryAlarmDelegate&) = delete;
  void OnAlarm() override { path_validator_->OnRetryTimeout(); }
 private:
  QuicPathValidator* path_validator_;  // Unowned.
};
// Streams a human-readable description of the path's two endpoints.
std::ostream& operator<<(std::ostream& os,
                         const QuicPathValidationContext& context) {
  os << " from " << context.self_address_;
  os << " to " << context.peer_address_;
  return os;
}
// Builds a validator that probes paths through |send_delegate| and schedules
// retransmissions with an alarm allocated from the connection |arena|.
QuicPathValidator::QuicPathValidator(QuicAlarmFactory* alarm_factory,
                                     QuicConnectionArena* arena,
                                     SendDelegate* send_delegate,
                                     QuicRandom* random, const QuicClock* clock,
                                     QuicConnectionContext* context)
    : send_delegate_(send_delegate),
      random_(random),
      clock_(clock),
      // The arena-allocated delegate calls back into this->OnRetryTimeout().
      retry_timer_(alarm_factory->CreateAlarm(
          arena->New<RetryAlarmDelegate>(this, context), arena)),
      retry_count_(0u) {}
// Handles an incoming PATH_RESPONSE. If its payload matches one of the
// outstanding probes and it arrived on the expected self address, validation
// succeeds (reported with that probe's send time) and all state is reset.
// Mismatched addresses or unknown payloads are logged and ignored.
void QuicPathValidator::OnPathResponse(const QuicPathFrameBuffer& probing_data,
                                       QuicSocketAddress self_address) {
  if (!HasPendingPathValidation()) {
    return;
  }
  QUIC_DVLOG(1) << "Match PATH_RESPONSE received on " << self_address;
  QUIC_BUG_IF(quic_bug_12402_1, !path_context_->self_address().IsInitialized())
      << "Self address should have been known by now";
  if (self_address != path_context_->self_address()) {
    QUIC_DVLOG(1) << "Expect the response to be received on "
                  << path_context_->self_address();
    return;
  }
  // Accept a response to any probe sent during this validation, not only the
  // most recent one.
  for (const auto& probe : probing_data_) {
    if (probe.frame_buffer == probing_data) {
      result_delegate_->OnPathValidationSuccess(std::move(path_context_),
                                                probe.send_time);
      ResetPathValidation();
      return;
    }
  }
  QUIC_DVLOG(1) << "PATH_RESPONSE with payload " << probing_data.data()
                << " doesn't match the probing data.";
}
// Starts validating |context|'s path for |reason|. If another validation is
// already in flight this is flagged as a bug and the old one is silently
// dropped (its delegate is NOT notified) before the new one begins.
void QuicPathValidator::StartPathValidation(
    std::unique_ptr<QuicPathValidationContext> context,
    std::unique_ptr<ResultDelegate> result_delegate,
    PathValidationReason reason) {
  QUICHE_DCHECK(context);
  QUIC_DLOG(INFO) << "Start validating path " << *context
                  << " via writer: " << context->WriterToUse();
  if (path_context_ != nullptr) {
    QUIC_BUG(quic_bug_10876_1)
        << "There is an on-going validation on path " << *path_context_;
    ResetPathValidation();
  }
  reason_ = reason;
  path_context_ = std::move(context);
  result_delegate_ = std::move(result_delegate);
  // Sends the first PATH_CHALLENGE and arms the retry alarm.
  SendPathChallengeAndSetAlarm();
}
// Stops the retry alarm and clears all per-validation state. Does not notify
// the result delegate.
void QuicPathValidator::ResetPathValidation() {
  retry_timer_->Cancel();
  path_context_.reset();
  result_delegate_.reset();
  retry_count_ = 0;
  reason_ = PathValidationReason::kReasonUnknown;
}
// Aborts any in-flight validation, handing the context back to the result
// delegate as a failure. No-op when nothing is pending.
void QuicPathValidator::CancelPathValidation() {
  if (path_context_ != nullptr) {
    QUIC_DVLOG(1) << "Cancel validation on path" << *path_context_;
    result_delegate_->OnPathValidationFailure(std::move(path_context_));
    ResetPathValidation();
  }
}
// True while a validation owns a path context (probes outstanding).
bool QuicPathValidator::HasPendingPathValidation() const {
  return static_cast<bool>(path_context_);
}
// Returns the context being validated, or nullptr when idle. Ownership is
// retained by the validator.
QuicPathValidationContext* QuicPathValidator::GetContext() const {
  return path_context_.get();
}
// Transfers ownership of the pending context to the caller and clears the
// remaining validation state (alarm, retry count, reason).
std::unique_ptr<QuicPathValidationContext> QuicPathValidator::ReleaseContext() {
  std::unique_ptr<QuicPathValidationContext> released =
      std::move(path_context_);
  ResetPathValidation();
  return released;
}
// Records a new probe stamped with the current time, fills its payload with
// random bytes, and returns that payload buffer.
const QuicPathFrameBuffer& QuicPathValidator::GeneratePathChallengePayload() {
  probing_data_.emplace_back(clock_->Now());
  ProbingData& probe = probing_data_.back();
  random_->RandBytes(probe.frame_buffer.data(), sizeof(QuicPathFrameBuffer));
  return probe.frame_buffer;
}
// Fired by the retry alarm: sends another PATH_CHALLENGE, or cancels the
// validation (reporting failure) once the retry budget is exhausted.
void QuicPathValidator::OnRetryTimeout() {
  if (++retry_count_ > kMaxRetryTimes) {
    CancelPathValidation();
    return;
  }
  QUIC_DVLOG(1) << "Send another PATH_CHALLENGE on path " << *path_context_;
  SendPathChallengeAndSetAlarm();
}
// Emits one PATH_CHALLENGE through the send delegate. When the delegate
// agrees to continue, the retry alarm is armed with the delegate-provided
// timeout; otherwise the validation is aborted.
void QuicPathValidator::SendPathChallengeAndSetAlarm() {
  const bool should_continue = send_delegate_->SendPathChallenge(
      GeneratePathChallengePayload(), path_context_->self_address(),
      path_context_->peer_address(), path_context_->effective_peer_address(),
      path_context_->WriterToUse());
  if (should_continue) {
    retry_timer_->Set(send_delegate_->GetRetryTimeout(
        path_context_->peer_address(), path_context_->WriterToUse()));
  } else {
    // The delegate decided not to send on this path anymore.
    CancelPathValidation();
  }
}
// True when a validation is pending and its effective peer address equals
// |effective_peer_address|.
bool QuicPathValidator::IsValidatingPeerAddress(
    const QuicSocketAddress& effective_peer_address) {
  return path_context_ != nullptr &&
         path_context_->effective_peer_address() == effective_peer_address;
}
// Writes |buffer| through the path-under-validation's writer, but only when
// a validation is pending for exactly |peer_address|; otherwise does nothing.
void QuicPathValidator::MaybeWritePacketToAddress(
    const char* buffer, size_t buf_len, const QuicSocketAddress& peer_address) {
  if (HasPendingPathValidation() &&
      path_context_->peer_address() == peer_address) {
    QUIC_DVLOG(1) << "Path validator is sending packet of size " << buf_len
                  << " from " << path_context_->self_address() << " to "
                  << path_context_->peer_address();
    path_context_->WriterToUse()->WritePacket(
        buffer, buf_len, path_context_->self_address().host(),
        path_context_->peer_address(), nullptr, QuicPacketWriterParams());
  }
}
} | #include "quiche/quic/core/quic_path_validator.h"
#include <memory>
#include "quiche/quic/core/frames/quic_path_challenge_frame.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/mock_random.h"
#include "quiche/quic/test_tools/quic_path_validator_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::_;
using testing::Invoke;
using testing::Return;
namespace quic {
namespace test {
// gMock stand-in for the validator's SendDelegate so tests can capture
// PATH_CHALLENGE payloads/addresses and control the retry timeout.
class MockSendDelegate : public QuicPathValidator::SendDelegate {
 public:
  MOCK_METHOD(bool, SendPathChallenge,
              (const QuicPathFrameBuffer&, const QuicSocketAddress&,
               const QuicSocketAddress&, const QuicSocketAddress&,
               QuicPacketWriter*),
              (override));
  MOCK_METHOD(QuicTime, GetRetryTimeout,
              (const QuicSocketAddress&, QuicPacketWriter*), (const, override));
};
// Fixture wiring a real QuicPathValidator to a mock alarm factory, send
// delegate, RNG and clock. context_ and result_delegate_ are raw pointers
// whose ownership is handed to the validator via StartPathValidation() in
// each test.
class QuicPathValidatorTest : public QuicTest {
 public:
  QuicPathValidatorTest()
      : path_validator_(&alarm_factory_, &arena_, &send_delegate_, &random_,
                        &clock_,
                        nullptr),
        context_(new MockQuicPathValidationContext(
            self_address_, peer_address_, effective_peer_address_, &writer_)),
        result_delegate_(
            new testing::StrictMock<MockQuicPathValidationResultDelegate>()) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
    // Default retry timeout: 3 * initial RTT from "now".
    ON_CALL(send_delegate_, GetRetryTimeout(_, _))
        .WillByDefault(
            Return(clock_.ApproximateNow() +
                   3 * QuicTime::Delta::FromMilliseconds(kInitialRttMs)));
  }

 protected:
  quic::test::MockAlarmFactory alarm_factory_;
  MockSendDelegate send_delegate_;
  MockRandom random_;
  MockClock clock_;
  QuicConnectionArena arena_;
  QuicPathValidator path_validator_;
  QuicSocketAddress self_address_{QuicIpAddress::Any4(), 443};
  QuicSocketAddress peer_address_{QuicIpAddress::Loopback4(), 443};
  QuicSocketAddress effective_peer_address_{QuicIpAddress::Loopback4(), 12345};
  MockPacketWriter writer_;
  MockQuicPathValidationContext* context_;  // Owned by path_validator_ later.
  MockQuicPathValidationResultDelegate* result_delegate_;  // Ditto.
};
// A matching PATH_RESPONSE arriving on the original self address completes
// validation on the first probe, reporting the probe's send time.
TEST_F(QuicPathValidatorTest, PathValidationSuccessOnFirstRound) {
  QuicPathFrameBuffer challenge_data;
  EXPECT_CALL(send_delegate_,
              SendPathChallenge(_, self_address_, peer_address_,
                                effective_peer_address_, &writer_))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           const QuicSocketAddress&, QuicPacketWriter*) {
        memcpy(challenge_data.data(), payload.data(), payload.size());
        return true;
      }));
  EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_));
  const QuicTime expected_start_time = clock_.Now();
  path_validator_.StartPathValidation(
      std::unique_ptr<QuicPathValidationContext>(context_),
      std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
      PathValidationReason::kMultiPort);
  EXPECT_TRUE(path_validator_.HasPendingPathValidation());
  EXPECT_EQ(PathValidationReason::kMultiPort,
            path_validator_.GetPathValidationReason());
  EXPECT_TRUE(path_validator_.IsValidatingPeerAddress(effective_peer_address_));
  EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, _))
      .WillOnce(Invoke([=](std::unique_ptr<QuicPathValidationContext> context,
                           QuicTime start_time) {
        EXPECT_EQ(context.get(), context_);
        EXPECT_EQ(start_time, expected_start_time);
      }));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(kInitialRttMs));
  path_validator_.OnPathResponse(challenge_data, self_address_);
  EXPECT_FALSE(path_validator_.HasPendingPathValidation());
  EXPECT_EQ(PathValidationReason::kReasonUnknown,
            path_validator_.GetPathValidationReason());
}
// A response on a different self address is ignored; the same payload is
// later accepted on the expected self address.
TEST_F(QuicPathValidatorTest, RespondWithDifferentSelfAddress) {
  QuicPathFrameBuffer challenge_data;
  EXPECT_CALL(send_delegate_,
              SendPathChallenge(_, self_address_, peer_address_,
                                effective_peer_address_, &writer_))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer payload,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           const QuicSocketAddress&, QuicPacketWriter*) {
        memcpy(challenge_data.data(), payload.data(), payload.size());
        return true;
      }));
  EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_));
  const QuicTime expected_start_time = clock_.Now();
  path_validator_.StartPathValidation(
      std::unique_ptr<QuicPathValidationContext>(context_),
      std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
      PathValidationReason::kMultiPort);
  const QuicSocketAddress kAlternativeSelfAddress(QuicIpAddress::Any6(), 54321);
  EXPECT_NE(kAlternativeSelfAddress, self_address_);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(kInitialRttMs));
  path_validator_.OnPathResponse(challenge_data, kAlternativeSelfAddress);
  EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, _))
      .WillOnce(Invoke([=](std::unique_ptr<QuicPathValidationContext> context,
                           QuicTime start_time) {
        EXPECT_EQ(context->self_address(), self_address_);
        EXPECT_EQ(start_time, expected_start_time);
      }));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(kInitialRttMs));
  path_validator_.OnPathResponse(challenge_data, self_address_);
  EXPECT_EQ(PathValidationReason::kReasonUnknown,
            path_validator_.GetPathValidationReason());
}
// A response carrying the FIRST challenge payload still validates after a
// retry has sent a second, different payload.
TEST_F(QuicPathValidatorTest, RespondAfter1stRetry) {
  QuicPathFrameBuffer challenge_data;
  EXPECT_CALL(send_delegate_,
              SendPathChallenge(_, self_address_, peer_address_,
                                effective_peer_address_, &writer_))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           const QuicSocketAddress&, QuicPacketWriter*) {
        memcpy(challenge_data.data(), payload.data(), payload.size());
        return true;
      }))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           const QuicSocketAddress&, QuicPacketWriter*) {
        EXPECT_NE(payload, challenge_data);
        return true;
      }));
  EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
      .Times(2u);
  const QuicTime start_time = clock_.Now();
  path_validator_.StartPathValidation(
      std::unique_ptr<QuicPathValidationContext>(context_),
      std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
      PathValidationReason::kMultiPort);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
  random_.ChangeValue();
  alarm_factory_.FireAlarm(
      QuicPathValidatorPeer::retry_timer(&path_validator_));
  EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, start_time));
  path_validator_.OnPathResponse(challenge_data, self_address_);
  EXPECT_FALSE(path_validator_.HasPendingPathValidation());
}
// A response matching the retransmitted (second) challenge validates with
// the retry's send time.
TEST_F(QuicPathValidatorTest, RespondToRetryChallenge) {
  QuicPathFrameBuffer challenge_data;
  EXPECT_CALL(send_delegate_,
              SendPathChallenge(_, self_address_, peer_address_,
                                effective_peer_address_, &writer_))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           const QuicSocketAddress&, QuicPacketWriter*) {
        memcpy(challenge_data.data(), payload.data(), payload.size());
        return true;
      }))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer& payload,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           const QuicSocketAddress&, QuicPacketWriter*) {
        EXPECT_NE(challenge_data, payload);
        memcpy(challenge_data.data(), payload.data(), payload.size());
        return true;
      }));
  EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
      .Times(2u);
  path_validator_.StartPathValidation(
      std::unique_ptr<QuicPathValidationContext>(context_),
      std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
      PathValidationReason::kMultiPort);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
  const QuicTime start_time = clock_.Now();
  random_.ChangeValue();
  alarm_factory_.FireAlarm(
      QuicPathValidatorPeer::retry_timer(&path_validator_));
  EXPECT_CALL(*result_delegate_, OnPathValidationSuccess(_, start_time));
  path_validator_.OnPathResponse(challenge_data, self_address_);
  EXPECT_FALSE(path_validator_.HasPendingPathValidation());
}
// With no matching response, validation fails after kMaxRetryTimes retries;
// a payload that never matched any probe is ignored along the way.
TEST_F(QuicPathValidatorTest, ValidationTimeOut) {
  EXPECT_CALL(send_delegate_,
              SendPathChallenge(_, self_address_, peer_address_,
                                effective_peer_address_, &writer_))
      .Times(3u)
      .WillRepeatedly(Return(true));
  EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
      .Times(3u);
  path_validator_.StartPathValidation(
      std::unique_ptr<QuicPathValidationContext>(context_),
      std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
      PathValidationReason::kMultiPort);
  QuicPathFrameBuffer challenge_data;
  memset(challenge_data.data(), 'a', challenge_data.size());
  path_validator_.OnPathResponse(challenge_data, self_address_);
  EXPECT_CALL(*result_delegate_, OnPathValidationFailure(_))
      .WillOnce(Invoke([=](std::unique_ptr<QuicPathValidationContext> context) {
        EXPECT_EQ(context_, context.get());
      }));
  for (size_t i = 0; i <= QuicPathValidator::kMaxRetryTimes; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
    alarm_factory_.FireAlarm(
        QuicPathValidatorPeer::retry_timer(&path_validator_));
  }
  EXPECT_EQ(PathValidationReason::kReasonUnknown,
            path_validator_.GetPathValidationReason());
}
// If the send delegate fails (and cancels from within the callback), the
// validation ends immediately and the retry alarm is never armed.
TEST_F(QuicPathValidatorTest, SendPathChallengeError) {
  EXPECT_CALL(send_delegate_,
              SendPathChallenge(_, self_address_, peer_address_,
                                effective_peer_address_, &writer_))
      .WillOnce(Invoke([&](const QuicPathFrameBuffer&, const QuicSocketAddress&,
                           const QuicSocketAddress&, const QuicSocketAddress&,
                           QuicPacketWriter*) {
        path_validator_.CancelPathValidation();
        return false;
      }));
  EXPECT_CALL(send_delegate_, GetRetryTimeout(peer_address_, &writer_))
      .Times(0u);
  EXPECT_CALL(*result_delegate_, OnPathValidationFailure(_));
  path_validator_.StartPathValidation(
      std::unique_ptr<QuicPathValidationContext>(context_),
      std::unique_ptr<MockQuicPathValidationResultDelegate>(result_delegate_),
      PathValidationReason::kMultiPort);
  EXPECT_FALSE(path_validator_.HasPendingPathValidation());
  EXPECT_FALSE(QuicPathValidatorPeer::retry_timer(&path_validator_)->IsSet());
  EXPECT_EQ(PathValidationReason::kReasonUnknown,
            path_validator_.GetPathValidationReason());
}
}
} |
214 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
namespace tflite {
namespace resource {
// A resource that holds a single mutable TfLiteTensor, deep-copied into it
// via AssignFrom(). Movable but not copyable: the variable uniquely owns its
// tensor's data buffer and dims.
class ResourceVariable : public ResourceBase {
 public:
  ResourceVariable();
  ResourceVariable(ResourceVariable&& other);

  ResourceVariable(const ResourceVariable&) = delete;
  ResourceVariable& operator=(const ResourceVariable&) = delete;

  ~ResourceVariable() override;

  // Deep-copies |tensor| (type, params, quantization, shape and data) into
  // this variable, reusing existing allocations when sizes match.
  TfLiteStatus AssignFrom(const TfLiteTensor* tensor);

  // Returns the stored tensor, or nullptr before the first AssignFrom().
  TfLiteTensor* GetTensor() { return is_initialized_ ? &tensor_ : nullptr; }

  bool IsInitialized() override { return is_initialized_; }

  // Bytes held by the stored tensor's data buffer (0 when uninitialized).
  size_t GetMemoryUsage() override {
    return is_initialized_ ? tensor_.bytes : 0;
  }

 protected:
  TfLiteTensor tensor_;           // Owns data.raw and dims once initialized.
  bool is_initialized_ = false;
};

// Registers a default-constructed ResourceVariable under |resource_id| if
// one is not already present.
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
                                          int resource_id);
// Looks up the variable registered under |resource_id|; nullptr when absent.
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id);
// True for a kTfLiteResource tensor not claimed by any delegate.
bool IsBuiltinResource(const TfLiteTensor* tensor);
}
}
#endif
#include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include "tensorflow/lite/core/c/c_api_types.h"
namespace tflite {
namespace resource {
// Zero-fills tensor_ so the destructor is safe to run before any AssignFrom.
ResourceVariable::ResourceVariable() {
  memset(&tensor_, 0, sizeof(TfLiteTensor));
}
// Steals |other|'s tensor, leaving |other| zeroed and uninitialized so its
// destructor does not free the transferred buffer.
ResourceVariable::ResourceVariable(ResourceVariable&& other) {
  tensor_ = other.tensor_;
  is_initialized_ = other.is_initialized_;
  memset(&other.tensor_, 0, sizeof(TfLiteTensor));
  other.is_initialized_ = false;
}
// Releases the owned data buffer and shape array. Safe on moved-from or
// never-assigned variables because is_initialized_ is false there.
ResourceVariable::~ResourceVariable() {
  if (is_initialized_) {
    free(tensor_.data.raw);
    if (tensor_.dims) {
      TfLiteIntArrayFree(tensor_.dims);
    }
  }
}
// Deep-copies |tensor| into this variable, reusing the previously owned data
// buffer and dims when their sizes already match.
//
// Returns kTfLiteOk on success. If reallocating the data buffer fails, the
// error is propagated and the variable is left uninitialized (previously the
// realloc status was ignored and memcpy wrote through a stale buffer).
TfLiteStatus ResourceVariable::AssignFrom(const TfLiteTensor* tensor) {
  // Stash currently owned resources so they can be reused or released.
  char* old_raw = tensor_.data.raw;
  size_t old_bytes = tensor_.bytes;
  TfLiteIntArray* old_dims = tensor_.dims;
  // Rebuild the metadata from scratch, then copy what we need from |tensor|.
  memset(&tensor_, 0, sizeof(tensor_));
  tensor_.name = "ResourceVariable";
  tensor_.allocation_type = kTfLiteDynamic;
  tensor_.type = tensor->type;
  tensor_.params = tensor->params;
  tensor_.quantization = tensor->quantization;
  // Reuse the old shape array when it already matches; otherwise replace it.
  if (TfLiteIntArrayEqual(old_dims, tensor->dims)) {
    tensor_.dims = old_dims;
  } else {
    TfLiteIntArrayFree(old_dims);
    tensor_.dims = TfLiteIntArrayCopy(tensor->dims);
  }
  // Reuse the old buffer when the byte size is unchanged; otherwise realloc.
  tensor_.data.raw = old_raw;
  if (old_bytes != tensor->bytes) {
    if (TfLiteTensorRealloc(tensor->bytes, &tensor_) != kTfLiteOk) {
      // Allocation failed: don't memcpy through a dangling/short buffer.
      is_initialized_ = false;
      return kTfLiteError;
    }
  } else {
    tensor_.bytes = old_bytes;
  }
  memcpy(tensor_.data.raw, tensor->data.raw, tensor_.bytes);
  is_initialized_ = true;
  return kTfLiteOk;
}
// Inserts a default-constructed ResourceVariable under |resource_id| unless
// one is already registered.
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
                                          int resource_id) {
  if (resources->find(resource_id) == resources->end()) {
    resources->emplace(resource_id, std::make_unique<ResourceVariable>());
  }
}
// Fetches the ResourceVariable registered under |resource_id|, or nullptr
// when no such entry exists.
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id) {
  const auto it = resources->find(resource_id);
  if (it == resources->end()) {
    return nullptr;
  }
  return static_cast<ResourceVariable*>(it->second.get());
}
// A "builtin" resource tensor is a kTfLiteResource tensor not claimed by any
// delegate.
bool IsBuiltinResource(const TfLiteTensor* tensor) {
  if (tensor == nullptr) {
    return false;
  }
  return tensor->type == kTfLiteResource && tensor->delegate == nullptr;
}
}
} | #include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace resource {
// Test helper: builds a float32 tensor of |shape| filled with
// |default_value|. The data buffer is malloc'ed here; whether
// TfLiteTensorFree releases it depends on |alloc_type| (the kTfLiteArenaRw
// tests below free() it explicitly).
void InitTensor(const std::vector<int>& shape, TfLiteAllocationType alloc_type,
                float default_value, TfLiteTensor* tensor) {
  memset(tensor, 0, sizeof(TfLiteTensor));
  int num_elements = 1;
  for (auto dim : shape) num_elements *= dim;
  if (shape.empty()) num_elements = 0;
  float* buf = static_cast<float*>(malloc(sizeof(float) * num_elements));
  for (int i = 0; i < num_elements; ++i) buf[i] = default_value;
  const int bytes = num_elements * sizeof(buf[0]);
  auto* dims = ConvertArrayToTfLiteIntArray(shape.size(), shape.data());
  TfLiteTensorReset(TfLiteType::kTfLiteFloat32, nullptr, dims, {},
                    reinterpret_cast<char*>(buf), bytes, alloc_type, nullptr,
                    false, tensor);
}
// Assigning from an arena-backed tensor deep-copies it into a dynamic
// tensor owned by the variable.
TEST(ResourceTest, NonDynamicTensorAssign) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor;
  std::vector<int> shape = {1};
  InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  ASSERT_THAT(value, DimsAre({1}));
  EXPECT_EQ(1.0f, value->data.f[0]);
  free(tensor.data.raw);
  TfLiteTensorFree(&tensor);
}
// Same as above but sourcing from an already-dynamic tensor.
TEST(ResourceTest, DynamicTensorAssign) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor;
  std::vector<int> shape = {1};
  InitTensor(shape, kTfLiteDynamic, 1.0f, &tensor);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  ASSERT_THAT(value, DimsAre({1}));
  EXPECT_EQ(1.0f, value->data.f[0]);
  TfLiteTensorFree(&tensor);
}
// Re-assigning an equal-sized tensor overwrites the stored data in place.
TEST(ResourceTest, AssignSameSizeTensor) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor_a, tensor_b;
  std::vector<int> shape_a = {1};
  std::vector<int> shape_b = {1};
  InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
  InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  ASSERT_THAT(value, DimsAre({1}));
  EXPECT_EQ(1.0f, value->data.f[0]);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
  EXPECT_TRUE(var.IsInitialized());
  value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  ASSERT_THAT(value, DimsAre({1}));
  EXPECT_EQ(4.0f, value->data.f[0]);
  TfLiteTensorFree(&tensor_a);
  TfLiteTensorFree(&tensor_b);
}
// Re-assigning a larger tensor grows the variable's buffer and shape.
TEST(ResourceTest, AssignDifferentSizeTensor) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor_a, tensor_b;
  std::vector<int> shape_a = {1};
  std::vector<int> shape_b = {2};
  InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
  InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(1, value->dims->data[0]);
  EXPECT_EQ(1.0f, value->data.f[0]);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
  EXPECT_TRUE(var.IsInitialized());
  value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float) * 2, value->bytes);
  ASSERT_THAT(value, DimsAre({2}));
  EXPECT_EQ(4.0f, value->data.f[0]);
  TfLiteTensorFree(&tensor_a);
  TfLiteTensorFree(&tensor_b);
}
// IsBuiltinResource requires: non-null, kTfLiteResource type, no delegate.
TEST(IsBuiltinResource, IsBuiltinResourceTest) {
  TfLiteTensor tensor;
  tensor.type = kTfLiteResource;
  tensor.delegate = nullptr;
  EXPECT_TRUE(IsBuiltinResource(&tensor));
  EXPECT_FALSE(IsBuiltinResource(nullptr));
  tensor.type = kTfLiteFloat32;
  EXPECT_FALSE(IsBuiltinResource(&tensor));
  tensor.type = kTfLiteResource;
  TfLiteDelegate delegate;
  tensor.delegate = &delegate;
  EXPECT_FALSE(IsBuiltinResource(&tensor));
}
// GetMemoryUsage reports the stored tensor's byte size.
TEST(ResourceTest, GetMemoryUsage) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor;
  std::vector<int> shape = {100};
  InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(100 * sizeof(float), value->bytes);
  ASSERT_THAT(value, DimsAre({100}));
  EXPECT_EQ(1.0f, value->data.f[0]);
  EXPECT_EQ(100 * sizeof(float), var.GetMemoryUsage());
  free(tensor.data.raw);
  TfLiteTensorFree(&tensor);
}
}
} |
215 | #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_
#define STORAGE_LEVELDB_INCLUDE_CACHE_H_
#include <cstdint>
#include "leveldb/export.h"
#include "leveldb/slice.h"
namespace leveldb {
class LEVELDB_EXPORT Cache;
LEVELDB_EXPORT Cache* NewLRUCache(size_t capacity);
// Interface for a cache mapping Slice keys to opaque values. Each entry
// carries a "charge" counted against the cache capacity; Lookup/Insert
// return pinned handles that the caller must Release().
class LEVELDB_EXPORT Cache {
 public:
  Cache() = default;

  Cache(const Cache&) = delete;
  Cache& operator=(const Cache&) = delete;

  virtual ~Cache();

  // Opaque handle to an entry stored in the cache.
  struct Handle {};

  // Inserts key->value with the given charge. |deleter| runs when the entry
  // is dropped and no longer pinned. Returns a pinned handle to the entry.
  virtual Handle* Insert(const Slice& key, void* value, size_t charge,
                         void (*deleter)(const Slice& key, void* value)) = 0;
  // Returns a pinned handle for key, or nullptr when absent.
  virtual Handle* Lookup(const Slice& key) = 0;
  // Unpins a handle previously returned by Insert()/Lookup().
  virtual void Release(Handle* handle) = 0;
  // Returns the value stored in |handle|.
  virtual void* Value(Handle* handle) = 0;
  // Drops key's entry; its deleter runs once all pins are released.
  virtual void Erase(const Slice& key) = 0;
  // Returns a new numeric id (e.g. for partitioning a shared keyspace).
  virtual uint64_t NewId() = 0;
  // Evicts all entries that are not actively pinned. Default: no-op.
  virtual void Prune() {}
  // Sum of the charges of all resident entries.
  virtual size_t TotalCharge() const = 0;
};
}
#endif
#include "leveldb/cache.h"
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/hash.h"
#include "util/mutexlock.h"
namespace leveldb {
Cache::~Cache() {}
namespace {
// A cache entry: a variable-length heap allocation with the key bytes stored
// inline at key_data. Entries are chained in the hash table via next_hash
// and, while cached, linked into exactly one circular list (lru_ or in_use_)
// via next/prev.
struct LRUHandle {
  void* value;
  void (*deleter)(const Slice&, void* value);
  LRUHandle* next_hash;  // Hash-bucket chain.
  LRUHandle* next;       // Circular doubly-linked list links.
  LRUHandle* prev;
  size_t charge;
  size_t key_length;
  bool in_cache;     // Whether the entry is still referenced by the cache.
  uint32_t refs;     // References, including the cache's own reference.
  uint32_t hash;     // Hash of key(); used for sharding and fast compares.
  char key_data[1];  // Start of key; the allocation extends past this field.

  Slice key() const {
    // next is only equal to this if the handle is the head of an empty
    // list; list heads never have meaningful keys.
    assert(next != this);
    return Slice(key_data, key_length);
  }
};
// Chained hash table of LRUHandle*, keyed by (hash, key). The bucket count
// is always a power of two so `hash & (length_ - 1)` picks the bucket, and
// the table grows once elems_ exceeds length_ (keeping average chain
// length <= 1).
class HandleTable {
 public:
  HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
  ~HandleTable() { delete[] list_; }

  LRUHandle* Lookup(const Slice& key, uint32_t hash) {
    return *FindPointer(key, hash);
  }

  // Inserts h, returning any displaced entry with the same key (or nullptr).
  LRUHandle* Insert(LRUHandle* h) {
    LRUHandle** ptr = FindPointer(h->key(), h->hash);
    LRUHandle* old = *ptr;
    h->next_hash = (old == nullptr ? nullptr : old->next_hash);
    *ptr = h;
    if (old == nullptr) {
      ++elems_;
      if (elems_ > length_) {
        Resize();
      }
    }
    return old;
  }

  // Unlinks and returns the matching entry (or nullptr). Does not free it.
  LRUHandle* Remove(const Slice& key, uint32_t hash) {
    LRUHandle** ptr = FindPointer(key, hash);
    LRUHandle* result = *ptr;
    if (result != nullptr) {
      *ptr = result->next_hash;
      --elems_;
    }
    return result;
  }

 private:
  uint32_t length_;   // Number of buckets (power of two).
  uint32_t elems_;    // Number of stored entries.
  LRUHandle** list_;  // Bucket array; each bucket is a next_hash chain.

  // Returns a pointer to the slot that points at the entry matching
  // key/hash, or to the null trailing slot of the bucket chain if absent —
  // so callers can read, replace, or unlink through it.
  LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
    LRUHandle** ptr = &list_[hash & (length_ - 1)];
    while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
      ptr = &(*ptr)->next_hash;
    }
    return ptr;
  }

  // Grows the bucket array (minimum 4, doubling while smaller than elems_)
  // and rehashes every entry into it.
  void Resize() {
    uint32_t new_length = 4;
    while (new_length < elems_) {
      new_length *= 2;
    }
    LRUHandle** new_list = new LRUHandle*[new_length];
    memset(new_list, 0, sizeof(new_list[0]) * new_length);
    uint32_t count = 0;
    for (uint32_t i = 0; i < length_; i++) {
      LRUHandle* h = list_[i];
      while (h != nullptr) {
        LRUHandle* next = h->next_hash;
        uint32_t hash = h->hash;
        LRUHandle** ptr = &new_list[hash & (new_length - 1)];
        h->next_hash = *ptr;
        *ptr = h;
        h = next;
        count++;
      }
    }
    assert(elems_ == count);
    delete[] list_;
    list_ = new_list;
    length_ = new_length;
  }
};
// A single shard of the sharded cache: a mutex-protected LRU cache keyed by
// (key, precomputed hash).
class LRUCache {
 public:
  LRUCache();
  ~LRUCache();

  // Separate from the constructor so callers can build an array of shards.
  void SetCapacity(size_t capacity) { capacity_ = capacity; }

  // Like the Cache methods, but with an extra "hash" parameter.
  Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
                        size_t charge,
                        void (*deleter)(const Slice& key, void* value));
  Cache::Handle* Lookup(const Slice& key, uint32_t hash);
  void Release(Cache::Handle* handle);
  void Erase(const Slice& key, uint32_t hash);
  void Prune();
  size_t TotalCharge() const {
    MutexLock l(&mutex_);
    return usage_;
  }

 private:
  void LRU_Remove(LRUHandle* e);
  void LRU_Append(LRUHandle* list, LRUHandle* e);
  void Ref(LRUHandle* e);
  void Unref(LRUHandle* e);
  bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  size_t capacity_;  // Set via SetCapacity() before use.

  // mutex_ protects all state below.
  mutable port::Mutex mutex_;
  size_t usage_ GUARDED_BY(mutex_);  // Sum of charges of resident entries.

  // Dummy head of the LRU list: entries with refs == 1 and in_cache == true
  // (held only by the cache, hence eviction candidates). lru_.next is the
  // oldest entry.
  LRUHandle lru_ GUARDED_BY(mutex_);

  // Dummy head of the in-use list: entries pinned by clients
  // (refs >= 2, in_cache == true).
  LRUHandle in_use_ GUARDED_BY(mutex_);

  HandleTable table_ GUARDED_BY(mutex_);
};
LRUCache::LRUCache() : capacity_(0), usage_(0) {
  // Make both circular lists empty (heads point at themselves).
  lru_.next = &lru_;
  lru_.prev = &lru_;
  in_use_.next = &in_use_;
  in_use_.prev = &in_use_;
}
// Destroys all remaining (unpinned) entries. Clients must have released
// every handle first, hence the in_use_ emptiness assertion.
LRUCache::~LRUCache() {
  assert(in_use_.next == &in_use_);  // Error if a caller holds a handle.
  for (LRUHandle* e = lru_.next; e != &lru_;) {
    LRUHandle* next = e->next;
    assert(e->in_cache);
    e->in_cache = false;
    assert(e->refs == 1);  // Invariant of the lru_ list.
    Unref(e);
    e = next;
  }
}
// Takes one reference on e. When the cache's reference was the only one,
// the entry stops being an eviction candidate and moves to the in-use list.
void LRUCache::Ref(LRUHandle* e) {
  const bool only_cache_ref = (e->refs == 1 && e->in_cache);
  if (only_cache_ref) {
    LRU_Remove(e);
    LRU_Append(&in_use_, e);
  }
  ++e->refs;
}
// Drops one reference. On the last reference the entry is destroyed (its
// deleter runs); when only the cache's reference remains, the entry moves
// back to the lru_ (eviction-candidate) list.
void LRUCache::Unref(LRUHandle* e) {
  assert(e->refs > 0);
  e->refs--;
  if (e->refs == 0) {  // Deallocate.
    assert(!e->in_cache);
    (*e->deleter)(e->key(), e->value);
    free(e);
  } else if (e->in_cache && e->refs == 1) {
    // No longer in use by clients; eligible for eviction again.
    LRU_Remove(e);
    LRU_Append(&lru_, e);
  }
}
// Unlinks e from whichever circular list it is currently on.
void LRUCache::LRU_Remove(LRUHandle* e) {
  e->next->prev = e->prev;
  e->prev->next = e->next;
}
// Splices e in just before the dummy head, making it the newest entry of
// |list|.
void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
  e->next = list;
  e->prev = list->prev;
  e->prev->next = e;
  e->next->prev = e;
}
// Finds key in the hash table; pins and returns the entry, or nullptr when
// absent.
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
  MutexLock l(&mutex_);
  LRUHandle* entry = table_.Lookup(key, hash);
  if (entry == nullptr) {
    return nullptr;
  }
  Ref(entry);
  return reinterpret_cast<Cache::Handle*>(entry);
}
// Unpins a handle previously returned by Insert()/Lookup().
void LRUCache::Release(Cache::Handle* handle) {
  MutexLock l(&mutex_);
  Unref(reinterpret_cast<LRUHandle*>(handle));
}
// Allocates a variable-length LRUHandle (key bytes inline), registers it in
// the table and in-use list, then evicts the oldest idle entries while over
// capacity. The returned handle is pinned for the caller.
Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
                                size_t charge,
                                void (*deleter)(const Slice& key,
                                                void* value)) {
  MutexLock l(&mutex_);
  // -1 because key_data[1] already accounts for one key byte.
  LRUHandle* e =
      reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
  e->value = value;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->hash = hash;
  e->in_cache = false;
  e->refs = 1;  // for the returned handle.
  std::memcpy(e->key_data, key.data(), key.size());
  if (capacity_ > 0) {
    e->refs++;  // for the cache's own reference.
    e->in_cache = true;
    LRU_Append(&in_use_, e);
    usage_ += charge;
    // Displace any existing entry with the same key.
    FinishErase(table_.Insert(e));
  } else {  // capacity_ == 0 turns caching off; entry is handle-only.
    // next is read by key() in an assert, so it must be initialized.
    e->next = nullptr;
  }
  // Evict oldest idle entries until usage fits the capacity again.
  while (usage_ > capacity_ && lru_.next != &lru_) {
    LRUHandle* old = lru_.next;
    assert(old->refs == 1);
    bool erased = FinishErase(table_.Remove(old->key(), old->hash));
    if (!erased) {  // avoids an unused-variable warning under NDEBUG
      assert(erased);
    }
  }
  return reinterpret_cast<Cache::Handle*>(e);
}
// If e != nullptr, finishes removing *e from the cache (it has already been
// removed from the hash table): unlink from its list, drop the cache's
// charge and reference. Returns whether e was non-null.
bool LRUCache::FinishErase(LRUHandle* e) {
  if (e != nullptr) {
    assert(e->in_cache);
    LRU_Remove(e);
    e->in_cache = false;
    usage_ -= e->charge;
    Unref(e);
  }
  return e != nullptr;
}
// Drops key's entry; its deleter runs once all client pins are released.
void LRUCache::Erase(const Slice& key, uint32_t hash) {
  MutexLock l(&mutex_);
  FinishErase(table_.Remove(key, hash));
}
// Evicts every entry on the lru_ list, i.e. all entries not pinned by
// clients.
void LRUCache::Prune() {
  MutexLock l(&mutex_);
  while (lru_.next != &lru_) {
    LRUHandle* e = lru_.next;
    assert(e->refs == 1);
    bool erased = FinishErase(table_.Remove(e->key(), e->hash));
    if (!erased) {  // avoids an unused-variable warning under NDEBUG
      assert(erased);
    }
  }
}
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;
// Public Cache implementation: 2^kNumShardBits independent LRUCache shards
// selected by the top bits of the key hash, reducing lock contention.
class ShardedLRUCache : public Cache {
 private:
  LRUCache shard_[kNumShards];
  port::Mutex id_mutex_;  // Guards last_id_ only.
  uint64_t last_id_;

  static inline uint32_t HashSlice(const Slice& s) {
    return Hash(s.data(), s.size(), 0);
  }

  // The top hash bits pick the shard; low bits stay for in-shard bucketing.
  static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }

 public:
  explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
    // Round up so the summed shard capacities cover the requested total.
    const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
    for (int s = 0; s < kNumShards; s++) {
      shard_[s].SetCapacity(per_shard);
    }
  }
  ~ShardedLRUCache() override {}
  Handle* Insert(const Slice& key, void* value, size_t charge,
                 void (*deleter)(const Slice& key, void* value)) override {
    const uint32_t hash = HashSlice(key);
    return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
  }
  Handle* Lookup(const Slice& key) override {
    const uint32_t hash = HashSlice(key);
    return shard_[Shard(hash)].Lookup(key, hash);
  }
  void Release(Handle* handle) override {
    // The stored hash identifies the owning shard without re-hashing the key.
    LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
    shard_[Shard(h->hash)].Release(handle);
  }
  void Erase(const Slice& key) override {
    const uint32_t hash = HashSlice(key);
    shard_[Shard(hash)].Erase(key, hash);
  }
  void* Value(Handle* handle) override {
    return reinterpret_cast<LRUHandle*>(handle)->value;
  }
  uint64_t NewId() override {
    MutexLock l(&id_mutex_);
    return ++(last_id_);
  }
  void Prune() override {
    for (int s = 0; s < kNumShards; s++) {
      shard_[s].Prune();
    }
  }
  size_t TotalCharge() const override {
    size_t total = 0;
    for (int s = 0; s < kNumShards; s++) {
      total += shard_[s].TotalCharge();
    }
    return total;
  }
};
}
// Creates a sharded LRU cache holding up to |capacity| total charge.
Cache* NewLRUCache(size_t capacity) { return new ShardedLRUCache(capacity); }
} | #include "leveldb/cache.h"
#include <vector>
#include "gtest/gtest.h"
#include "util/coding.h"
namespace leveldb {
// Keys/values in these tests are small ints round-tripped through fixed
// encodings: keys as 4-byte strings, values as integers stuffed into void*.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); }
// Fixture owning a kCacheSize-charge LRU cache. The static Deleter records
// every (key, value) evicted or erased into the current test's vectors via
// the current_ back-pointer.
class CacheTest : public testing::Test {
 public:
  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static constexpr int kCacheSize = 1000;
  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  Cache* cache_;

  CacheTest() : cache_(NewLRUCache(kCacheSize)) { current_ = this; }
  ~CacheTest() { delete cache_; }

  // Returns the cached value for key, or -1 on a miss.
  int Lookup(int key) {
    Cache::Handle* handle = cache_->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache_->Value(handle));
    if (handle != nullptr) {
      cache_->Release(handle);
    }
    return r;
  }

  // Inserts and immediately releases, leaving only the cache's reference.
  void Insert(int key, int value, int charge = 1) {
    cache_->Release(cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
                                   &CacheTest::Deleter));
  }

  // Inserts and keeps the pinned handle; the caller must Release() it.
  Cache::Handle* InsertAndReturnHandle(int key, int value, int charge = 1) {
    return cache_->Insert(EncodeKey(key), EncodeValue(value), charge,
                          &CacheTest::Deleter);
  }

  void Erase(int key) { cache_->Erase(EncodeKey(key)); }
  static CacheTest* current_;
};
CacheTest* CacheTest::current_;
// Basic insert/lookup behavior, including overwriting an existing key,
// which must delete the previous value exactly once.
TEST_F(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));
  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));
  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));
  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}
// Erase removes entries and fires the deleter once; erasing a missing or
// already-erased key is a no-op.
TEST_F(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0, deleted_keys_.size());
  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1, deleted_keys_.size());
}
// Entries with outstanding handles stay alive (deleter deferred) until the
// last Release, even after overwrite or erase.
TEST_F(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0, deleted_keys_.size());
  cache_->Release(h1);
  ASSERT_EQ(1, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1, deleted_keys_.size());
  cache_->Release(h2);
  ASSERT_EQ(2, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
}
// Frequently-used entries (100) and pinned entries (300) must survive a
// flood of inserts that exceeds capacity; the idle entry (200) is evicted.
TEST_F(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);
  Insert(300, 301);
  Cache::Handle* h = cache_->Lookup(EncodeKey(300));
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(2000 + i, Lookup(1000 + i));
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(301, Lookup(300));
  cache_->Release(h);
}
// Pinned handles may push total usage past capacity; entries must remain
// accessible while their handles are held, even though the cache is overfull.
TEST_F(CacheTest, UseExceedsCacheSize) {
  std::vector<Cache::Handle*> h;
  for (int i = 0; i < kCacheSize + 100; i++) {
    h.push_back(InsertAndReturnHandle(1000 + i, 2000 + i));
  }
  // Signed bound avoids the signed/unsigned comparison in `i < h.size()`.
  const int n = static_cast<int>(h.size());
  for (int i = 0; i < n; i++) {
    ASSERT_EQ(2000 + i, Lookup(1000 + i));
  }
  for (int i = 0; i < n; i++) {
    cache_->Release(h[i]);
  }
}
// Mixes entries of charge 1 and 10; after inserting twice the capacity's
// worth of weight, the total retained weight must stay near capacity.
TEST_F(CacheTest, HeavyEntries) {
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    // Odd indices are light, even indices are heavy.
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }
  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  // Allow 10% slack over the configured capacity.
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}
// NewId must return distinct ids across calls.
TEST_F(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}
// Prune drops all unpinned entries; pinned ones survive.
TEST_F(CacheTest, Prune) {
  Insert(1, 100);
  Insert(2, 200);
  Cache::Handle* handle = cache_->Lookup(EncodeKey(1));
  ASSERT_TRUE(handle);
  cache_->Prune();
  cache_->Release(handle);
  ASSERT_EQ(100, Lookup(1));
  ASSERT_EQ(-1, Lookup(2));
}
// A zero-capacity cache accepts inserts but retains nothing.
TEST_F(CacheTest, ZeroSizeCache) {
  delete cache_;
  cache_ = NewLRUCache(0);
  Insert(1, 100);
  ASSERT_EQ(-1, Lookup(1));
}
} |
216 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_PREPROCESSOR_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_PREPROCESSOR_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace gl {
// Outcome of attempting to rewrite one inline block.
enum class RewriteStatus {
  SUCCESS = 0,
  // This rewriter does not handle the block; the next rewriter is tried.
  NOT_RECOGNIZED = 1,
  ERROR = 2,
};
// Interface for rewriting a single inline block: the text between two
// delimiters, delimiters excluded. Output is appended to `output`.
class InlineRewrite {
 public:
  virtual ~InlineRewrite() = default;
  virtual RewriteStatus Rewrite(absl::string_view input,
                                std::string* output) = 0;
};
// Scans text for delimiter-bounded inline blocks and runs registered
// rewrites on each block, in registration order, until one succeeds.
class TextPreprocessor {
 public:
  // `keep_unknown_rewrites`: if true, blocks no rewriter recognizes are kept
  // verbatim (with delimiters); if false, they cause an error.
  TextPreprocessor(char inline_delimiter, bool keep_unknown_rewrites)
      : inline_delimiter_(inline_delimiter),
        keep_unknown_rewrites_(keep_unknown_rewrites) {}
  // Registers a rewriter; `rewrite` is not owned and must outlive this.
  void AddRewrite(InlineRewrite* rewrite) {
    inline_rewrites_.push_back(rewrite);
  }
  absl::Status Rewrite(const std::string& input, std::string* output);

 private:
  const char inline_delimiter_;
  const bool keep_unknown_rewrites_;
  std::vector<InlineRewrite*> inline_rewrites_;
};
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Returns the first `delimiter`-bounded block in `s`, delimiters included.
// If only an opening delimiter exists, returns a length-1 view of it (the
// caller treats that as an unterminated block). If no delimiter exists,
// returns an empty view positioned at the end of `s`.
absl::string_view FindInlineBlock(absl::string_view s, char delimiter) {
  size_t start = s.find(delimiter);
  if (start != absl::string_view::npos) {
    size_t end = s.find(delimiter, start + 1);
    // Use absl::string_view::npos consistently (std::string::npos has the
    // same value, but mixing the two obscures intent).
    if (end != absl::string_view::npos) {
      return s.substr(start, end - start + 1);
    }
    return s.substr(start, 1);
  }
  return s.substr(s.size(), 0);
}
// Returns the suffix of `s` that follows `subs`. Requires that `subs` is a
// subview of `s` (points into the same buffer): the offset is computed by
// pointer arithmetic on the underlying data pointers.
absl::string_view PastSubstr(absl::string_view s, absl::string_view subs) {
  return s.substr(subs.data() + subs.size() - s.data());
}
}
// Scans `input` for inline blocks and rewrites each one in place.
// Returns NotFoundError for an unterminated block or (when
// keep_unknown_rewrites_ is false) for a block no rewriter recognizes, and
// InternalError when a rewriter reports an error.
absl::Status TextPreprocessor::Rewrite(const std::string& input,
                                       std::string* output) {
  absl::string_view s = input;
  std::string result;
  while (true) {
    absl::string_view inline_block = FindInlineBlock(s, inline_delimiter_);
    // Copy the plain text that precedes the block.
    result.append(s.data(), inline_block.data() - s.data());
    if (inline_block.empty()) {
      break;
    }
    // A single-character block is a lone opening delimiter: unterminated.
    if (inline_block.size() == 1) {
      return absl::NotFoundError("Unable to find end of inline block");
    }
    s = PastSubstr(s, inline_block);
    bool processed = false;
    // Try rewriters in registration order; the first SUCCESS wins.
    for (auto& rewrite : inline_rewrites_) {
      if (processed) {
        break;
      }
      // Pass the block content without its surrounding delimiters.
      switch (rewrite->Rewrite(inline_block.substr(1, inline_block.size() - 2),
                               &result)) {
        case RewriteStatus::NOT_RECOGNIZED:
          break;
        case RewriteStatus::SUCCESS:
          processed = true;
          break;
        case RewriteStatus::ERROR:
          return absl::InternalError(absl::StrCat("Error while rewriting '",
                                                  inline_block, "': ", result));
      }
    }
    if (!processed) {
      if (!keep_unknown_rewrites_) {
        return absl::NotFoundError(absl::StrCat(
            "Didn't find inline rewrite for '", inline_block, "'"));
      }
      // Keep the unrecognized block verbatim, delimiters included.
      absl::StrAppend(&result, inline_block);
    }
  }
  *output = std::move(result);
  return absl::OkStatus();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Test rewriter that records every seen block into `blocks` and rewrites it
// to "r:<block>".
class AccuInlineRewrite : public InlineRewrite {
 public:
  explicit AccuInlineRewrite(std::vector<std::string>* blocks)
      : blocks_(blocks) {}
  RewriteStatus Rewrite(absl::string_view input, std::string* output) final {
    blocks_->push_back(std::string(input.data(), input.size()));
    output->append("r:");
    output->append(input.data(), input.size());
    return RewriteStatus::SUCCESS;
  }
  std::vector<std::string>* blocks_;
};
// Runs the preprocessor over `text` and returns the inline blocks it saw.
std::vector<std::string> ParseInlines(const std::string& text) {
  std::vector<std::string> blocks;
  TextPreprocessor preprocessor('$', false);
  AccuInlineRewrite rewrite(&blocks);
  preprocessor.AddRewrite(&rewrite);
  std::string discard;
  preprocessor.Rewrite(text, &discard).IgnoreError();
  return blocks;
}
// Empty input, no blocks, and an empty block "$$".
TEST(Preprocessor, CornerCases) {
  EXPECT_THAT(ParseInlines(""), testing::ElementsAre());
  EXPECT_THAT(ParseInlines("text text"), testing::ElementsAre());
  EXPECT_THAT(ParseInlines("$$"), testing::ElementsAre(""));
}
TEST(Preprocessor, One) {
  EXPECT_THAT(ParseInlines("$text$"), testing::ElementsAre("text"));
  EXPECT_THAT(ParseInlines(" $text$ "), testing::ElementsAre("text"));
}
TEST(Preprocessor, More) {
  EXPECT_THAT(ParseInlines("Test $inline1$\n$inline2$ test $inline3$ "),
              testing::ElementsAre("inline1", "inline2", "inline3"));
}
// Same as ParseInlines, but returns the rewritten text instead.
std::string RewriteInlines(const std::string& text) {
  std::vector<std::string> blocks;
  TextPreprocessor preprocessor('$', false);
  AccuInlineRewrite rewrite(&blocks);
  preprocessor.AddRewrite(&rewrite);
  std::string out;
  preprocessor.Rewrite(text, &out).IgnoreError();
  return out;
}
TEST(Preprocessor, RewriteCornerCases) {
  EXPECT_EQ(RewriteInlines(""), "");
  EXPECT_EQ(RewriteInlines("text text"), "text text");
  EXPECT_EQ(RewriteInlines("$$"), "r:");
}
TEST(Preprocessor, RewriteOne) {
  EXPECT_EQ(RewriteInlines("$text$"), "r:text");
  EXPECT_EQ(RewriteInlines(" $text$ "), " r:text ");
}
TEST(Preprocessor, RewriteMore) {
  EXPECT_EQ(RewriteInlines("Test $inline1$\n$inline2$ test $inline3$ "),
            "Test r:inline1\nr:inline2 test r:inline3 ");
}
// Test rewriter that only recognizes the block "foo", rewriting it to "bla".
// (Removed the unused `blocks_` member — a copy-paste leftover from
// AccuInlineRewrite; nothing ever initialized or read it.)
class SingleRewrite : public InlineRewrite {
 public:
  RewriteStatus Rewrite(absl::string_view input, std::string* output) final {
    if (input == "foo") {
      output->append("bla");
      return RewriteStatus::SUCCESS;
    }
    return RewriteStatus::NOT_RECOGNIZED;
  }
};
// With keep_unknown_rewrites=true, unrecognized blocks are kept verbatim.
TEST(Preprocessor, KeepUnknownRewrites) {
  TextPreprocessor preprocessor('$', true);
  SingleRewrite rewrite;
  preprocessor.AddRewrite(&rewrite);
  std::string out;
  ASSERT_TRUE(preprocessor.Rewrite("Good morning, $name$! $foo$", &out).ok());
  EXPECT_EQ("Good morning, $name$! bla", out);
}
// With keep_unknown_rewrites=false, an unrecognized block is an error.
TEST(Preprocessor, KeepUnknownRewrites_Fail) {
  TextPreprocessor preprocessor('$', false);
  SingleRewrite rewrite;
  preprocessor.AddRewrite(&rewrite);
  std::string out;
  EXPECT_FALSE(preprocessor.Rewrite("Good morning, $name$! $foo$", &out).ok());
}
}
}
}
} |
217 | #ifndef TENSORFLOW_LITE_CORE_ASYNC_INTEROP_ATTRIBUTE_MAP_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_ASYNC_INTEROP_ATTRIBUTE_MAP_INTERNAL_H_
#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/interop/variant.h"
namespace tflite {
namespace interop {
// Typed key/value store for buffer or sync attributes. Enum-keyed
// attributes live in `attrs_`; string-keyed custom attributes live in
// `custom_attrs_`. Values are type-erased Variants.
class AttributeMap {
 public:
  explicit AttributeMap(TfLiteAttrMapType type) : type_(type) {}
  using KeyT = uint32_t;
  using CustomKeyT = std::string;
  using ValueT = tflite::interop::Variant;
  using ContainerT = std::map<KeyT, ValueT>;
  using CustomContainerT = std::map<CustomKeyT, ValueT>;
  bool IsBufferAttributeMap() const {
    return type_ == kTfLiteAttrMapTypeBuffer;
  }
  bool IsSyncAttributeMap() const { return type_ == kTfLiteAttrMapTypeSync; }
  // Merges this map with `other` into `merged`; conflicting keys are
  // reported in `conflict` (if non-null). Returns false on type mismatch,
  // null arguments, or unreconcilable attributes.
  bool ReconcileAttributes(const AttributeMap* other, AttributeMap* merged,
                           AttributeMap* conflict) const;
  // Checks that this map's attributes cover `other`'s requirements;
  // uncovered keys are reported in `conflict` (if non-null).
  bool CheckAttributeCoverage(const AttributeMap* other,
                              AttributeMap* conflict) const;
  // Returns true and writes `*value` if `key` is present and the stored
  // Variant holds a ValueT.
  template <typename AttrKeyT, typename ValueT>
  bool GetAttr(AttrKeyT key, ValueT* value) const {
    if (auto it = attrs_.find(static_cast<uint32_t>(key)); it != attrs_.end()) {
      if (auto* v = it->second.Get<ValueT>(); v != nullptr) {
        *value = *v;
        return true;
      }
    }
    return false;
  }
  // Sets or overwrites the attribute. The value is moved into the map to
  // avoid a redundant copy.
  template <typename AttrKeyT, typename ValueT>
  void SetAttr(AttrKeyT key, ValueT value) {
    attrs_.insert_or_assign(static_cast<KeyT>(key), std::move(value));
  }
  template <typename ValueT>
  bool GetCustomAttr(CustomKeyT key, ValueT* value) const {
    if (auto it = custom_attrs_.find(key); it != custom_attrs_.end()) {
      if (auto* v = it->second.Get<ValueT>(); v != nullptr) {
        *value = *v;
        return true;
      }
    }
    return false;
  }
  // Sets or overwrites a string-keyed attribute; key and value are moved.
  template <typename ValueT>
  void SetCustomAttr(CustomKeyT key, ValueT value) {
    custom_attrs_.insert_or_assign(std::move(key), std::move(value));
  }

 private:
  TfLiteAttrMapType type_;
  ContainerT attrs_;
  CustomContainerT custom_attrs_;
};
}
}
struct TfLiteAttributeMap {
explicit TfLiteAttributeMap(TfLiteAttrMapType type) : impl(type) {}
tflite::interop::AttributeMap impl;
};
#endif
#include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include "tensorflow/lite/core/async/interop/reconcile_fns.h"
namespace tflite {
namespace interop {
// Merges this map with `other` into `merged`, delegating the per-key logic
// to ReconcileGeneralAttributeKeys. Fails fast on null arguments or when the
// two maps are of different kinds (buffer vs. sync).
bool AttributeMap::ReconcileAttributes(const AttributeMap* other,
                                       AttributeMap* merged,
                                       AttributeMap* conflict) const {
  if (other == nullptr || merged == nullptr) return false;
  if (type_ != other->type_) return false;
  // Output maps adopt this map's kind regardless of what they were created as.
  merged->type_ = type_;
  if (conflict) conflict->type_ = type_;
  return tflite::interop::ReconcileGeneralAttributeKeys(
      type_, &attrs_, &other->attrs_, &merged->attrs_,
      conflict ? &conflict->attrs_ : nullptr);
}
// Verifies this map's attributes satisfy `other`'s; uncovered keys are
// written to `conflict` (if provided). Fails on null `other` or kind mismatch.
bool AttributeMap::CheckAttributeCoverage(const AttributeMap* other,
                                          AttributeMap* conflict) const {
  if (other == nullptr) return false;
  if (type_ != other->type_) return false;
  if (conflict) conflict->type_ = type_;
  return tflite::interop::CheckGeneralAttributeKeysCoverage(
      type_, &attrs_, &other->attrs_, conflict ? &conflict->attrs_ : nullptr);
}
}
} | #include "tensorflow/lite/core/async/interop/attribute_map_internal.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite {
namespace interop {
namespace {
// The map kind predicates must match the constructor argument.
TEST(AttributeMapTest, TypeTest) {
  {
    auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
    EXPECT_TRUE(attrs.IsBufferAttributeMap());
    EXPECT_FALSE(attrs.IsSyncAttributeMap());
  }
  {
    auto attrs = AttributeMap(kTfLiteAttrMapTypeSync);
    EXPECT_TRUE(attrs.IsSyncAttributeMap());
    EXPECT_FALSE(attrs.IsBufferAttributeMap());
  }
}
// Set/Get round-trips for both enum-keyed and custom string-keyed attrs.
TEST(AttributeMapTest, AccessorTest) {
  auto attrs = AttributeMap(kTfLiteAttrMapTypeBuffer);
  {
    attrs.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
    size_t result;
    EXPECT_TRUE(attrs.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
    EXPECT_EQ(8, result);
  }
  {
    attrs.SetCustomAttr("Foo", 12);
    int result;
    EXPECT_FALSE(attrs.GetCustomAttr("Bar", &result));
    EXPECT_TRUE(attrs.GetCustomAttr("Foo", &result));
    EXPECT_EQ(12, result);
  }
}
// Reconciliation and coverage both require matching map kinds.
TEST(AttributeMapTest, ReconcileFailDifferentTypes) {
  auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
  auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(
      attrs1.ReconcileAttributes(&attrs2, &attrs3, nullptr));
  EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
}
// Null `other` or null `merged` must be rejected, not dereferenced.
TEST(AttributeMapTest, NullptrTest) {
  auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(nullptr, &attrs2,
                                          nullptr));
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, nullptr,
                                          nullptr));
  EXPECT_FALSE(attrs1.CheckAttributeCoverage(nullptr,
                                             nullptr));
}
// Buffer and sync maps cannot be reconciled with each other.
TEST(AttributeMapTest, ReconcileDifferentTypes) {
  auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  auto attrs2 = AttributeMap(kTfLiteAttrMapTypeSync);
  auto attrs3 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  EXPECT_FALSE(attrs1.ReconcileAttributes(&attrs2, &attrs3,
                                          nullptr));
}
// Reconciling alignments 8 and 4 yields 8; the output maps also adopt the
// buffer kind even though they were constructed as sync maps.
TEST(AttributeMapTest, ReconcileTest) {
  auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
  auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
  auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
  auto attrs4 = AttributeMap(kTfLiteAttrMapTypeSync);
  EXPECT_TRUE(attrs1.ReconcileAttributes(&attrs2, &attrs3, &attrs4));
  EXPECT_TRUE(attrs3.IsBufferAttributeMap());
  EXPECT_TRUE(attrs4.IsBufferAttributeMap());
  size_t result;
  EXPECT_TRUE(attrs3.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
  EXPECT_EQ(8, result);
}
// Alignment 8 covers a requirement of 4.
TEST(AttributeMapTest, CoverageTest) {
  auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(8));
  auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
  auto attrs3 = AttributeMap(kTfLiteAttrMapTypeSync);
  EXPECT_TRUE(attrs1.CheckAttributeCoverage(&attrs2, &attrs3));
  EXPECT_TRUE(attrs3.IsBufferAttributeMap());
}
// Alignment 10 does not cover 4 (not a multiple); the uncovered requirement
// is reported through the conflict map.
TEST(AttributeMapTest, CoverageFailedTest) {
  auto attrs1 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  attrs1.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(10));
  auto attrs2 = AttributeMap(kTfLiteAttrMapTypeBuffer);
  attrs2.SetAttr(kTfLiteBufferAttrKeyAlignment, size_t(4));
  auto conflict = AttributeMap(kTfLiteAttrMapTypeSync);
  EXPECT_FALSE(attrs1.CheckAttributeCoverage(&attrs2, &conflict));
  EXPECT_TRUE(conflict.IsBufferAttributeMap());
  size_t result;
  EXPECT_TRUE(conflict.GetAttr(kTfLiteBufferAttrKeyAlignment, &result));
  EXPECT_EQ(4, result);
}
}
}
} |
218 | #ifndef XLA_TOOLS_HLO_EXPAND_H_
#define XLA_TOOLS_HLO_EXPAND_H_
#include <string>
#include <vector>
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/tsl/util/command_line_flags.h"
namespace xla {
// Command-line configuration for the hlo-expand tool: I/O settings plus one
// boolean per optional expansion pass. The compound flags (`expand_all`,
// `batch_norm_expander`, `rng_bit_generator_expander`) are folded into the
// individual pass flags by ParseCompoundFlags.
struct HloExpandConfig {
  bool help{false};
  std::string input_format;
  std::string output_file;
  std::string output_format;
  // Compound flags.
  bool batch_norm_expander{false};
  bool expand_all{false};
  bool rng_bit_generator_expander{false};
  // Individual pass flags.
  bool batch_norm_grad_expander{false};
  bool batch_norm_inference_expander{false};
  bool batch_norm_training_expander{false};
  bool cholesky_expander{false};
  bool rng_expander{false};
  bool rng_bit_generator_philox_expander{false};
  bool rng_bit_generator_three_fry_expander{false};
  bool triangular_solve_expander{false};
  bool spmd_expander{false};
  bool verify_hlo{false};
};
void AddPassesToPipeline(xla::HloExpandConfig& config,
xla::HloPassPipeline& pipeline,
const xla::HloModuleConfig& hlo_module_config);
std::vector<tsl::Flag> GetFlags(xla::HloExpandConfig& config);
void ParseCompoundFlags(xla::HloExpandConfig& config);
}
#endif
#include "xla/tools/hlo_expand.h"
#include <vector>
#include "xla/service/batchnorm_expander.h"
#include "xla/service/cholesky_expander.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/rng_bit_generator_expander.h"
#include "xla/service/rng_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/triangular_solve_expander.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/xla_data.pb.h"
namespace xla {
// Appends to `pipeline` the expansion passes enabled in `config`, in a fixed
// order; the optional HLO verifier runs last.
void AddPassesToPipeline(HloExpandConfig& config, HloPassPipeline& pipeline,
                         const HloModuleConfig& hlo_module_config) {
  // One BatchNormExpander instance handles all three batch-norm variants.
  if (config.batch_norm_grad_expander || config.batch_norm_inference_expander ||
      config.batch_norm_training_expander) {
    pipeline.AddPass<xla::BatchNormExpander>(
        config.batch_norm_training_expander,
        config.batch_norm_inference_expander,
        config.batch_norm_grad_expander);
  }
  if (config.cholesky_expander) {
    pipeline.AddPass<xla::CholeskyExpander>();
  }
  if (config.rng_expander) {
    pipeline.AddPass<xla::RngExpander>();
  }
  if (config.rng_bit_generator_philox_expander) {
    pipeline.AddPass<xla::RngBitGeneratorExpander>(
        xla::RandomAlgorithm::RNG_PHILOX);
  }
  if (config.rng_bit_generator_three_fry_expander) {
    pipeline.AddPass<xla::RngBitGeneratorExpander>(
        xla::RandomAlgorithm::RNG_THREE_FRY);
  }
  if (config.triangular_solve_expander) {
    pipeline.AddPass<xla::TriangularSolveExpander>();
  }
  // SPMD expansion requires sharding propagation before partitioning.
  if (config.spmd_expander) {
    pipeline.AddPass<ShardingPropagation>(
        true, false,
        hlo_module_config.allow_spmd_sharding_propagation_to_output(),
        hlo_module_config.allow_spmd_sharding_propagation_to_parameters());
    pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
        hlo_module_config.num_partitions(), hlo_module_config.replica_count(),
        hlo_module_config.debug_options()
            .xla_gpu_threshold_for_windowed_einsum_mib());
  }
  if (config.verify_hlo) {
    pipeline.AddPass<xla::HloVerifier>(false,
                                       false);
  }
}
// Returns the tool's command-line flag definitions, each bound to a field of
// `config`. Fixes in help text: a missing space made "it's"+"inferred" render
// as "it'sinferred", and the inference/training expander descriptions wrongly
// said they expand the "_grad" ops.
std::vector<tsl::Flag> GetFlags(HloExpandConfig& config) {
  return {
      tsl::Flag("h", &config.help, "Alias of --help"),
      tsl::Flag("help", &config.help, "Display available options"),
      tsl::Flag(
          "input_format", &config.input_format,
          "The format of the input file. If this flag is not specified, it's "
          "inferred from the file extension instead. Valid values:\n "
          "* hlo|txt : HLO textual format\n"
          "* pb : xla::HloProto in binary proto format\n"
          "* pbtxt : xla::HloProto in text proto format"),
      tsl::Flag("o", &config.output_file, "Alias of --output_file="),
      tsl::Flag("output_file", &config.output_file, "Full output file path"),
      tsl::Flag("output_format", &config.output_format,
                "The format of the output file. Defaults to input_format. "
                "Valid values:\n"
                "* hlo|txt : HLO textual format\n"
                "* pb : xla::HloProto in binary proto format\n"
                "* pbtxt : xla::HloProto in text proto format"),
      tsl::Flag("batch_norm_expander", &config.batch_norm_expander,
                "Overrides and expands batch_norm_grad, batch_norm_inference, "
                "and batch_norm_training ops"),
      tsl::Flag("batch_norm_grad_expander", &config.batch_norm_grad_expander,
                "Expands batch_norm_grad op"),
      tsl::Flag("batch_norm_inference_expander",
                &config.batch_norm_inference_expander,
                "Expands batch_norm_inference op"),
      tsl::Flag("batch_norm_training_expander",
                &config.batch_norm_training_expander,
                "Expands batch_norm_training op"),
      tsl::Flag("cholesky_expander", &config.cholesky_expander,
                "Expands cholesky op"),
      tsl::Flag("spmd_expander", &config.spmd_expander,
                "Expands SPMD sharding"),
      tsl::Flag("expand_all", &config.expand_all,
                "Overrides and expands all supported passes below"),
      tsl::Flag("rng_expander", &config.rng_expander, "Expands rng op"),
      tsl::Flag(
          "rng_bit_generator_expander", &config.rng_bit_generator_expander,
          "Overrides and expands rng_bit_generator op on all prng algorithms"),
      tsl::Flag("rng_bit_generator_philox_expander",
                &config.rng_bit_generator_philox_expander,
                "Expands rng_bit_generator op using philox prng algorithm"),
      tsl::Flag("rng_bit_generator_three_fry_expander",
                &config.rng_bit_generator_three_fry_expander,
                "Expands rng_bit_generator op using three_fry prng algorithm"),
      tsl::Flag("triangular_solve_expander", &config.triangular_solve_expander,
                "Expands triangular_solve op"),
      tsl::Flag("verify_hlo", &config.verify_hlo,
                "Run HLO verifier after passes"),
  };
}
// Folds the umbrella flags (--expand_all, --batch_norm_expander,
// --rng_bit_generator_expander) into the individual per-pass flags.
void ParseCompoundFlags(HloExpandConfig& config) {
  const bool expand_all = config.expand_all;
  const bool batch_norm = expand_all || config.batch_norm_expander;
  const bool rng_bit_generator =
      expand_all || config.rng_bit_generator_expander;
  config.batch_norm_grad_expander |= batch_norm;
  config.batch_norm_inference_expander |= batch_norm;
  config.batch_norm_training_expander |= batch_norm;
  config.cholesky_expander |= expand_all;
  config.rng_bit_generator_philox_expander |= rng_bit_generator;
  config.rng_bit_generator_three_fry_expander |= rng_bit_generator;
  config.rng_expander |= expand_all;
  config.triangular_solve_expander |= expand_all;
}
} | #include <string>
#include <vector>
#include <gmock/gmock.h>
#include "tsl/platform/path.h"
#include "tsl/platform/subprocess.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Fixture that shells out to the hlo-expand binary and captures its exit
// status, stdout, and stderr for assertions.
class HloExpandTest : public ::testing::Test {
 protected:
  // Runs hlo-expand with `additional_flags` appended to argv.
  void HloOpt(std::vector<std::string>& additional_flags) {
    std::string hlo_opt_bin =
        tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools", "hlo-expand");
    tsl::SubProcess proc;
    std::vector<std::string> argv = {hlo_opt_bin};
    argv.insert(argv.end(), additional_flags.begin(), additional_flags.end());
    proc.SetProgram(hlo_opt_bin, argv);
    proc.SetChannelAction(tsl::CHAN_STDOUT, tsl::ACTION_PIPE);
    proc.SetChannelAction(tsl::CHAN_STDERR, tsl::ACTION_PIPE);
    EXPECT_TRUE(proc.Start());
    stdout_output_ = stderr_output_ = "";
    int status = proc.Communicate(nullptr, &stdout_output_, &stderr_output_);
// Windows has no WIFEXITED/WEXITSTATUS; the raw status is the exit code.
#if defined(_WIN32) || defined(_WIN64)
    exited_normally_ = (status == 0);
    exit_status_ = status;
#else
    exited_normally_ = WIFEXITED(status);
    exit_status_ = exited_normally_ ? WEXITSTATUS(status) : -1;
#endif
  }
  std::string stdout_output_;
  std::string stderr_output_;
  bool exited_normally_ = false;
  int exit_status_ = -1;
};
// With no expansion flags, the input HLO is echoed unchanged.
TEST_F(HloExpandTest, CholeskyHlo) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "cholesky.hlo");
  std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path};
  HloOpt(additional_flags);
  const std::string& expected_hlo_string =
      R"(HloModule main, entry_computation_layout={()->f64[3,3]{1,0}}
ENTRY %main.3 () -> f64[3,3] {
  %constant.1 = f64[3,3]{1,0} constant({ { 1, 2, 3 }, { 2, 20, 26 }, { 3, 26, 70 } })
  ROOT %cholesky.2 = f64[3,3]{1,0} cholesky(f64[3,3]{1,0} %constant.1), lower=true
})";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 0);
  EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
// --spmd_expander partitions the module across two devices.
TEST_F(HloExpandTest, SpmdHlo) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "spmd.hlo");
  std::vector<std::string> additional_flags = {"--spmd_expander", hlo_path};
  HloOpt(additional_flags);
  const std::string& expected_hlo_string =
      R"(HloModule module, entry_computation_layout={(f32[24,64]{1,0}, f32[39296,64]{1,0})->f32[24,19648]{1,0}}, num_partitions=2
ENTRY %entry_spmd (param: f32[24,64], param.1: f32[39296,64]) -> f32[24,19648] {
  %param = f32[24,64]{1,0} parameter(0), sharding={replicated}
  %lhs.copy.1 = f32[24,64]{1,0} copy(f32[24,64]{1,0} %param)
  %param.1 = f32[39296,64]{1,0} parameter(1), sharding={replicated}
  %constant = s32[2]{0} constant({0, 19648})
  %partition-id = u32[] partition-id()
  %dynamic-slice = s32[1]{0} dynamic-slice(s32[2]{0} %constant, u32[] %partition-id), dynamic_slice_sizes={1}
  %reshape = s32[] reshape(s32[1]{0} %dynamic-slice)
  %constant.1 = s32[] constant(0)
  %dynamic-slice.1 = f32[19648,64]{1,0} dynamic-slice(f32[39296,64]{1,0} %param.1, s32[] %reshape, s32[] %constant.1), dynamic_slice_sizes={19648,64}
  %rhs.copy.1 = f32[19648,64]{1,0} copy(f32[19648,64]{1,0} %dynamic-slice.1)
  ROOT %dot.1 = f32[24,19648]{1,0} dot(f32[24,64]{1,0} %lhs.copy.1, f32[19648,64]{1,0} %rhs.copy.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 0);
  EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
// --expand_all lowers the cholesky op into the expanded computation.
TEST_F(HloExpandTest, CholeskyExpanderHlo) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "cholesky.hlo");
  std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path,
                                               "--expand_all"};
  HloOpt(additional_flags);
  const std::string& expected_hlo_string = "%xla.cholesky_f64";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 0);
  EXPECT_THAT(stdout_output_, testing::HasSubstr(expected_hlo_string));
}
// More than one positional argument is rejected.
TEST_F(HloExpandTest, InvalidArgc) {
  std::vector<std::string> additional_flags = {"--input_format=hlo", "foo",
                                               "bar", "baz"};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "Cannot parse more than one argument. See usage below:";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// An unknown input file extension cannot be used to infer the format.
TEST_F(HloExpandTest, InvalidInputFileExtension) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "foo.bar");
  std::vector<std::string> additional_flags = {hlo_path};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "input_format must be specified as [hlo|pb|pbtxt|txt].";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// An unsupported --input_format value is rejected.
TEST_F(HloExpandTest, InvalidInputFormat) {
  std::vector<std::string> additional_flags = {"--input_format=foo"};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "input_format must be specified as [hlo|pb|pbtxt|txt].";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// An unknown output file extension is rejected.
TEST_F(HloExpandTest, InvalidOutputFileExtension) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "cholesky.hlo");
  std::string output_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(),
                                              "tools", "tests", "foo.bar");
  std::vector<std::string> additional_flags = {"--input_format=", hlo_path,
                                               "--output_file=" + output_path};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "output_format must be specified as [hlo|pb|pbtxt].";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// An unsupported --output_format value is rejected.
TEST_F(HloExpandTest, InvalidOutputFormat) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "cholesky.hlo");
  std::vector<std::string> additional_flags = {"--input_format=", hlo_path,
                                               "--output_format=foo"};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "output_format must be specified as [hlo|pb|pbtxt].";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// A nonexistent input file produces the usage hint.
TEST_F(HloExpandTest, InvalidFile) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "foo.bar");
  std::vector<std::string> additional_flags = {"--input_format=hlo", hlo_path};
  HloOpt(additional_flags);
  const std::string& expected_string = "Try: hlo-expand --help";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// Binary proto output cannot be written to stdout.
TEST_F(HloExpandTest, UnsupportedOutputFormat) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "cholesky.hlo");
  std::vector<std::string> additional_flags = {"--input_format=hlo",
                                               "--output_format=pb", hlo_path};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "Printing to stdout must specify supported "
      "output_format=[hlo|pbtxt|txt].";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
// --verify_hlo surfaces verifier failures from an invalid module.
TEST_F(HloExpandTest, VerificationFailure) {
  std::string hlo_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "tools",
                                           "tests", "invalid_concat.hlo");
  std::vector<std::string> additional_flags = {"--verify_hlo", hlo_path};
  HloOpt(additional_flags);
  const std::string& expected_string =
      "Cannot concatenate arrays that differ in dimensions";
  EXPECT_TRUE(exited_normally_);
  EXPECT_EQ(exit_status_, 1);
  EXPECT_THAT(stderr_output_, testing::HasSubstr(expected_string));
}
}
} |
219 | #ifndef XLA_PERMUTATION_UTIL_H_
#define XLA_PERMUTATION_UTIL_H_
#include <vector>
#include "absl/types/span.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
bool IsPermutation(absl::Span<const int64_t> permutation);
// Applies `permutation` to `input`: output[i] = input[permutation[i]].
// CHECK-fails if sizes differ or `permutation` is not a valid permutation.
template <typename Container>
std::vector<typename Container::value_type> Permute(
    const Container& input, absl::Span<const int64_t> permutation) {
  using T = typename Container::value_type;
  absl::Span<const T> data(input);
  CHECK_EQ(permutation.size(), data.size());
  CHECK(IsPermutation(permutation));
  std::vector<T> output;
  output.reserve(data.size());
  for (int64_t source_index : permutation) {
    output.push_back(data[source_index]);
  }
  return output;
}
// Applies the inverse of `permutation`: output[permutation[i]] = input[i].
// CHECK-fails if sizes differ or `permutation` is not a valid permutation.
template <typename Container>
std::vector<typename Container::value_type> PermuteInverse(
    const Container& input, absl::Span<const int64_t> permutation) {
  using T = typename Container::value_type;
  absl::Span<const T> data(input);
  CHECK_EQ(permutation.size(), data.size());
  CHECK(IsPermutation(permutation));
  std::vector<T> output(data.size());
  size_t source_index = 0;
  for (int64_t target_index : permutation) {
    output[target_index] = data[source_index++];
  }
  return output;
}
std::vector<int64_t> InversePermutation(
absl::Span<const int64_t> input_permutation);
std::vector<int64_t> ComposePermutations(absl::Span<const int64_t> p1,
absl::Span<const int64_t> p2);
bool IsIdentityPermutation(absl::Span<const int64_t> permutation);
}
#endif
#include "xla/permutation_util.h"
#include <vector>
#include "absl/container/inlined_vector.h"
namespace xla {
// Returns true iff `permutation` contains each of 0..size-1 exactly once.
bool IsPermutation(absl::Span<const int64_t> permutation) {
  // Signed copy of size() avoids the signed/unsigned comparison below.
  const int64_t size = permutation.size();
  absl::InlinedVector<bool, 8> seen(permutation.size(), false);
  for (int64_t p : permutation) {
    // Out-of-range entries and duplicates both disqualify the input.
    if (p < 0 || p >= size || seen[p]) {
      return false;
    }
    seen[p] = true;
  }
  return true;
}
// Computes the inverse permutation: output[input[i]] = i.
std::vector<int64_t> InversePermutation(
    absl::Span<const int64_t> input_permutation) {
  DCHECK(IsPermutation(input_permutation));
  std::vector<int64_t> output_permutation(input_permutation.size(), -1);
  int64_t position = 0;
  for (int64_t target : input_permutation) {
    output_permutation[target] = position++;
  }
  return output_permutation;
}
// Composes two permutations: output[i] = p1[p2[i]].
std::vector<int64_t> ComposePermutations(absl::Span<const int64_t> p1,
                                         absl::Span<const int64_t> p2) {
  CHECK_EQ(p1.size(), p2.size());
  std::vector<int64_t> output;
  output.reserve(p2.size());
  for (int64_t index : p2) {
    output.push_back(p1.at(index));
  }
  return output;
}
// Returns true iff `permutation` maps every index to itself (the identity
// permutation); trivially true for the empty span.
bool IsIdentityPermutation(absl::Span<const int64_t> permutation) {
  // Hoist the size into a signed value: comparing the int64_t index directly
  // against size_t size() triggers -Wsign-compare.
  const int64_t size = static_cast<int64_t>(permutation.size());
  for (int64_t i = 0; i < size; ++i) {
    if (permutation[i] != i) {
      return false;
    }
  }
  return true;
}
} | #include "xla/permutation_util.h"
#include "xla/test.h"
namespace xla {
namespace {
// Exercises IsPermutation on valid permutations (including the empty one),
// a negative entry, a duplicated entry, and an out-of-range entry.
TEST(PermutationUtilTest, IsPermutation) {
  EXPECT_TRUE(IsPermutation({}));
  EXPECT_TRUE(IsPermutation({0}));
  // Negative entries are never valid.
  EXPECT_FALSE(IsPermutation({-3}));
  EXPECT_TRUE(IsPermutation({0, 1}));
  // Duplicates are rejected.
  EXPECT_FALSE(IsPermutation({1, 1}));
  EXPECT_TRUE(IsPermutation({1, 0}));
  EXPECT_TRUE(IsPermutation({3, 1, 0, 2}));
  // An entry >= size is rejected.
  EXPECT_FALSE(IsPermutation({3, 0, 2}));
}
}
} |
220 | #ifndef XLA_SERVICE_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#define XLA_SERVICE_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#include <utility>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that rewrites async collectives (all-reduce-start/done,
// all-gather-start/done, collective-permute-start/done and async-start/done
// wrappers) into their synchronous equivalents when nothing overlaps with
// the in-flight collective, i.e. when the async form hides no latency.
// Requires the module to be scheduled.
class ConvertAsyncCollectivesToSync : public HloModulePass {
 public:
  // `is_nop` identifies instructions that may appear between a start/done
  // pair without inhibiting conversion (e.g. bitcast, parameter).
  explicit ConvertAsyncCollectivesToSync(HloPredicate is_nop = {})
      : is_nop_(is_nop) {}
  absl::string_view name() const override {
    return "convert-async-collectives-to-sync";
  }
  using HloPassInterface::Run;
  // Runs on every scheduled non-fusion computation; returns true if any
  // async pair was converted.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // Hook for subclasses (e.g. backend-specific variants) to customize how
  // the collected (start, done) pairs are rewritten.
  virtual absl::Status ConvertAsyncInstructionsToSync(
      HloComputation* computation,
      absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
      const {
    return ReplaceAsyncInstructionsWithSync(computation, async_pairs);
  }
  // Replaces each (async_start, async_done) pair with the equivalent sync
  // instruction and updates the computation's schedule accordingly.
  static absl::Status ReplaceAsyncInstructionsWithSync(
      HloComputation* computation,
      absl::Span<const std::pair<HloInstruction*, HloInstruction*>>
          async_pairs);
  // Frontend attribute key recording, on the replacement sync instruction,
  // the name of the original async start op.
  static constexpr char kAsyncCollectiveNameAttributeName[] =
      "async_collective_name";
 private:
  absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
  HloPredicate is_nop_;
};
}
#endif
#include "xla/service/convert_async_collectives_to_sync.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
// Builds the synchronous equivalent of the given (async_start, async_done)
// pair inside async_start's computation, rewires all users of async_done to
// the new instruction, and removes the pair.  Returns the new sync
// instruction, or an error for an unsupported start opcode.
absl::StatusOr<HloInstruction*> CreateSyncVariant(HloInstruction* async_start,
                                                  HloInstruction* async_done) {
  HloInstruction* sync_instruction = nullptr;
  HloComputation* computation = async_start->parent();
  const HloOpcode async_start_op = async_start->opcode();
  // Each case clones the collective's configuration from the start op and
  // takes the result shape from the done op.
  switch (async_start_op) {
    case HloOpcode::kAllReduceStart: {
      auto* async_ar = Cast<HloAllReduceInstruction>(async_start);
      sync_instruction =
          computation->AddInstruction(HloInstruction::CreateAllReduce(
              async_done->shape(), async_ar->operands(), async_ar->to_apply(),
              async_ar->device_list(), async_ar->constrain_layout(),
              async_ar->channel_id(), async_ar->use_global_device_ids()));
      break;
    }
    case HloOpcode::kAllGatherStart: {
      auto* async_ag = Cast<HloAllGatherInstruction>(async_start);
      sync_instruction =
          computation->AddInstruction(HloInstruction::CreateAllGather(
              async_done->shape(), async_ag->operands(),
              async_ag->all_gather_dimension(), async_ag->device_list(),
              async_ag->constrain_layout(), async_ag->channel_id(),
              async_ag->use_global_device_ids()));
      break;
    }
    case HloOpcode::kCollectivePermuteStart: {
      auto* async_cp = Cast<HloCollectivePermuteInstruction>(async_start);
      TF_RET_CHECK(async_cp->operand_count() == 1);
      sync_instruction =
          computation->AddInstruction(HloInstruction::CreateCollectivePermute(
              async_done->shape(), async_cp->mutable_operand(0),
              async_cp->source_target_pairs(), async_cp->channel_id()));
      break;
    }
    case HloOpcode::kAsyncStart: {
      // Generic async wrapper (e.g. reduce-scatter, all-to-all): clone the
      // wrapped instruction directly with the start op's operands.
      auto* as_start = Cast<HloAsyncInstruction>(async_start);
      HloInstruction* wrapped = as_start->async_wrapped_instruction();
      sync_instruction =
          computation->AddInstruction(wrapped->CloneWithNewOperands(
              async_done->shape(), as_start->operands()));
      break;
    }
    default:
      return Internal("Unexpected async start op %s",
                      HloOpcodeString(async_start->opcode()));
  }
  // Preserve metadata/backend config so downstream passes see the same
  // annotations as on the async form.
  sync_instruction->set_metadata(async_start->metadata());
  sync_instruction->CopyBackendConfigFrom(async_start);
  TF_RETURN_IF_ERROR(async_done->ReplaceAllUsesWith(sync_instruction));
  // Control deps must be dropped before the pair can be removed.
  TF_RETURN_IF_ERROR(async_start->DropAllControlDeps());
  TF_RETURN_IF_ERROR(async_done->DropAllControlDeps());
  // Removing async_done may already remove async_start as an unused operand;
  // track that to avoid a double removal.
  bool is_async_start_removed = false;
  auto track_async_start_removed = [&](const HloInstruction* instr) {
    is_async_start_removed |= instr == async_start;
  };
  TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(
      async_done, track_async_start_removed));
  if (!is_async_start_removed) {
    TF_RETURN_IF_ERROR(computation->RemoveInstruction(async_start));
  }
  return sync_instruction;
}
// Converts each (start, done) pair to its sync variant, tags the replacement
// with the original start op's name as a frontend attribute, and rewrites
// the computation's schedule: the start op's slot is dropped and the done
// op's slot is replaced by the new sync instruction.
absl::Status
ConvertAsyncCollectivesToSync::ReplaceAsyncInstructionsWithSync(
    HloComputation* computation,
    absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs) {
  // Maps old instruction -> replacement in the schedule; nullptr means the
  // instruction is simply removed (the start op).
  absl::flat_hash_map<HloInstruction*, HloInstruction*> replaced_ops;
  for (auto& [async_start, async_done] : async_pairs) {
    TF_ASSIGN_OR_RETURN(HloInstruction * sync,
                        CreateSyncVariant(async_start, async_done));
    FrontendAttributes attributes;
    auto& map = *attributes.mutable_map();
    map[kAsyncCollectiveNameAttributeName] = async_start->name();
    sync->add_frontend_attributes(std::move(attributes));
    replaced_ops[async_start] = nullptr;
    replaced_ops[async_done] = sync;
  }
  // Rebuild the schedule in order, substituting or dropping replaced ops.
  HloModule* module = computation->parent();
  const HloInstructionSequence& sequence =
      module->schedule().sequence(computation);
  std::vector<HloInstruction*> new_sequence;
  new_sequence.reserve(sequence.size());
  for (HloInstruction* instr : sequence.instructions()) {
    auto it = replaced_ops.find(instr);
    if (it != replaced_ops.end()) {
      if (it->second != nullptr) {
        new_sequence.push_back(it->second);
      }
    } else {
      new_sequence.push_back(instr);
    }
  }
  module->schedule().set_sequence(computation, new_sequence);
  return absl::OkStatus();
}
// Scans the computation's schedule for async collective (start, done) pairs
// with no intervening non-NOP instruction — i.e. pairs whose async form
// hides no latency — and converts each such pair to its sync form.
// Returns true if any conversion took place.
absl::StatusOr<bool> ConvertAsyncCollectivesToSync::RunOnComputation(
    HloComputation* computation) {
  HloModule* module = computation->parent();
  std::vector<std::pair<HloInstruction*, HloInstruction*>> async_pairs;
  const HloInstructionSequence& sequence =
      module->schedule().sequence(computation);
  // Async starts seen so far with no matching done and no intervening
  // non-NOP instruction.
  absl::flat_hash_set<HloInstruction*> in_flight_ops;
  for (HloInstruction* instruction : sequence.instructions()) {
    if (hlo_query::IsAsyncCollectiveStartOp(instruction)) {
      in_flight_ops.insert(instruction);
      VLOG(3) << "Found async start " << instruction->ToString();
    } else if (hlo_query::IsAsyncCollectiveDoneOp(instruction)) {
      VLOG(3) << "Found async done " << instruction->ToString();
      TF_RET_CHECK(instruction->operand_count() == 1);
      HloInstruction* matching_async_start = instruction->mutable_operand(0);
      if (in_flight_ops.erase(matching_async_start) == 1) {
        async_pairs.push_back({matching_async_start, instruction});
        // Fix: close the brace opened in the log message so the pair prints
        // as "{start, done}" instead of "{start, done".
        VLOG(3) << "Added pair: {" << matching_async_start->name() << ", "
                << instruction->name() << "}";
      }
    } else if (!in_flight_ops.empty() && (!is_nop_ || !is_nop_(instruction))) {
      VLOG(3) << "Found intervening non-NOP instruction "
              << instruction->ToString();
      // Real work between start and done means the async form may be hiding
      // latency; disqualify every currently in-flight candidate.
      in_flight_ops.clear();
    }
  }
  if (async_pairs.empty()) {
    return false;
  }
  TF_RETURN_IF_ERROR(ConvertAsyncInstructionsToSync(computation, async_pairs));
  return true;
}
// Entry point: runs the conversion on every scheduled non-fusion computation
// of a scheduled module.  Unscheduled modules (or computations) are skipped,
// since the analysis depends on the instruction schedule.
absl::StatusOr<bool> ConvertAsyncCollectivesToSync::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  if (!module->has_schedule()) {
    VLOG(3) << "Skipping as module is not scheduled";
    return false;
  }
  bool changed = false;
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    if (!module->schedule().is_computation_scheduled(computation)) {
      // Fix: add the missing space so the log does not fuse the literal with
      // the computation name ("Skipping computationfoo").
      VLOG(3) << "Skipping computation " << computation->name()
              << " as it is not scheduled";
      continue;
    }
    TF_ASSIGN_OR_RETURN(bool computation_changed,
                        RunOnComputation(computation));
    changed |= computation_changed;
  }
  return changed;
}
} | #include "xla/service/convert_async_collectives_to_sync.h"
#include <memory>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = xla::testing::opcode_matchers;
// Test fixture: runs the pass with an optional NOP predicate and verifies
// both the change flag and the frontend attribute left on replacements.
class ConvertAsyncCollectivesToSyncTest : public HloTestBase {
 public:
  // Runs the pass and asserts (via EXPECT) whether it reported a change.
  absl::Status RunPass(HloModule *module, bool expect_change,
                       HloPredicate is_nop = {}) {
    TF_ASSIGN_OR_RETURN(bool changed,
                        ConvertAsyncCollectivesToSync{is_nop}.Run(module));
    EXPECT_EQ(changed, expect_change);
    return absl::OkStatus();
  }
  // Returns the original async start op's name recorded on the sync
  // replacement's frontend attributes.
  absl::string_view GetAsyncName(const HloInstruction *inst) {
    const auto &map = inst->frontend_attributes().map();
    return map.at(
        ConvertAsyncCollectivesToSync::kAsyncCollectiveNameAttributeName);
  }
  // Treats bitcast/get-tuple-element/parameter as NOPs for tests that allow
  // benign instructions between start and done.
  HloPredicate is_nop_simple_ =
      HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
                       HloOpcode::kParameter>;
};
// Adjacent all-reduce-start/done pair: converted to a sync all-reduce that
// keeps the channel id and records the start op's name.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
    ROOT done = u32[] all-reduce-done(start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::ReplicaId()));
  const auto *ar = Cast<HloAllReduceInstruction>(root);
  EXPECT_TRUE(ar->channel_id().has_value());
  EXPECT_EQ(ar->channel_id().value(), 3);
  EXPECT_EQ(GetAsyncName(ar), "start");
}
// An intervening NOP (bitcast) does not block conversion when the NOP
// predicate classifies it as such; replica groups are preserved.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
    id2 = f32[] bitcast(id)
    ROOT done = u32[] all-reduce-done(start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllReduce(m::ReplicaId()));
  const auto *ar = Cast<HloAllReduceInstruction>(root);
  EXPECT_TRUE(ar->channel_id().has_value());
  EXPECT_EQ(ar->channel_id().value(), 3);
  EXPECT_THAT(ar, m::ReplicaGroups({{0, 1}, {2, 3}}));
  EXPECT_EQ(GetAsyncName(ar), "start");
}
// A real computation (add) between start and done means the async form may
// hide latency, so no conversion happens.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
    id2 = u32[] add(id, id)
    ROOT done = u32[] all-reduce-done(start)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), false));
}
// all-gather-start/done pair converted; dimension and channel id preserved.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  ENTRY test_computation {
    a1 = u32[1, 2] parameter(0)
    ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
    ROOT allgather = u32[2,2] all-gather-done(ags)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllGather(m::Parameter(0)));
  const auto *ag = Cast<HloAllGatherInstruction>(root);
  EXPECT_TRUE(ag->channel_id().has_value());
  EXPECT_EQ(ag->channel_id().value(), 3);
  EXPECT_EQ(ag->all_gather_dimension(), 0);
  EXPECT_EQ(GetAsyncName(ag), "ags");
}
// collective-permute-start/done pair converted; source/target pairs kept.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  ENTRY test_computation {
    p = u32[2] parameter(0)
    start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
    ROOT done = u32[2] collective-permute-done(start)
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::CollectivePermute(m::Parameter(0)));
  const auto *cp = Cast<HloCollectivePermuteInstruction>(root);
  EXPECT_THAT(cp, m::SourceTargetPairs({{0, 1}, {1, 0}}));
  EXPECT_EQ(GetAsyncName(cp), "start");
}
// reduce-scatter wrapped in generic async-start/done: the wrapped op is
// cloned in place of the pair.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  add {
    lhs = u32[] parameter(0)
    rhs = u32[] parameter(1)
    ROOT add = u32[] add(lhs, rhs)
  }
  reduce_scatter {
    p0 = u32[8] parameter(0)
    ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},
                      dimensions={0}, to_apply=add
  }
  ENTRY main {
    data = u32[8] parameter(0)
    rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
    ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::ReduceScatter(m::Parameter(0)));
  const auto *rs = Cast<HloReduceScatterInstruction>(root);
  EXPECT_THAT(rs, m::ReplicaGroups({{0, 3}, {1, 2}}));
  EXPECT_EQ(rs->scatter_dimension(), 0);
  EXPECT_EQ(GetAsyncName(rs), "rs-start");
}
// all-to-all wrapped in generic async-start/done is likewise unwrapped.
TEST_F(ConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  all_to_all {
    p0 = u32[2] parameter(0)
    ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
  }
  ENTRY test_computation {
    a1 = u32[2] parameter(0)
    a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
    ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::AllToAll(m::Parameter(0)));
  const auto *a2a = Cast<HloAllToAllInstruction>(root);
  EXPECT_THAT(a2a, m::ReplicaGroups({{0, 1}, {2, 3}}));
  EXPECT_TRUE(a2a->split_dimension().has_value());
  EXPECT_EQ(a2a->split_dimension().value(), 0);
  EXPECT_EQ(GetAsyncName(a2a), "a2a-start");
}
// Control dependencies between the pairs are dropped during conversion;
// both pairs still convert.
TEST_F(ConvertAsyncCollectivesToSyncTest, ControlDeps) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
    done1 = u32[] all-reduce-done(start1)
    start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
    done2 = u32[] all-reduce-done(start2)
    ROOT x = u32[] add(done1, done2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduce(), m::AllReduce()));
}
// Two pairs in flight at once (start1, start2, done1, done2): both are
// converted since only collectives separate the starts from their dones.
TEST_F(ConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
    start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
    done1 = u32[] all-reduce-done(start1)
    done2 = u32[] all-reduce-done(start2)
    ROOT x = u32[] add(done1, done2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduce(), m::AllReduce()));
}
// Nested pairs (start1, start2, done2, done1): both convert.
TEST_F(ConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
    start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
    done2 = u32[] all-reduce-done(start2)
    done1 = u32[] all-reduce-done(start1)
    ROOT x = u32[] add(done1, done2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduce(), m::AllReduce()));
}
// Nested pairs where a non-NOP (add) sits between done2 and done1: the
// inner pair converts, the outer pair is disqualified and stays async.
TEST_F(ConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
  const absl::string_view hlo_string = R"(
  HloModule test, is_scheduled=true
  apply_op {
    x = u32[] parameter(0)
    y = u32[] parameter(1)
    ROOT apply_op = u32[] add(x, y)
  }
  ENTRY test_computation {
    id = u32[] replica-id()
    start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
    start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
    done2 = u32[] all-reduce-done(start2)
    id2 = u32[] add(done2, done2)
    done1 = u32[] all-reduce-done(start1)
    ROOT x = u32[] add(done1, done2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  TF_ASSERT_OK(RunPass(module.get(), true));
  const HloInstruction *root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, m::Add(m::AllReduceDone(), m::AllReduce()));
}
}
} |
221 | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUSION_UTILS_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_FUSION_UTILS_H_
#include <functional>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
using SetFunctionSignatureFn = std::function<void(
const OpDef& first_function_signature,
const OpDef& second_function_signature, OpDef* fused_function_signature)>;
using StringCollection = gtl::InlinedVector<string, 2>;
using SetInputFn =
std::function<string(const StringCollection& first_function_inputs,
const StringCollection& second_function_inputs,
const StringCollection& parent_outputs, int arg_num)>;
using SetOutputFn =
std::function<void(const protobuf::Map<string, string>& parent_ret,
const protobuf::Map<string, string>& second_function_ret,
protobuf::Map<string, string>* fused_ret)>;
using SetNodesFn = std::function<void(
const FunctionDef& first_function, const FunctionDef& second_function,
FunctionDef* fused_function, FunctionDefLibrary* library)>;
void MergeNodes(const FunctionDef& first_function,
const FunctionDef& second_function, FunctionDef* fused_function,
FunctionDefLibrary* library);
bool CanCompose(const OpDef& first_signature, const OpDef& second_signature);
void ComposeSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature);
string ComposeInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num);
void ComposeOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret);
void CombineSignature(const OpDef& first_signature,
const OpDef& second_signature, OpDef* fused_signature);
void CombineOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret);
bool HasSameSignature(const OpDef& first_signature,
const OpDef& second_signature);
void SameSignature(const OpDef& first_signature, const OpDef& second_signature,
OpDef* fused_signature);
string SameInput(const StringCollection& first_inputs,
const StringCollection& second_inputs,
const StringCollection& first_outputs, int arg_num);
void LazyConjunctionOutput(const protobuf::Map<string, string>& first_ret,
const protobuf::Map<string, string>& second_ret,
protobuf::Map<string, string>* fused_ret);
void LazyConjunctionNodes(const FunctionDef& first_function,
const FunctionDef& second_function,
FunctionDef* fused_function,
FunctionDefLibrary* library);
FunctionDef* FuseFunctions(
const FunctionDef& first_function, const FunctionDef& second_function,
StringPiece fused_name_prefix, const SetFunctionSignatureFn& set_signature,
const SetInputFn& set_input, const SetOutputFn& set_output,
const SetNodesFn& set_nodes, FunctionDefLibrary* library);
}
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
namespace {
constexpr char kControlInputPrefix[] = "^";
// Returns true if `node_input` denotes a control dependency, i.e. it starts
// with the '^' control-input marker.
bool IsControlInput(const std::string& node_input) {
  return !node_input.empty() && node_input.front() == '^';
}
// Removes a single leading '^' control-input marker, if present; otherwise
// returns the input unchanged.
std::string StripControlInputNotation(const std::string& node_input) {
  if (!node_input.empty() && node_input.front() == '^') {
    return node_input.substr(1);
  }
  return node_input;
}
// Prefixes `node_input` with the '^' control-input marker.
std::string AddControlInputNotation(const std::string& node_input) {
  return "^" + node_input;
}
// Returns the node-name part of an input string, i.e. everything before the
// first ':' (the whole string when there is no ':').
std::string ParseNodeConnection(const std::string& name) {
  const std::string::size_type colon = name.find(':');
  return colon == std::string::npos ? name : name.substr(0, colon);
}
// Returns the output suffix of an input string, starting at (and including)
// the first ':'; empty when the string has no ':'.
std::string ParseOutputNode(const std::string& name) {
  const std::string::size_type colon = name.find(':');
  if (colon == std::string::npos) return {};
  return name.substr(colon);
}
// Returns the tensor name that the function's `output_idx`-th output arg is
// bound to in the ret map (e.g. "node:output:0").
string GetOutputNode(const FunctionDef& function, int output_idx) {
  const auto& ret_output_name =
      function.signature().output_arg(output_idx).name();
  return function.ret().at(ret_output_name);
}
// Mutable variant of GetOutputNode: returns a reference into the ret map so
// the binding of the `output_idx`-th output can be rewritten in place.
string& GetMutableOutputNode(FunctionDef* function, int output_idx) {
  const auto& ret_output_name =
      function->signature().output_arg(output_idx).name();
  return function->mutable_ret()->at(ret_output_name);
}
// Collects the .name() of every element of `iterable` into a
// StringCollection, reserving `allocate_size` slots up front.
template <typename Iterable>
StringCollection GetNames(const Iterable& iterable, int allocate_size) {
  StringCollection names;
  names.reserve(allocate_size);
  for (auto& arg : iterable) names.push_back(arg.name());
  return names;
}
// Returns the set of node names in `nodes`; CHECK-fails on a duplicate name
// since function node names are required to be unique.
template <typename Iterable>
gtl::FlatSet<string> GetNodeNamesSet(const Iterable& nodes) {
  gtl::FlatSet<string> names;
  for (const auto& node : nodes) {
    CHECK(gtl::InsertIfNotPresent(&names, node.name()))
        << "Functions should have unique node names. Node with name "
        << node.name() << " already exists";
  }
  return names;
}
// Returns a map from old name to new, collision-free name for those elements
// of `second_iterable` whose names clash with `first_iterable` (or with a
// name already assigned during this renaming).  Elements that need no rename
// are absent from the map.
template <typename Iterable>
gtl::FlatMap<string, string> GetUniqueNames(const Iterable& first_iterable,
                                            const Iterable& second_iterable) {
  gtl::FlatMap<string, string> changed_node_names;
  const auto first_names = GetNodeNamesSet(first_iterable);
  // BUG FIX: the collision set for generated names must be seeded with the
  // names of `second_iterable` (the collection being renamed), not
  // `first_iterable`; otherwise a freshly generated name could collide with
  // an existing second-function node name.
  auto second_names = GetNodeNamesSet(second_iterable);
  int id = second_iterable.size();
  for (const auto& node : second_iterable) {
    string name_before = node.name();
    string name = name_before;
    bool changed_name = false;
    // Keep appending "/_<id>" until the candidate clashes with neither the
    // first collection nor any name already taken in the second.
    while (first_names.count(name) ||
           (changed_name && second_names.count(name))) {
      name = strings::StrCat(name_before, "/_", id);
      changed_name = true;
      ++id;
    }
    if (changed_name) {
      changed_node_names[name_before] = name;
      second_names.insert(std::move(name));
    }
  }
  return changed_node_names;
}
// Renames nodes of the function being fused in (`nodes_to_fuse`) that clash
// with node names of `first_function`, rewriting every reference to them:
// node inputs, ret values, control rets and control outputs.
void RenameFunctionNodes(
    const FunctionDef& first_function,
    protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse,
    protobuf::Map<string, string>* rets_to_fuse,
    protobuf::Map<string, string>* control_rets_to_fuse,
    protobuf::RepeatedPtrField<string>* control_outputs_to_fuse) {
  const gtl::FlatMap<string, string> changed_node_names =
      GetUniqueNames(first_function.node_def(), *nodes_to_fuse);
  // Maps "node:out" references onto the renamed node, preserving the output
  // suffix.
  auto updated_name = [&changed_node_names](const string& input) {
    string input_node = ParseNodeConnection(input);
    auto iter = changed_node_names.find(input_node);
    if (iter != changed_node_names.end()) {
      return iter->second + ParseOutputNode(input);
    }
    return input;
  };
  for (NodeDef& function_node : *nodes_to_fuse) {
    if (const string* new_name =
            gtl::FindOrNull(changed_node_names, function_node.name())) {
      function_node.set_name(*new_name);
    }
    for (string& input : *function_node.mutable_input()) {
      input = updated_name(input);
    }
  }
  for (auto& [unused, ret_node] : *rets_to_fuse) {
    ret_node = updated_name(ret_node);
  }
  // Control rets/outputs are keyed by node name, so rebuild both containers
  // with the updated names.
  protobuf::Map<string, string> new_control_rets_to_fuse;
  protobuf::RepeatedPtrField<string> new_control_outputs_to_fuse;
  for (const auto& [unused, control_ret_node] : *control_rets_to_fuse) {
    string updated_control_ret_node = updated_name(control_ret_node);
    new_control_rets_to_fuse.insert(
        {updated_control_ret_node, updated_control_ret_node});
    *new_control_outputs_to_fuse.Add() = updated_control_ret_node;
  }
  *control_rets_to_fuse = new_control_rets_to_fuse;
  *control_outputs_to_fuse = new_control_outputs_to_fuse;
}
// Returns the names of all input args of `function`'s signature.
StringCollection GetFunctionInputs(const FunctionDef& function) {
  const auto& signature = function.signature();
  return GetNames(signature.input_arg(), signature.input_arg_size());
}
// Builds a copy of `second_signature` whose input/output arg names are
// renamed to avoid clashes with `first_signature`, and rewrites all
// references to the renamed args in the rets, control rets and node inputs
// of the function being fused.
OpDef GetUniqueSignature(const OpDef& first_signature,
                         const OpDef& second_signature,
                         protobuf::Map<string, string>* rets_to_fuse,
                         protobuf::Map<string, string>* control_rets_to_fuse,
                         protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse) {
  const gtl::FlatMap<string, string> changed_input_names =
      GetUniqueNames(first_signature.input_arg(), second_signature.input_arg());
  OpDef signature;
  signature.set_name(second_signature.name());
  for (const auto& input_arg : second_signature.input_arg()) {
    auto& input = *signature.add_input_arg();
    input = input_arg;
    if (const string* new_name =
            gtl::FindOrNull(changed_input_names, input.name())) {
      input.set_name(*new_name);
    }
  }
  const gtl::FlatMap<string, string> changed_output_names = GetUniqueNames(
      first_signature.output_arg(), second_signature.output_arg());
  for (const auto& output_arg : second_signature.output_arg()) {
    auto& output = *signature.add_output_arg();
    output = output_arg;
    if (const string* new_name =
            gtl::FindOrNull(changed_output_names, output.name())) {
      output.set_name(*new_name);
    }
  }
  // Rebuilds a ret map: keys follow output renames, values follow input
  // renames (values may reference a renamed input arg as "name:suffix").
  auto new_rets = [&](const protobuf::Map<string, string>& old_rets) {
    protobuf::Map<string, string> new_rets;
    for (const auto& ret : old_rets) {
      const auto& key = changed_output_names.count(ret.first)
                            ? changed_output_names.at(ret.first)
                            : ret.first;
      const auto& input = ParseNodeConnection(ret.second);
      const auto& value =
          changed_input_names.count(input)
              ? changed_input_names.at(input) + ParseOutputNode(ret.second)
              : ret.second;
      new_rets[key] = value;
    }
    return new_rets;
  };
  *rets_to_fuse = new_rets(*rets_to_fuse);
  *control_rets_to_fuse = new_rets(*control_rets_to_fuse);
  // Node inputs referring to a renamed input arg are updated, preserving the
  // control-input '^' marker if present.
  for (NodeDef& function_node : *nodes_to_fuse) {
    for (auto& node_input : *function_node.mutable_input()) {
      bool is_control_input = IsControlInput(node_input);
      const auto& input =
          ParseNodeConnection(StripControlInputNotation(node_input));
      if (const string* new_name =
              gtl::FindOrNull(changed_input_names, input)) {
        node_input = *new_name + ParseOutputNode(node_input);
        if (is_control_input) {
          node_input = AddControlInputNotation(node_input);
        }
      }
    }
  }
  if (second_signature.is_stateful()) {
    signature.set_is_stateful(true);
  }
  return signature;
}
// Rewires node inputs of the second function: any input that refers to one
// of the second function's input args is replaced by whatever `set_input`
// chooses (typically the corresponding output of the first function).
// The control-input '^' marker is preserved.
void FuseFunctionNodes(const StringCollection& first_inputs,
                       const StringCollection& second_inputs,
                       const StringCollection& first_outputs,
                       const SetInputFn& set_input,
                       protobuf::RepeatedPtrField<NodeDef>* nodes_to_fuse) {
  for (NodeDef& function_node : *nodes_to_fuse) {
    for (auto& node_input : *function_node.mutable_input()) {
      bool is_control_input = IsControlInput(node_input);
      auto parsed_name =
          ParseNodeConnection(StripControlInputNotation(node_input));
      auto input_it =
          std::find(second_inputs.begin(), second_inputs.end(), parsed_name);
      // Inputs that do not reference a second-function input arg stay as-is.
      if (input_it == second_inputs.end()) continue;
      auto arg_num = std::distance(second_inputs.begin(), input_it);
      node_input =
          set_input(first_inputs, second_inputs, first_outputs, arg_num);
      if (is_control_input) {
        node_input = AddControlInputNotation(node_input);
      }
    }
  }
}
// Same rewiring as FuseFunctionNodes, but for the fused function's ret map:
// ret values that refer to a second-function input arg are replaced via
// `set_input`.
void FuseReturns(const StringCollection& first_inputs,
                 const StringCollection& second_inputs,
                 const StringCollection& first_outputs,
                 const SetInputFn& set_input,
                 protobuf::Map<string, string>* fused_ret) {
  for (auto& ret : *fused_ret) {
    auto return_input = ParseNodeConnection(ret.second);
    auto input_it =
        std::find(second_inputs.begin(), second_inputs.end(), return_input);
    if (input_it == second_inputs.end()) continue;
    auto input_idx = std::distance(second_inputs.begin(), input_it);
    ret.second =
        set_input(first_inputs, second_inputs, first_outputs, input_idx);
  }
}
// Returns, for each output arg of `function`, the tensor name it is bound
// to in the ret map (see GetOutputNode).
StringCollection GetFunctionOutputs(const FunctionDef& function) {
  const int num_outputs = function.signature().output_arg_size();
  StringCollection outputs;
  outputs.reserve(num_outputs);
  for (int idx = 0; idx < num_outputs; ++idx) {
    outputs.push_back(GetOutputNode(function, idx));
  }
  return outputs;
}
// Adds to `library` a predicate function that ignores its inputs (one fake
// arg per entry of `fake_args`, matching their types) and always returns a
// scalar false.  Returns the new FunctionDef.
FunctionDef* CreateFalsePredicate(
    const protobuf::RepeatedPtrField<OpDef_ArgDef>& fake_args,
    FunctionDefLibrary* library) {
  GraphDef graph;
  MutableGraphView graph_view(&graph);
  // Single constant node producing the scalar `false`.
  auto* node = graph_utils::AddScalarConstNode(false, &graph_view);
  auto* false_predicate = library->add_function();
  graph_utils::SetUniqueGraphFunctionName("false_predicate", library,
                                          false_predicate);
  int num = 0;
  for (const auto& fake_arg : fake_args) {
    auto* arg = false_predicate->mutable_signature()->add_input_arg();
    arg->set_type(fake_arg.type());
    arg->set_name(strings::StrCat("fake_arg", num));
    num++;
  }
  auto* output = false_predicate->mutable_signature()->add_output_arg();
  output->set_name("false_out");
  output->set_type(DT_BOOL);
  (*false_predicate->mutable_ret())["false_out"] = node->name() + ":output:0";
  *false_predicate->mutable_node_def() = std::move(*graph.mutable_node());
  return false_predicate;
}
// CHECK-fails with an explanatory message if the two signatures cannot be
// composed (see CanCompose).
void CheckIfCanCompose(const OpDef& first_signature,
                       const OpDef& second_signature) {
  CHECK(CanCompose(first_signature, second_signature))
      << "The number of input arguments of function " << second_signature.name()
      << " should be the same as the number of output arguments of function "
      << first_signature.name() << ".";
}
}
// SetNodesFn that simply concatenates the node defs of both functions into
// the fused function.  `library` is unused here; it is part of the
// SetNodesFn interface.
void MergeNodes(const FunctionDef& first_function,
                const FunctionDef& second_function, FunctionDef* fused_function,
                FunctionDefLibrary* library) {
  fused_function->mutable_node_def()->CopyFrom(first_function.node_def());
  fused_function->mutable_node_def()->MergeFrom(second_function.node_def());
}
// Composition is possible iff the first function produces exactly as many
// outputs as the second function consumes.
bool CanCompose(const OpDef& first_signature, const OpDef& second_signature) {
  const int produced = first_signature.output_arg_size();
  const int consumed = second_signature.input_arg_size();
  return produced == consumed;
}
// SetInputFn for composition: the second function's arg_num-th input is
// wired to the first function's arg_num-th output.  The other collections
// are unused but required by the interface.
string ComposeInput(const StringCollection& first_inputs,
                    const StringCollection& second_inputs,
                    const StringCollection& first_outputs, int arg_num) {
  const auto& wired_output = first_outputs.at(arg_num);
  return wired_output;
}
// SetFunctionSignatureFn for composition: the fused function takes the
// first function's inputs and yields the second function's outputs.
void ComposeSignature(const OpDef& first_signature,
                      const OpDef& second_signature, OpDef* fused_signature) {
  CheckIfCanCompose(first_signature, second_signature);
  *fused_signature->mutable_input_arg() = first_signature.input_arg();
  *fused_signature->mutable_output_arg() = second_signature.output_arg();
  const bool any_stateful =
      first_signature.is_stateful() || second_signature.is_stateful();
  const bool all_stateful =
      first_signature.is_stateful() && second_signature.is_stateful();
  if (any_stateful) {
    if (!all_stateful) {
      // Record fusions mixing a stateful and a stateless function.
      metrics::RecordTFDataDebug("fused_with_mixed_statefulness");
    }
    fused_signature->set_is_stateful(true);
  }
  // Control outputs from both functions are preserved.
  auto* control_outputs = fused_signature->mutable_control_output();
  control_outputs->Add(first_signature.control_output().begin(),
                       first_signature.control_output().end());
  control_outputs->Add(second_signature.control_output().begin(),
                       second_signature.control_output().end());
}
// SetOutputFn for composition: only the second function's outputs are
// exposed, so the fused return map is exactly `second_ret`.
void ComposeOutput(const protobuf::Map<string, string>& first_ret,
                   const protobuf::Map<string, string>& second_ret,
                   protobuf::Map<string, string>* fused_ret) {
  fused_ret->clear();
  fused_ret->insert(second_ret.begin(), second_ret.end());
}
// SetFunctionSignatureFn that keeps the first function's signature and
// appends the second function's outputs (used e.g. for map+filter fusion).
void CombineSignature(const OpDef& first_signature,
                      const OpDef& second_signature, OpDef* fused_signature) {
  CheckIfCanCompose(first_signature, second_signature);
  *fused_signature = first_signature;
  auto* fused_outputs = fused_signature->mutable_output_arg();
  fused_outputs->MergeFrom(second_signature.output_arg());
}
// SetOutputFn that keeps both functions' return entries.  On a key
// collision the first function's entry wins, because insert() does not
// overwrite existing keys.
void CombineOutput(const protobuf::Map<string, string>& first_ret,
                   const protobuf::Map<string, string>& second_ret,
                   protobuf::Map<string, string>* fused_ret) {
  *fused_ret = first_ret;
  for (const auto& ret_entry : second_ret) {
    fused_ret->insert(ret_entry);
  }
}
// SetInputFn that feeds the second function the same inputs as the first
// (zip-style fusion).  Other collections are unused but required by the
// interface.
string SameInput(const StringCollection& first_inputs,
                 const StringCollection& second_inputs,
                 const StringCollection& first_outputs, int arg_num) {
  const auto& shared_input = first_inputs.at(arg_num);
  return shared_input;
}
// True when both signatures have identical input and output arity.
bool HasSameSignature(const OpDef& first_signature,
                      const OpDef& second_signature) {
  const bool same_inputs =
      first_signature.input_arg_size() == second_signature.input_arg_size();
  const bool same_outputs =
      first_signature.output_arg_size() == second_signature.output_arg_size();
  return same_inputs && same_outputs;
}
// SetFunctionSignatureFn that requires identical arities and reuses the
// first function's signature verbatim.  CHECK-fails on mismatch.
void SameSignature(const OpDef& first_signature, const OpDef& second_signature,
                   OpDef* fused_signature) {
  CHECK(HasSameSignature(first_signature, second_signature))
      << "Functions do not have the same signature";
  *fused_signature = first_signature;
}
// Implements a lazy AND of two single-output boolean predicates with an
// `If` op: the second predicate (then_branch) runs only when the first one
// evaluates to true; otherwise a constant-false function (else_branch) is
// substituted, short-circuiting the conjunction.
void LazyConjunctionNodes(const FunctionDef& first_function,
                          const FunctionDef& second_function,
                          FunctionDef* fused_function,
                          FunctionDefLibrary* library) {
  fused_function->mutable_node_def()->CopyFrom(first_function.node_def());
  NodeDefBuilder if_builder("", "If");
  // The first predicate's (single) boolean output becomes the condition.
  if_builder.Input(GetOutputNode(first_function, 0), 0, DT_BOOL);
  DataTypeVector in_arg_types;
  std::vector<NodeDefBuilder::NodeOut> inputs;
  // Both branches receive the original function arguments unchanged.
  for (const auto& input_arg : first_function.signature().input_arg()) {
    inputs.push_back({input_arg.name(), 0, input_arg.type()});
    in_arg_types.push_back(input_arg.type());
  }
  if_builder.Attr("Tin", in_arg_types);
  if_builder.Attr("Tcond", DT_BOOL);
  if_builder.Attr("Tout", DataTypeVector{DT_BOOL});
  // Allow lowering of the If to Switch/Merge for better performance.
  if_builder.Attr("_lower_using_switch_merge", true);
  NameAttrList then_branch;
  then_branch.set_name(second_function.signature().name());
  if_builder.Attr("then_branch", then_branch);
  // The else branch returns `false` without running the second predicate.
  auto* false_predicate =
      CreateFalsePredicate(first_function.signature().input_arg(), library);
  NameAttrList else_branch;
  else_branch.set_name(false_predicate->signature().name());
  if_builder.Attr("else_branch", else_branch);
  if_builder.Input(inputs);
  auto* if_node = fused_function->add_node_def();
  TF_CHECK_OK(if_builder.Finalize(if_node));
  function_utils::SetUniqueFunctionNodeName("cond", fused_function, if_node);
  // The fused function's single output is the If node's boolean result.
  GetMutableOutputNode(fused_function, 0) = if_node->name() + ":output:0";
}
// SetOutputFn for lazy conjunction: both predicates must be single-output;
// the fused predicate reuses the first function's (later rewired) return
// entry.
void LazyConjunctionOutput(const protobuf::Map<string, string>& first_ret,
                           const protobuf::Map<string, string>& second_ret,
                           protobuf::Map<string, string>* fused_ret) {
  CHECK_EQ(first_ret.size(), 1);
  CHECK_EQ(second_ret.size(), 1);
  fused_ret->clear();
  fused_ret->insert(first_ret.begin(), first_ret.end());
}
// Fuses `first_function` and `second_function` into a new function added to
// `library`, using the supplied customization points:
//   - `set_signature` builds the fused OpDef signature,
//   - `set_input` decides how the second function's inputs are rewired,
//   - `set_output` builds the fused return map,
//   - `set_nodes` merges the function bodies.
// Returns nullptr when either function carries attributes this pass does
// not understand (fusing could silently drop them).
FunctionDef* FuseFunctions(
    const FunctionDef& first_function, const FunctionDef& second_function,
    StringPiece fused_name_prefix, const SetFunctionSignatureFn& set_signature,
    const SetInputFn& set_input, const SetOutputFn& set_output,
    const SetNodesFn& set_nodes, FunctionDefLibrary* library) {
  // Only the tf.data marker and "_construction_context" are recognized;
  // any other attribute makes the function ineligible for fusion.
  auto has_unknown_attrs = [](const FunctionDef& func) {
    int known_attribute_size = 0;
    if (data::IsTFDataFunction(func)) known_attribute_size += 1;
    if (func.attr().contains("_construction_context"))
      known_attribute_size += 1;
    return func.attr_size() > known_attribute_size;
  };
  if (has_unknown_attrs(first_function) || has_unknown_attrs(second_function)) {
    return nullptr;
  }
  // Work on a copy of the second function whose names are made unique with
  // respect to the first function.
  FunctionDef setup_function = second_function;
  *setup_function.mutable_signature() = GetUniqueSignature(
      first_function.signature(), setup_function.signature(),
      setup_function.mutable_ret(), setup_function.mutable_control_ret(),
      setup_function.mutable_node_def());
  FunctionDef* fused_function = library->add_function();
  RenameFunctionNodes(
      first_function, setup_function.mutable_node_def(),
      setup_function.mutable_ret(), setup_function.mutable_control_ret(),
      setup_function.mutable_signature()->mutable_control_output());
  set_output(first_function.ret(), setup_function.ret(),
             fused_function->mutable_ret());
  CombineOutput(first_function.control_ret(), setup_function.control_ret(),
                fused_function->mutable_control_ret());
  set_signature(first_function.signature(), setup_function.signature(),
                fused_function->mutable_signature());
  graph_utils::SetUniqueGraphFunctionName(fused_name_prefix, library,
                                          fused_function);
  CHECK(fused_function->signature().output_arg_size() ==
        fused_function->ret_size())
      << "Fused function must have the same number of returns as output "
         "args. Output size: "
      << fused_function->signature().output_arg_size()
      << ", ret size: " << fused_function->ret_size();
  const auto first_inputs = GetFunctionInputs(first_function);
  const auto second_inputs = GetFunctionInputs(setup_function);
  const auto first_outputs = GetFunctionOutputs(first_function);
  // Rewire the second function's inputs (and any returns referencing them)
  // according to `set_input`.
  FuseFunctionNodes(first_inputs, second_inputs, first_outputs, set_input,
                    setup_function.mutable_node_def());
  FuseReturns(first_inputs, second_inputs, first_outputs, set_input,
              fused_function->mutable_ret());
  set_nodes(first_function, setup_function, fused_function, library);
  (*fused_function->mutable_attr())[data::kTFDataFunction].set_b(true);
  // Preserve the construction context; warn when the inputs disagree.
  auto get_construction_context = [](const FunctionDef& func) {
    auto iter = func.attr().find("_construction_context");
    if (iter == func.attr().cend()) return std::string();
    return iter->second.s();
  };
  std::string first_construction_context =
      get_construction_context(first_function);
  std::string second_construction_context =
      get_construction_context(second_function);
  if (first_construction_context != second_construction_context) {
    // Fixed: previously this logged `first_construction_context` twice,
    // hiding the actual mismatching value of the second function.
    LOG(ERROR) << "_construction_context attribute mismatch during fused "
                  "function optimization pass. First function: "
               << first_construction_context
               << " Second function: " << second_construction_context;
  }
  if (!first_construction_context.empty()) {
    (*fused_function->mutable_attr())["_construction_context"].set_s(
        first_construction_context);
  }
  return fused_function;
}
}
}
}
#include "tensorflow/core/grappler/optimizers/data/fusion_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
namespace fusion_utils {
namespace {
// Strips the ":output:idx" suffix from a tensor reference, leaving just the
// producing node's name ("^node" control inputs are returned unchanged).
string ParseNodeConnection(const string& name) {
  const auto colon_pos = name.find(':');
  return name.substr(0, colon_pos);
}
void CheckUniqueNames(const FunctionDef& function) {
std::unordered_set<string> inputs;
for (const auto& input_arg : function.signature().input_arg())
inputs.insert(input_arg.name());
EXPECT_EQ(inputs.size(), function.signature().input_arg_size());
std::unordered_set<string> outputs;
for (const auto& output_arg : function.signature().output_arg())
outputs.insert(output_arg.name());
EXPECT_EQ(outputs.size(), function.signature().output_arg_size());
std::unordered_set<string> nodes;
for (const auto& node : function.node_def()) nodes.insert(node.name());
EXPECT_EQ(nodes.size(), function.node_def_size());
}
// Composing XTimesTwo with itself yields x*2*2: one input, one output, and
// the second Mul must consume the first Mul's result.
// Fixed: removed a leftover debug print (std::cerr << DebugString()) that
// spammed stderr on every test run.
TEST(FusionUtilsTest, FuseFunctionsByComposition) {
  GraphDef graph;
  auto *parent_function = graph.mutable_library()->add_function();
  *parent_function = test::function::XTimesTwo();
  auto *function = graph.mutable_library()->add_function();
  *function = test::function::XTimesTwo();
  auto *fused_function = FuseFunctions(
      *parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
      fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
      fusion_utils::MergeNodes, graph.mutable_library());
  EXPECT_EQ(fused_function->signature().name(), "fused_maps");
  EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
  EXPECT_EQ(fused_function->signature().output_arg_size(), 1);
  EXPECT_EQ(fused_function->ret_size(), 1);
  CheckUniqueNames(*fused_function);
  // The parent's Mul keeps its original name "y"; the second function's Mul
  // was renamed during fusion.
  const NodeDef *parent_mul = nullptr, *output_mul = nullptr;
  for (const auto& fused_node : fused_function->node_def()) {
    if (fused_node.op() == "Mul") {
      if (fused_node.name() == "y")
        parent_mul = &fused_node;
      else
        output_mul = &fused_node;
    }
  }
  ASSERT_NE(parent_mul, nullptr);
  ASSERT_NE(output_mul, nullptr);
  EXPECT_EQ(ParseNodeConnection(output_mul->input(0)), parent_mul->name());
  auto output_value = fused_function->ret().at(
      fused_function->signature().output_arg(0).name());
  EXPECT_EQ(ParseNodeConnection(output_value), output_mul->name());
}
// Composition must preserve control (^) inputs: after fusion, the second
// Mul still carries a control dependency on the first Mul.
TEST(FusionUtilsTest, FuseFunctionsWithControlInputs) {
  GraphDef graph;
  auto *parent_function = graph.mutable_library()->add_function();
  *parent_function = test::function::XTimesTwoWithControlInput();
  auto *function = graph.mutable_library()->add_function();
  *function = test::function::XTimesTwoWithControlInput();
  auto *fused_function = FuseFunctions(
      *parent_function, *function, "fused_maps", fusion_utils::ComposeSignature,
      fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
      fusion_utils::MergeNodes, graph.mutable_library());
  EXPECT_EQ(fused_function->signature().name(), "fused_maps");
  EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
  EXPECT_EQ(fused_function->signature().output_arg_size(), 1);
  EXPECT_EQ(fused_function->ret_size(), 1);
  CheckUniqueNames(*fused_function);
  // The parent's Mul keeps its original name "y"; the second function's Mul
  // was renamed during fusion.
  const NodeDef *parent_mul = nullptr, *output_mul = nullptr;
  for (const auto& fused_node : fused_function->node_def()) {
    if (fused_node.op() == "Mul") {
      if (fused_node.name() == "y")
        parent_mul = &fused_node;
      else
        output_mul = &fused_node;
    }
  }
  ASSERT_NE(parent_mul, nullptr);
  ASSERT_NE(output_mul, nullptr);
  // Control inputs are spelled "^node" in NodeDef inputs.
  EXPECT_EQ(ParseNodeConnection(output_mul->input(1)),
            absl::StrCat("^", parent_mul->name()));
  auto output_value = fused_function->ret().at(
      fused_function->signature().output_arg(0).name());
  EXPECT_EQ(ParseNodeConnection(output_value), output_mul->name());
}
// Control outputs from both fused functions must survive fusion under
// distinct names, and every control_ret entry must map back to itself.
TEST(FusionUtilsTest, FuseFunctionWithControlOutputs) {
  GraphDef graph;
  auto *f1 = graph.mutable_library()->add_function();
  *f1 = test::function::XTimesTwoWithControlOutput();
  f1->mutable_signature()->set_name("f1");
  auto *f2 = graph.mutable_library()->add_function();
  *f2 = test::function::XTimesTwoWithControlOutput();
  f2->mutable_signature()->set_name("f2");
  auto *fused =
      FuseFunctions(*f1, *f2, "fused_maps", fusion_utils::ComposeSignature,
                    fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
                    fusion_utils::MergeNodes, graph.mutable_library());
  EXPECT_EQ(fused->signature().control_output_size(), 2);
  const string first_control = fused->signature().control_output(0);
  const string second_control = fused->signature().control_output(1);
  EXPECT_NE(first_control, second_control);
  EXPECT_EQ(fused->control_ret_size(), 2);
  EXPECT_TRUE(fused->control_ret().contains(first_control));
  EXPECT_TRUE(fused->control_ret().contains(second_control));
  EXPECT_EQ(fused->control_ret().at(first_control), first_control);
  EXPECT_EQ(fused->control_ret().at(second_control), second_control);
}
// Parameter pack for the statefulness tests: whether the first (a) and
// second (b) fused function are marked stateful.
struct StatefulnessTestCase {
  bool is_stateful_a, is_stateful_b;
};
using FusionUtilsTest_Statefulness =
    ::testing::TestWithParam<StatefulnessTestCase>;
// The fused function must be stateful iff at least one input function is.
TEST_P(FusionUtilsTest_Statefulness, FuseFunctionStatefulness) {
  const StatefulnessTestCase &test_case = GetParam();
  GraphDef graph;
  auto *first_fn = graph.mutable_library()->add_function();
  *first_fn = test::function::XTimesTwo();
  auto *second_fn = graph.mutable_library()->add_function();
  *second_fn = test::function::XTimesTwo();
  if (test_case.is_stateful_a) {
    first_fn->mutable_signature()->set_is_stateful(true);
  }
  if (test_case.is_stateful_b) {
    second_fn->mutable_signature()->set_is_stateful(true);
  }
  auto *fused = FuseFunctions(
      *first_fn, *second_fn, "fused_maps", fusion_utils::ComposeSignature,
      fusion_utils::ComposeInput, fusion_utils::ComposeOutput,
      fusion_utils::MergeNodes, graph.mutable_library());
  EXPECT_EQ(fused->signature().is_stateful(),
            test_case.is_stateful_a || test_case.is_stateful_b);
}
// Exercise all four statefulness combinations.
INSTANTIATE_TEST_SUITE_P(
    StatefulnessTests, FusionUtilsTest_Statefulness,
    ::testing::ValuesIn<StatefulnessTestCase>(
        {{false, false}, {false, true}, {true, false}, {true, true}}));
// Map+filter fusion (CombineSignature/CombineOutput): the fused function
// exposes both the map's value output and the predicate's boolean output,
// and the predicate's Equal node reads the map's output.
TEST(FusionUtilsTest, FuseFunctionWithPredicate) {
  GraphDef graph;
  auto *xtimes_two = graph.mutable_library()->add_function();
  *xtimes_two = test::function::XTimesTwo();
  auto *is_zero = graph.mutable_library()->add_function();
  *is_zero = test::function::IsZero();
  auto *fused_function =
      FuseFunctions(*xtimes_two, *is_zero, "fused_map_and_filter_function",
                    fusion_utils::CombineSignature, fusion_utils::ComposeInput,
                    fusion_utils::CombineOutput, fusion_utils::MergeNodes,
                    graph.mutable_library());
  EXPECT_EQ(fused_function->signature().name(),
            "fused_map_and_filter_function");
  // One input, two outputs: map value + predicate boolean.
  EXPECT_EQ(fused_function->signature().input_arg_size(), 1);
  EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
  EXPECT_EQ(fused_function->ret_size(), 2);
  CheckUniqueNames(*fused_function);
  // IsZero's body is an Equal op; it must be present in the fused body.
  ASSERT_TRUE(
      function_utils::ContainsFunctionNodeWithOp("Equal", *fused_function));
  const auto& equal_node = fused_function->node_def(
      function_utils::FindFunctionNodeWithOp("Equal", *fused_function));
  EXPECT_EQ(xtimes_two->signature().output_arg(0).name(),
            fused_function->signature().output_arg(0).name());
  EXPECT_EQ(fused_function->signature().output_arg(1).name(),
            equal_node.name());
  // The predicate consumes the map's output (composition wiring).
  EXPECT_EQ(ParseNodeConnection(equal_node.input(0)),
            fused_function->signature().output_arg(0).name());
  auto output_value = fused_function->ret().at(
      fused_function->signature().output_arg(1).name());
  EXPECT_EQ(ParseNodeConnection(output_value), equal_node.name());
}
// CombineSignature/CombineOutput keep the first function's output alongside
// the second's, doubling the output arity while keeping one input.
TEST(FusionUtilsTest, FuseSameFunctionWithExtraOutput) {
  GraphDef graph;
  auto *first_fn = graph.mutable_library()->add_function();
  *first_fn = test::function::XTimesTwo();
  auto *second_fn = graph.mutable_library()->add_function();
  *second_fn = test::function::XTimesTwo();
  auto *fused = FuseFunctions(
      *first_fn, *second_fn, "fused_maps", fusion_utils::CombineSignature,
      fusion_utils::ComposeInput, fusion_utils::CombineOutput,
      fusion_utils::MergeNodes, graph.mutable_library());
  EXPECT_EQ(fused->signature().input_arg_size(), 1);
  EXPECT_EQ(fused->signature().output_arg_size(), 2);
  EXPECT_EQ(fused->ret_size(), 2);
  CheckUniqueNames(*fused);
}
// Zip-style fusion with custom callbacks: both functions receive their own
// inputs (no rewiring to outputs), so inputs and outputs both double.
TEST(FusionUtilsTest, ZipFusion) {
  GraphDef graph;
  auto *function = graph.mutable_library()->add_function();
  *function = test::function::XTimesTwo();
  // Signature is the concatenation of both functions' inputs and outputs.
  auto zip_signature = [](const OpDef& parent_function_signature,
                          const OpDef& function_signature,
                          OpDef *fused_function_signature) {
    *fused_function_signature = parent_function_signature;
    fused_function_signature->mutable_input_arg()->MergeFrom(
        function_signature.input_arg());
    fused_function_signature->mutable_output_arg()->MergeFrom(
        function_signature.output_arg());
  };
  // The second function keeps its own (renamed) inputs.
  auto zip_input = [](const StringCollection& parent_inputs,
                      const StringCollection& function_inputs,
                      const StringCollection& parent_outputs, int arg_num) {
    return function_inputs.at(arg_num);
  };
  auto *fused_function =
      FuseFunctions(*function, *function, "zip_maps", zip_signature, zip_input,
                    fusion_utils::CombineOutput, fusion_utils::MergeNodes,
                    graph.mutable_library());
  EXPECT_EQ(fused_function->signature().input_arg_size(), 2);
  EXPECT_EQ(fused_function->signature().output_arg_size(), 2);
  EXPECT_EQ(fused_function->ret_size(), 2);
  CheckUniqueNames(*fused_function);
}
}
}
}
}
#ifndef TENSORFLOW_TSL_PLATFORM_STRINGPRINTF_H_
#define TENSORFLOW_TSL_PLATFORM_STRINGPRINTF_H_
#include <stdarg.h>
#include <string>
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace strings {
std::string Printf(const char* format, ...)
TF_PRINTF_ATTRIBUTE(1, 2);
void Appendf(std::string* dst, const char* format, ...)
TF_PRINTF_ATTRIBUTE(2, 3);
void Appendv(std::string* dst, const char* format, va_list ap);
}
}
#endif
#include "tsl/platform/stringprintf.h"

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>

#include <memory>
namespace tsl {
namespace strings {
// Appends vsnprintf(format, ap) to *dst.  Tries a 1KB stack buffer first;
// on truncation, retries once with an exactly-sized heap buffer.
// Fixed: the heap buffer is now owned by a unique_ptr, so it cannot leak
// if dst->append throws (e.g. std::bad_alloc).
void Appendv(string* dst, const char* format, va_list ap) {
  static const int kSpaceLength = 1024;
  char space[kSpaceLength];
  // vsnprintf may consume its va_list, so always work on a copy.
  va_list backup_ap;
  va_copy(backup_ap, ap);
  int result = vsnprintf(space, kSpaceLength, format, backup_ap);
  va_end(backup_ap);
  if (result < kSpaceLength) {
    if (result >= 0) {
      // Common case: everything fit in the stack buffer.
      dst->append(space, result);
      return;
    }
#ifdef _MSC_VER
    // Older MSVC returns -1 on truncation; re-run with a null buffer to
    // learn the required length.
    va_copy(backup_ap, ap);
    result = vsnprintf(nullptr, 0, format, backup_ap);
    va_end(backup_ap);
#endif
    if (result < 0) {
      // A genuine format error: append nothing.
      return;
    }
  }
  // `result` is the required length, excluding the terminating NUL.
  const int length = result + 1;
  std::unique_ptr<char[]> buf(new char[length]);
  va_copy(backup_ap, ap);
  result = vsnprintf(buf.get(), length, format, backup_ap);
  va_end(backup_ap);
  if (result >= 0 && result < length) {
    dst->append(buf.get(), result);
  }
}
// printf-style formatting into a freshly constructed string.
string Printf(const char* format, ...) {
  string formatted;
  va_list args;
  va_start(args, format);
  Appendv(&formatted, format, args);
  va_end(args);
  return formatted;
}
// printf-style formatting appended onto *dst.
void Appendf(string* dst, const char* format, ...) {
  va_list args;
  va_start(args, format);
  Appendv(dst, format, args);
  va_end(args);
}
}
}
#include "tsl/platform/stringprintf.h"
#include <string>
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
namespace {
// Formatting an empty string yields an empty result, whether the argument
// comes from a std::string or a literal.
TEST(PrintfTest, Empty) {
  const string from_string = Printf("%s", string().c_str());
  EXPECT_EQ("", from_string);
  const string from_literal = Printf("%s", "");
  EXPECT_EQ("", from_literal);
}
TEST(PrintfTest, Misc) {
// MSVC's printf does not support "%n$" positional arguments.
#if !defined(_MSC_VER)
  EXPECT_EQ("123hello w", Printf("%3$d%2$s %1$c", 'w', "hello", 123));
#endif
}
// Appending an empty C string must leave the destination untouched.
TEST(AppendfTest, Empty) {
  string value("Hello");
  const char* nothing = "";
  Appendf(&value, "%s", nothing);
  EXPECT_EQ("Hello", value);
}
// Same as Empty, but with a literal "" argument.
TEST(AppendfTest, EmptyString) {
  const string expected = "Hello";
  string value = expected;
  Appendf(&value, "%s", "");
  EXPECT_EQ(expected, value);
}
// Appendf concatenates the formatted text after the existing contents.
TEST(AppendfTest, String) {
  string text("Hello");
  Appendf(&text, " %s", "World");
  EXPECT_EQ("Hello World", text);
}
// Integer conversions work through Appendf as well.
TEST(AppendfTest, Int) {
  string text("Hello");
  Appendf(&text, " %d", 123);
  EXPECT_EQ("Hello 123", text);
}
TEST(PrintfTest, Multibyte) {
  // In a multibyte locale, "%.*s" must not split a multibyte character.
  // For an invalid code point, libc may legally produce either an empty
  // result or the raw bytes, so both are accepted.
  char* old_locale = setlocale(LC_CTYPE, nullptr);
  setlocale(LC_CTYPE, "en_US.utf8");
  const char kInvalidCodePoint[] = "\375\067s";
  string value = Printf("%.*s", 3, kInvalidCodePoint);
  EXPECT_TRUE(value.empty() || value == kInvalidCodePoint);
  // Repeat with a buffer long enough to force Appendv's heap-buffer path
  // (the stack buffer is 1024 bytes).
  int n = 2048;
  char* buf = new char[n + 1];
  memset(buf, ' ', n - 3);
  // memcpy of 4 bytes includes kInvalidCodePoint's terminating NUL.
  memcpy(buf + n - 3, kInvalidCodePoint, 4);
  value = Printf("%.*s", n, buf);
  EXPECT_TRUE(value.empty() || value == buf);
  delete[] buf;
  setlocale(LC_CTYPE, old_locale);
}
TEST(PrintfTest, NoMultibyte) {
  // In the POSIX locale, the invalid byte sequence is copied through
  // verbatim rather than rejected.
  char* old_locale = setlocale(LC_CTYPE, nullptr);
  setlocale(LC_CTYPE, "POSIX");
  string value = Printf("%.*s", 3, "\375\067s");
  setlocale(LC_CTYPE, old_locale);
  EXPECT_EQ("\375\067s", value);
}
TEST(PrintfTest, DontOverwriteErrno) {
  // A successful Printf must not clobber errno — callers often format an
  // error message and then inspect errno afterwards.
  errno = ECHILD;
  string value = Printf("Hello, %s!", "World");
  EXPECT_EQ(ECHILD, errno);
}
// Forces Appendv's heap-buffer path: the formatted result exceeds the
// 1024-byte stack buffer.  Improved: uses std::string instead of a raw
// new[]/delete[] buffer, so the test cannot leak on assertion failure.
TEST(PrintfTest, LargeBuf) {
  const int n = 2048;
  const std::string big(n, ' ');
  const std::string value = Printf("%s", big.c_str());
  EXPECT_EQ(big, value);
}
}
}
}
#ifndef AROLLA_DECISION_FOREST_EXPR_OPERATOR_DECISION_FOREST_OPERATOR_H_
#define AROLLA_DECISION_FOREST_EXPR_OPERATOR_DECISION_FOREST_OPERATOR_H_
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/qtype/qtype.h"
namespace arolla {
// Expression operator that evaluates a decision forest stored inside the
// operator itself.  Each TreeFilter selects a subset of trees; evaluation
// yields a tuple with one float (or float-array) result per filter.
class DecisionForestOperator : public expr::BasicExprOperator,
                               public expr::BuiltinExprOperatorTag {
 public:
  // Requires inputs for every input id used by `forest`.
  DecisionForestOperator(DecisionForestPtr forest,
                         std::vector<TreeFilter> tree_filters);
  // Overload with an explicit required-inputs map, for callers that
  // restrict or extend which inputs the operator must receive.
  DecisionForestOperator(
      DecisionForestPtr forest, std::vector<TreeFilter> tree_filters,
      const absl::flat_hash_map<int, QTypePtr>& required_types);
  // Returns a tuple qtype with one element per tree filter; validates that
  // the required inputs are consistently all scalars or all arrays.
  absl::StatusOr<QTypePtr> GetOutputQType(
      absl::Span<const QTypePtr> input_qtypes) const final;
  DecisionForestPtr forest() const { return forest_; }
  const std::vector<TreeFilter>& tree_filters() const { return tree_filters_; }
  absl::string_view py_qvalue_specialization_key() const final {
    return "::arolla::DecisionForestOperator";
  }
 private:
  // Delegated-to constructor; `required_input_ids` gets sorted in the body.
  DecisionForestOperator(std::vector<int> required_input_ids,
                         DecisionForestPtr forest,
                         std::vector<TreeFilter> tree_filters);
  DecisionForestPtr forest_;
  std::vector<TreeFilter> tree_filters_;
  // Sorted list of input ids the forest actually reads.
  std::vector<int> required_input_ids_;
};
}
#endif
#include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
// Extracts the key set (input ids) of a required-types map, in
// unspecified (hash) order.
std::vector<int> GetRequiredInputIds(
    const absl::flat_hash_map<int, QTypePtr>& required_types) {
  std::vector<int> ids;
  ids.reserve(required_types.size());
  for (const auto& [input_id, unused_qtype] : required_types) {
    ids.push_back(input_id);
  }
  return ids;
}
}
// Convenience constructor: required inputs come from the forest's own
// required qtypes.  Note: `forest` is intentionally NOT moved here — it is
// also read by GetRequiredInputIds in the same call expression, and
// argument evaluation order is unspecified.
DecisionForestOperator::DecisionForestOperator(
    DecisionForestPtr forest, std::vector<TreeFilter> tree_filters)
    : DecisionForestOperator(GetRequiredInputIds(forest->GetRequiredQTypes()),
                             forest, std::move(tree_filters)) {}
// Constructor with an explicit required-types map.  Unlike the overload
// above, `forest` is only forwarded here, so it can safely be moved.
DecisionForestOperator::DecisionForestOperator(
    DecisionForestPtr forest, std::vector<TreeFilter> tree_filters,
    const absl::flat_hash_map<int, QTypePtr>& required_types)
    : DecisionForestOperator(GetRequiredInputIds(required_types),
                             std::move(forest), std::move(tree_filters)) {}
// Main (private) constructor.  The fingerprint mixes the forest fingerprint
// with the tree filters so operators with different state hash differently.
DecisionForestOperator::DecisionForestOperator(
    std::vector<int> required_input_ids, DecisionForestPtr forest,
    std::vector<TreeFilter> tree_filters)
    : BasicExprOperator(
          "anonymous.decision_forest_operator",
          expr::ExprOperatorSignature::MakeVariadicArgs(),
          "Evaluates decision forest stored in the operator state.",
          FingerprintHasher("::arolla::DecisionForestOperator")
              .Combine(forest->fingerprint())
              .CombineSpan(tree_filters)
              .Finish()),
      forest_(std::move(forest)),
      tree_filters_(std::move(tree_filters)),
      required_input_ids_(std::move(required_input_ids)) {
  // Keep ids sorted so GetOutputQType can use back() as the maximum
  // required input id.
  std::sort(required_input_ids_.begin(), required_input_ids_.end());
}
// Validates the input qtypes and derives the output type: a tuple with one
// float (scalar mode) or float-array (batched mode) element per tree
// filter.
absl::StatusOr<QTypePtr> DecisionForestOperator::GetOutputQType(
    absl::Span<const QTypePtr> input_qtypes) const {
  // required_input_ids_ is sorted, so back() is the largest input id the
  // forest reads; at least that many arguments must be supplied.
  int last_forest_input_id =
      required_input_ids_.empty() ? -1 : required_input_ids_.back();
  if (last_forest_input_id >= static_cast<int>(input_qtypes.size())) {
    return absl::InvalidArgumentError(absl::StrFormat(
        "not enough arguments for the decision forest: expected at least %d, "
        "got %d",
        last_forest_input_id + 1, input_qtypes.size()));
  }
  // Batched mode is inferred from the first required input; all required
  // inputs must then agree (all arrays or all scalars).
  bool batched = !input_qtypes.empty() && !required_input_ids_.empty() &&
                 IsArrayLikeQType(input_qtypes[required_input_ids_[0]]);
  for (int id : required_input_ids_) {
    if (IsArrayLikeQType(input_qtypes[id]) != batched) {
      DCHECK(!required_input_ids_.empty());
      return absl::InvalidArgumentError(absl::StrFormat(
          "either all forest inputs must be scalars or all forest inputs "
          "must be arrays, but arg[%d] is %s and arg[%d] is %s",
          required_input_ids_[0], input_qtypes[required_input_ids_[0]]->name(),
          id, input_qtypes[id]->name()));
    }
  }
  QTypePtr output_type;
  if (batched) {
    DCHECK(!required_input_ids_.empty());
    // Preserve the array kind (e.g. DenseArray) of the inputs, with float
    // values.
    ASSIGN_OR_RETURN(const ArrayLikeQType* array_type,
                     ToArrayLikeQType(input_qtypes[required_input_ids_[0]]));
    ASSIGN_OR_RETURN(output_type,
                     array_type->WithValueQType(GetQType<float>()));
  } else {
    output_type = GetQType<float>();
  }
  return MakeTupleQType(
      std::vector<QTypePtr>(tree_filters_.size(), output_type));
}
}
#include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
// Builds a two-tree test forest: tree 0 (submodel 0) splits on input 0
// (float intervals) and input 1 (int64 set membership); tree 1
// (submodel 1) is a constant 5.
absl::StatusOr<DecisionForestPtr> CreateForest() {
  std::vector<DecisionTree> trees(2);
  trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
  trees[0].tag.submodel_id = 0;
  trees[0].split_nodes = {
      {S(1), S(2), IntervalSplit(0, 1.5, inf)},
      {A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
      {A(2), A(3), IntervalSplit(0, -inf, 10)}};
  trees[1].adjustments = {5};
  trees[1].tag.submodel_id = 1;
  return DecisionForest::FromTrees(std::move(trees));
}
// Fixture that initializes Arolla global state before each test body runs.
class DecisionForestOperatorTest : public ::testing::Test {
  void SetUp() override { CHECK_OK(InitArolla()); }
};
// GetOutputQType must reject too-few arguments and mixed scalar/array
// inputs, and must size the output tuple by the number of tree filters
// (zero filters -> empty tuple; two filters -> two floats).
TEST_F(DecisionForestOperatorTest, GetOutputQType) {
  ASSERT_OK_AND_ASSIGN(const DecisionForestPtr forest, CreateForest());
  {
    // No tree filters: output tuple is empty regardless of inputs.
    auto forest_op = std::make_shared<DecisionForestOperator>(
        forest, std::vector<TreeFilter>{});
    EXPECT_THAT(forest_op->GetOutputQType({GetQType<float>()}),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         HasSubstr("not enough arguments for the decision "
                                   "forest: expected at least 2, got 1")));
    EXPECT_THAT(
        forest_op->GetOutputQType(
            {GetQType<float>(), GetDenseArrayQType<float>()}),
        StatusIs(absl::StatusCode::kInvalidArgument,
                 HasSubstr("either all forest inputs must be scalars or all "
                           "forest inputs must be arrays, but arg[0] is "
                           "FLOAT32 and arg[1] is DENSE_ARRAY_FLOAT32")));
    EXPECT_THAT(
        forest_op->GetOutputQType({GetQType<float>(), GetQType<float>()}),
        IsOkAndHolds(MakeTupleQType({})));
  }
  {
    // Two tree filters: output tuple has two float elements.
    auto forest_op = std::make_shared<DecisionForestOperator>(
        forest, std::vector<TreeFilter>{TreeFilter{.submodels = {0}},
                                        TreeFilter{.submodels = {1, 2}}});
    EXPECT_THAT(forest_op->GetOutputQType({GetQType<float>()}),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         HasSubstr("not enough arguments for the decision "
                                   "forest: expected at least 2, got 1")));
    EXPECT_THAT(
        forest_op->GetOutputQType(
            {GetQType<float>(), GetDenseArrayQType<float>()}),
        StatusIs(absl::StatusCode::kInvalidArgument,
                 HasSubstr("either all forest inputs must be scalars or all "
                           "forest inputs must be arrays, but arg[0] is "
                           "FLOAT32 and arg[1] is DENSE_ARRAY_FLOAT32")));
    EXPECT_THAT(
        forest_op->GetOutputQType({GetQType<float>(), GetQType<float>()}),
        IsOkAndHolds(MakeTupleQType({GetQType<float>(), GetQType<float>()})));
  }
}
}
}
#ifndef XLA_SERVICE_GPU_MODEL_GPU_COLLECTIVE_PERFORMANCE_MODEL_H_
#define XLA_SERVICE_GPU_MODEL_GPU_COLLECTIVE_PERFORMANCE_MODEL_H_
#include <array>
#include <cstdint>
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/stream_executor/device_description.h"
#if GOOGLE_CUDA
#include <dlfcn.h>
#include "third_party/gpus/cuda/nvml/include/nvml.h"
#define NVML_FUNCTOR(name, rettype, args) \
inline rettype(*xla_##name) args = nullptr;
NVML_FUNCTOR(nvmlInit, nvmlReturn_t, ())
NVML_FUNCTOR(nvmlShutdown, nvmlReturn_t, ())
NVML_FUNCTOR(nvmlDeviceGetHandleByIndex, nvmlReturn_t,
(unsigned int index, nvmlDevice_t* device))
NVML_FUNCTOR(nvmlDeviceGetNvLinkCapability, nvmlReturn_t,
(nvmlDevice_t device, unsigned int link,
nvmlNvLinkCapability_t capability, unsigned int* capResult))
#endif
namespace xla {
namespace gpu {
// Analytical cost model for NCCL collectives on NVIDIA GPUs.  Bandwidth
// constants appear to be in GB/s and mirror NCCL's internal tuning tables
// (TODO confirm against NCCL tuning source).
class GpuPerformanceWithCollectiveModel : public GpuPerformanceModelBase {
 public:
  // NCCL collective algorithms this model distinguishes.
  enum CollectiveAlgo {
    RING = 0,
    TREE,
  };
  // Indexed by GPU generation as mapped in GetMaxSysBwFromGpu
  // (index 0: Volta, 1: Ampere, 2: Hopper).
  static constexpr std::array<double, 3> kLowLatencyMaxBandwidths = {
      39.0 , 87.7 , 87.7
  };
  static constexpr std::array<double, 3> kPerChannelMaxRingLL128Bandwidths = {
      20.0 ,
      20.0 ,
      36.7 ,
  };
  // Per-SM-generation NVLink bandwidths and a PCIe fallback.
  static constexpr double kSm60NvlinkBandwidth = 18.0;
  static constexpr double kSm70NvlinkBandwidth = 20.0;
  static constexpr double kSm80NvlinkBandwidth = 20.0;
  static constexpr double kSm90NvlinkBandwidth = 20.0;
  static constexpr double kPciBandwidth = 12.0;
  // Ring achieves slightly lower effective bandwidth than peak.
  static constexpr double kRingAlgorithmDiscountFactor = 0.92;
  // Candidate intra-node speed steps, pre- and Hopper-specific.
  static constexpr std::array<double, 13> kIntraNodeSpeeds = {
      40.0, 30.0, 20.0, 18.0, 15.0, 12.0, 10.0, 9.0, 7.0, 6.0, 5.0, 4.0, 3.0};
  static constexpr std::array<double, 9> kIntraNodeSpeedsSm90 = {
      60.0, 40.0, 30.0, 24.0, 20.0, 15.0, 12.0, 6.0, 3.0};
  static constexpr int64_t kMaxNumChannelsRing = 16;
  static constexpr int64_t kLL128NumThreads = 640;
  // Fixed launch overhead added to every modeled collective.
  static constexpr absl::Duration kNcclKernelLaunchOverhead =
      absl::Microseconds(5);
  static absl::Duration ComputeCollectiveTime(
      const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
      const se::DeviceDescription& gpu_device_info);
  // NVLink bandwidth for the given compute capability.
  static float GetNvlinkBw(se::CudaComputeCapability compute_capability);
  // NVML dynamic-loading helpers (used to query NVLink P2P support).
  static bool InitNvml();
  static bool ShutdownNvml();
  static uint32_t CheckIfNvlinkSupportsP2P();
 private:
  static absl::Duration ComputeAllreduceTime(
      const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
      const se::DeviceDescription& gpu_device_info);
};
}
}
#endif
#include "xla/service/gpu/model/gpu_collective_performance_model.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/numbers.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/nvml/include/nvml.h"
#endif
namespace xla {
namespace gpu {
namespace {
// Returns the maximum number of NCCL channels allowed for `algorithm`.
// The NCCL_MAX_NCHANNELS environment variable may lower (but never raise)
// the algorithm's built-in cap.
int64_t GetNcclMaxNumChannels(
    GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) {
  int64_t cap = 0;
  switch (algorithm) {
    case GpuPerformanceWithCollectiveModel::RING:
    case GpuPerformanceWithCollectiveModel::TREE:
      cap = GpuPerformanceWithCollectiveModel::kMaxNumChannelsRing;
      break;
  }
  if (const char* env = std::getenv("NCCL_MAX_NCHANNELS")) {
    int64_t env_cap;
    if (absl::SimpleAtoi(env, &env_cap)) {
      cap = std::min(env_cap, cap);
    }
  }
  return cap;
}
// Returns the minimum number of NCCL channels for `algorithm`, optionally
// overridden by the NCCL_MIN_NCHANNELS environment variable.
int64_t GetMinNumberOfChannels(
    GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) {
  int64_t min_nchannels = 0;
  switch (algorithm) {
    // Ring and tree currently share the same default minimum.
    case GpuPerformanceWithCollectiveModel::RING:
    case GpuPerformanceWithCollectiveModel::TREE:
      min_nchannels = 1;
      break;
  }
  const char* env = std::getenv("NCCL_MIN_NCHANNELS");
  if (env != nullptr) {
    int64_t min_nchannels_from_env;
    if (absl::SimpleAtoi(env, &min_nchannels_from_env)) {
      // NOTE(review): std::min means the env var can only lower the minimum,
      // never raise it — confirm this matches the intended NCCL semantics.
      min_nchannels = std::min(min_nchannels_from_env, min_nchannels);
    }
  }
  return min_nchannels;
}
// Picks the NCCL thread-block size. NCCL_NTHREADS, when set and positive,
// is clamped into [min_num_threads, max_num_threads]; a value that is not a
// multiple of the warp size falls back to max_num_threads. Otherwise the
// caller-provided default is used.
int GetNumThreads(int warp_size, int min_num_threads, int max_num_threads,
                  int default_num_threads) {
  int requested = default_num_threads;
  if (const char* env = std::getenv("NCCL_NTHREADS")) {
    CHECK(absl::SimpleAtoi(env, &requested));
  }
  if (requested <= 0) {
    return default_num_threads;
  }
  if (requested % warp_size != 0) {
    return max_num_threads;
  }
  if (requested > max_num_threads) {
    return max_num_threads;
  }
  if (requested < min_num_threads) {
    return min_num_threads;
  }
  return requested;
}
// Looks up the max system bandwidth for the GPU generation in
// `bandwidths_table` (indexed [0]=Volta, [1]=Ampere, [2]=Hopper); returns
// -1 for compute capabilities not covered by the table.
float GetMaxSysBwFromGpu(const se::CudaComputeCapability cc,
                         const double* bandwidths_table) {
  switch (cc.major) {
    case se::CudaComputeCapability::VOLTA:
      return bandwidths_table[0];
    case se::CudaComputeCapability::AMPERE:
      return bandwidths_table[1];
    case se::CudaComputeCapability::HOPPER:
      return bandwidths_table[2];
  }
  return -1;
}
}
// Returns the per-GPU NVLink bandwidth constant for the given compute
// capability; unknown / pre-Pascal parts fall back to the SM80 value, as in
// the original ternary chain.
float GpuPerformanceWithCollectiveModel::GetNvlinkBw(
    se::CudaComputeCapability compute_capability) {
  if (compute_capability.IsAtLeast(se::CudaComputeCapability::HOPPER)) {
    return kSm90NvlinkBandwidth;
  }
  if (compute_capability.IsAtLeast(se::CudaComputeCapability::AMPERE)) {
    return kSm80NvlinkBandwidth;
  }
  if (compute_capability.IsAtLeast(se::CudaComputeCapability::VOLTA)) {
    return kSm70NvlinkBandwidth;
  }
  if (compute_capability.IsAtLeast(se::CudaComputeCapability::PASCAL_)) {
    return kSm60NvlinkBandwidth;
  }
  return kSm80NvlinkBandwidth;
}
// Loads libnvidia-ml.so.1, resolves the NVML entry points used by this
// model into the xla_nvml* function pointers, and initializes NVML.
// Returns true on success; always false in non-CUDA builds.
bool GpuPerformanceWithCollectiveModel::InitNvml() {
#if GOOGLE_CUDA
  void* libhandle = dlopen("libnvidia-ml.so.1", RTLD_NOW);
  CHECK(libhandle != nullptr) << "Failed to open libnvidia-ml.so.1";
  // Pairs each global function pointer with the symbol name to resolve.
  struct SymbolEntry {
    void** functor;
    char const* name;
  };
  std::vector<SymbolEntry> symbols = {
      {(void**)&xla_nvmlInit, "nvmlInit_v2"},
      {(void**)&xla_nvmlShutdown, "nvmlShutdown"},
      {(void**)&xla_nvmlDeviceGetHandleByIndex, "nvmlDeviceGetHandleByIndex"},
      {(void**)&xla_nvmlDeviceGetNvLinkCapability,
       "nvmlDeviceGetNvLinkCapability"},
  };
  for (SymbolEntry se : symbols) {
    // NOTE(review): dlsym failures are not checked here; a missing symbol
    // leaves a null pointer that would crash on first use — confirm intended.
    *se.functor = dlsym(libhandle, se.name);
  }
  nvmlReturn_t init_result = xla_nvmlInit();
  return init_result == NVML_SUCCESS;
#else
  return false;
#endif
}
// Shuts down NVML via the dynamically resolved entry point. Returns true on
// success; always false in non-CUDA builds.
bool GpuPerformanceWithCollectiveModel::ShutdownNvml() {
#if GOOGLE_CUDA
  nvmlReturn_t shutdown_result = xla_nvmlShutdown();
  return shutdown_result == NVML_SUCCESS;
#else
  return false;
#endif
}
// Queries NVML for the NVLink P2P capability of device 0, link 0. Returns
// the capability value (nonzero means P2P supported); always 0 in non-CUDA
// builds. Initializes and shuts down NVML around the query, CHECK-failing
// on any NVML error.
uint32_t
GpuPerformanceWithCollectiveModel::CheckIfNvlinkSupportsP2P() {
#if GOOGLE_CUDA
  CHECK(InitNvml()) << "NVML init failed.";
  nvmlDevice_t nvml_device;
  nvmlReturn_t get_device_result =
      xla_nvmlDeviceGetHandleByIndex(0, &nvml_device);
  CHECK(get_device_result == NVML_SUCCESS);
  uint32_t supported_p2p = 0;
  // Only link 0 of device 0 is probed; assumed representative of the node.
  nvmlReturn_t nvlink_cap_result = xla_nvmlDeviceGetNvLinkCapability(
      nvml_device, 0, NVML_NVLINK_CAP_P2P_SUPPORTED,
      &supported_p2p);
  CHECK(nvlink_cap_result == NVML_SUCCESS);
  CHECK(ShutdownNvml()) << "NVML shutdown failed.";
  return supported_p2p;
#else
  return 0;
#endif
}
// Estimates the wall time of an all-reduce: NCCL kernel launch overhead +
// per-channel compute time + communication time derived from a ring/LL128
// bus-bandwidth model (optionally upgraded to NVLink bandwidth when NVML
// reports P2P support).
absl::Duration
GpuPerformanceWithCollectiveModel::ComputeAllreduceTime(
    const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
    const se::DeviceDescription& gpu_device_info) {
  absl::Duration total_time = kNcclKernelLaunchOverhead;
  stream_executor::CudaComputeCapability compute_cap =
      gpu_device_info.cuda_compute_capability();
  int64_t size_of_speed_array = kIntraNodeSpeeds.size();
  int64_t size_of_sm90_speed_array = kIntraNodeSpeedsSm90.size();
  // Hopper (SM90) has its own, larger speed table.
  int num_speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER
                       ? size_of_sm90_speed_array
                       : size_of_speed_array;
  const double* speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER
                             ? kIntraNodeSpeedsSm90.data()
                             : kIntraNodeSpeeds.data();
  int speed_index = 0;
  float max_sys_bw =
      GetMaxSysBwFromGpu(compute_cap, kLowLatencyMaxBandwidths.data());
  CHECK_GT(max_sys_bw, 0);
  // Pick the largest tabulated intra-node speed not exceeding the system bw
  // (the tables are sorted in descending order).
  while ((speed_index < num_speeds - 1) && speeds[speed_index] > max_sys_bw) {
    speed_index++;
  }
  float bw_intra_node = speeds[speed_index];
  int64_t num_devices = cost_analysis->NumOfDevices(instr);
  // Channel count: at least one channel per device, capped by the NCCL ring
  // maximum (both potentially overridden via environment variables).
  int64_t min_nchannels =
      std::max(num_devices, GetMinNumberOfChannels(CollectiveAlgo::RING));
  int64_t num_channels =
      std::max(min_nchannels, GetNcclMaxNumChannels(CollectiveAlgo::RING));
  int default_threads =
      (bw_intra_node * num_channels <= kPciBandwidth) ? 256 : kLL128NumThreads;
  int warp_size = gpu_device_info.threads_per_warp();
  int num_threads = GetNumThreads(warp_size, kLL128NumThreads / 4,
                                  kLL128NumThreads, default_threads);
  // Compute cost is amortized over the channels.
  absl::Duration compute_time_per_channel = ComputeTime(
      gpu_device_info, cost_analysis->flop_count(instr) / num_channels,
      num_channels, num_threads);
  total_time += compute_time_per_channel;
  uint32_t supported_p2p = CheckIfNvlinkSupportsP2P();
  if (supported_p2p == 0) {
    VLOG(8) << "Nvlink doesn't support p2p communication. Model will "
               "continue using default system bandwidth.";
  } else {
    VLOG(8) << "Nvlink supports p2p communication, setting intra node "
               "bandwidth to nvlink bw.";
    bw_intra_node = GetNvlinkBw(compute_cap);
  }
  double bus_bandwidth = bw_intra_node * num_channels;
  // Apply the ring discount and the per-channel LL128 ceiling.
  double per_channel_ring_ll128_Bw =
      GetMaxSysBwFromGpu(compute_cap, kPerChannelMaxRingLL128Bandwidths.data());
  bus_bandwidth = std::min(bus_bandwidth * kRingAlgorithmDiscountFactor,
                           num_channels * per_channel_ring_ll128_Bw);
  double actual_bandwidth = bus_bandwidth * cost_analysis->ScalingRatio(instr);
  // bytes / (GB/s * 1e6) gives milliseconds.
  absl::Duration communication_time = absl::Milliseconds(
      cost_analysis->bytes_accessed(instr) / (1e6 * actual_bandwidth));
  total_time += communication_time;
  return total_time;
}
// Dispatches to the per-collective runtime estimate. Single-device
// collectives cost only the kernel launch, async "done" ops are free, and
// unsupported collectives fall back to the launch overhead with a warning.
absl::Duration
GpuPerformanceWithCollectiveModel::ComputeCollectiveTime(
    const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
    const se::DeviceDescription& gpu_device_info) {
  if (cost_analysis->NumOfDevices(instr) == 1) {
    VLOG(8) << "Returning only kernel launch overhead for a single partition.";
    return kNcclKernelLaunchOverhead;
  }
  // The cost of an async "done" op is attributed to its matching "start".
  if (HloDataflowAnalysis::IsAsynchronousOperationDone(instr.opcode())) {
    VLOG(8) << "Returning 0 cost for async done op " << instr.name();
    return absl::ZeroDuration();
  }
  switch (instr.opcode()) {
    case HloOpcode::kAllReduce:
    case HloOpcode::kAllReduceStart:
      return ComputeAllreduceTime(instr, cost_analysis, gpu_device_info);
    default: {
      LOG(WARNING)
          << "Runtime estimate for " << instr.name()
          << " not implemented. Returning only the kernel launch time.";
      return kNcclKernelLaunchOverhead;
    }
  }
}
}
} | #include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using GpuPerformanceWithCollectiveModelTest = HloTestBase;
// Smoke test: NVML loads and initializes, and device 0 is queryable.
TEST_F(GpuPerformanceWithCollectiveModelTest, TestNvmlLibraryLoading) {
#if GOOGLE_CUDA
  EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml());
  // After successful init, fetching the handle for device 0 should work.
  nvmlDevice_t nvml_device;
  nvmlReturn_t get_device_result =
      xla_nvmlDeviceGetHandleByIndex(0, &nvml_device);
  EXPECT_TRUE(get_device_result == NVML_SUCCESS);
  // A second init on an already-initialized NVML must also succeed.
  EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml());
#endif
}
}
}
} |
225 | #ifndef GLOG_INTERNAL_SYMBOLIZE_H
#define GLOG_INTERNAL_SYMBOLIZE_H
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include "config.h"
#include "glog/platform.h"
#if defined(HAVE_LINK_H)
# include <link.h>
#elif defined(HAVE_ELF_H)
# include <elf.h>
#elif defined(HAVE_SYS_EXEC_ELF_H)
# include <sys/exec_elf.h>
#endif
#if defined(GLOG_USE_GLOG_EXPORT)
# include "glog/export.h"
#endif
#if !defined(GLOG_NO_EXPORT)
# error "symbolize.h" was not included correctly.
#endif
#ifndef GLOG_NO_SYMBOLIZE_DETECTION
# ifndef HAVE_SYMBOLIZE
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H)
# define HAVE_SYMBOLIZE
# elif defined(GLOG_OS_MACOSX) && defined(HAVE_DLADDR)
# define HAVE_SYMBOLIZE
# elif defined(GLOG_OS_WINDOWS)
# define HAVE_SYMBOLIZE
# endif
# endif
#endif
#ifdef HAVE_SYMBOLIZE
# if !defined(SIZEOF_VOID_P) && defined(__SIZEOF_POINTER__)
# define SIZEOF_VOID_P __SIZEOF_POINTER__
# endif
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H)
# ifndef ElfW
# if SIZEOF_VOID_P == 4
# define ElfW(type) Elf32_##type
# elif SIZEOF_VOID_P == 8
# define ElfW(type) Elf64_##type
# else
# error "Unknown sizeof(void *)"
# endif
# endif
namespace google {
inline namespace glog_internal_namespace_ {
GLOG_NO_EXPORT
bool GetSectionHeaderByName(int fd, const char* name, size_t name_len,
ElfW(Shdr) * out);
}
}
# endif
namespace google {
inline namespace glog_internal_namespace_ {
using SymbolizeCallback = int (*)(int, void*, char*, size_t, uint64_t);
GLOG_NO_EXPORT
void InstallSymbolizeCallback(SymbolizeCallback callback);
using SymbolizeOpenObjectFileCallback = int (*)(uint64_t, uint64_t&, uint64_t&,
char*, size_t);
GLOG_NO_EXPORT
void InstallSymbolizeOpenObjectFileCallback(
SymbolizeOpenObjectFileCallback callback);
}
}
#endif
namespace google {
inline namespace glog_internal_namespace_ {
#if defined(HAVE_SYMBOLIZE)
// Flags controlling Symbolize() behavior; combine with the bitwise
// operators below.
enum class SymbolizeOptions {
  // No special behavior.
  kNone = 0,
  // Do not resolve source line numbers.
  kNoLineNumbers = 1
};
constexpr SymbolizeOptions operator&(SymbolizeOptions a,
                                     SymbolizeOptions b) noexcept {
  using U = std::underlying_type_t<SymbolizeOptions>;
  return static_cast<SymbolizeOptions>(static_cast<U>(a) & static_cast<U>(b));
}
constexpr SymbolizeOptions operator|(SymbolizeOptions a,
                                     SymbolizeOptions b) noexcept {
  using U = std::underlying_type_t<SymbolizeOptions>;
  return static_cast<SymbolizeOptions>(static_cast<U>(a) | static_cast<U>(b));
}
GLOG_NO_EXPORT bool Symbolize(
void* pc, char* out, size_t out_size,
SymbolizeOptions options = SymbolizeOptions::kNone);
#endif
}
}
#endif
#ifdef GLOG_BUILD_CONFIG_INCLUDE
# include GLOG_BUILD_CONFIG_INCLUDE
#endif
#include "symbolize.h"
#include "utilities.h"
#if defined(HAVE_SYMBOLIZE)
# include <algorithm>
# include <cstdlib>
# include <cstring>
# include <limits>
# include "demangle.h"
# define GLOG_SAFE_ASSERT(expr) ((expr) ? 0 : (std::abort(), 0))
namespace google {
inline namespace glog_internal_namespace_ {
namespace {
// User-installed callback overrides; nullptr means "use the built-in path".
SymbolizeCallback g_symbolize_callback = nullptr;
SymbolizeOpenObjectFileCallback g_symbolize_open_object_file_callback = nullptr;
// Demangles the symbol in `out` in place when the demangled form fits in
// `out_size`; on any failure the mangled name is left untouched.
ATTRIBUTE_NOINLINE
void DemangleInplace(char* out, size_t out_size) {
  char demangled[256];
  if (Demangle(out, demangled, sizeof(demangled))) {
    size_t len = strlen(demangled);
    if (len + 1 <= out_size) {
      // `demangled` is NUL-terminated within its own fixed-size buffer.
      GLOG_SAFE_ASSERT(len < sizeof(demangled));
      memmove(out, demangled, len + 1);
    }
  }
}
}
// Stores `callback` as the global symbolize callback override; pass nullptr
// to clear it.
void InstallSymbolizeCallback(SymbolizeCallback callback) {
  g_symbolize_callback = callback;
}
// Stores `callback` as the global object-file-open callback override; pass
// nullptr to clear it.
void InstallSymbolizeOpenObjectFileCallback(
    SymbolizeOpenObjectFileCallback callback) {
  g_symbolize_open_object_file_callback = callback;
}
}
}
# if defined(HAVE_LINK_H)
# if defined(HAVE_DLFCN_H)
# include <dlfcn.h>
# endif
# include <fcntl.h>
# include <sys/stat.h>
# include <sys/types.h>
# include <unistd.h>
# include <cerrno>
# include <climits>
# include <cstddef>
# include <cstdint>
# include <cstdio>
# include <cstdlib>
# include <cstring>
# include "config.h"
# include "glog/raw_logging.h"
# include "symbolize.h"
namespace google {
inline namespace glog_internal_namespace_ {
namespace {
// Invokes `run` until it either succeeds (returns != -1) or fails with an
// errno other than `error` (EINTR by default) — the classic
// retry-on-interrupt idiom for POSIX calls.
template <class Functor>
auto FailureRetry(Functor run, int error = EINTR) noexcept(noexcept(run())) {
  for (;;) {
    decltype(run()) result = run();
    if (result != -1 || errno != error) {
      return result;
    }
  }
}
}
// Reads up to `count` bytes from `fd` at absolute `offset`, retrying on
// EINTR and looping over short reads. Returns the number of bytes actually
// read (possibly < count at EOF), or -1 on error. Uses pread() only, so the
// fd's file position is never moved.
static ssize_t ReadFromOffset(const int fd, void* buf, const size_t count,
                              const size_t offset) {
  GLOG_SAFE_ASSERT(fd >= 0);
  GLOG_SAFE_ASSERT(count <=
                   static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
  char* buf0 = reinterpret_cast<char*>(buf);
  size_t num_bytes = 0;
  while (num_bytes < count) {
    ssize_t len = FailureRetry([fd, p = buf0 + num_bytes, n = count - num_bytes,
                                m = static_cast<off_t>(offset + num_bytes)] {
      return pread(fd, p, n, m);
    });
    if (len < 0) {  // Error other than EINTR.
      return -1;
    }
    if (len == 0) {  // EOF.
      break;
    }
    num_bytes += static_cast<size_t>(len);
  }
  GLOG_SAFE_ASSERT(num_bytes <= count);
  return static_cast<ssize_t>(num_bytes);
}
// Reads exactly `count` bytes from `fd` at `offset`. Returns true only when
// the full `count` bytes were read.
static bool ReadFromOffsetExact(const int fd, void* buf, const size_t count,
                                const size_t offset) {
  ssize_t len = ReadFromOffset(fd, buf, count, offset);
  // Reject the -1 error return explicitly instead of relying on
  // static_cast<size_t>(-1) wrapping to a value that happens to differ
  // from `count` (which would be wrong for count == SIZE_MAX).
  return len >= 0 && static_cast<size_t>(len) == count;
}
// Returns the ELF e_type (ET_EXEC, ET_DYN, ...) of the file open on `fd`,
// or -1 if the header cannot be read or the ELF magic does not match.
static int FileGetElfType(const int fd) {
  ElfW(Ehdr) elf_header;
  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
    return -1;
  }
  if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
    return -1;
  }
  return elf_header.e_type;
}
// Scans the section header table (`sh_num` entries at file offset
// `sh_offset`) in batches of 16 headers and copies the first header whose
// sh_type equals `type` into *out. Returns false if none matches or a read
// fails. Batch reads keep stack usage and syscall count small.
static ATTRIBUTE_NOINLINE bool GetSectionHeaderByType(const int fd,
                                                      ElfW(Half) sh_num,
                                                      const size_t sh_offset,
                                                      ElfW(Word) type,
                                                      ElfW(Shdr) * out) {
  ElfW(Shdr) buf[16];
  for (size_t i = 0; i < sh_num;) {
    const size_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
    const size_t num_bytes_to_read =
        (sizeof(buf) > num_bytes_left) ? num_bytes_left : sizeof(buf);
    const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
                                       sh_offset + i * sizeof(buf[0]));
    if (len == -1) {
      return false;
    }
    // Reads must land on whole-header boundaries.
    GLOG_SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
    const size_t num_headers_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
    GLOG_SAFE_ASSERT(num_headers_in_buf <= sizeof(buf) / sizeof(buf[0]));
    for (size_t j = 0; j < num_headers_in_buf; ++j) {
      if (buf[j].sh_type == type) {
        *out = buf[j];
        return true;
      }
    }
    i += num_headers_in_buf;
  }
  return false;
}
// Longest section name (in bytes) this lookup will compare against.
const int kMaxSectionNameLen = 64;
// Finds the section named `name` (comparing exactly `name_len` bytes) by
// walking the section headers and resolving their names through the
// section-name string table (.shstrtab). Copies the matching header into
// *out and returns true; returns false on read failure or no match.
bool GetSectionHeaderByName(int fd, const char* name, size_t name_len,
                            ElfW(Shdr) * out) {
  ElfW(Ehdr) elf_header;
  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
    return false;
  }
  // Load the header of the section-name string table itself.
  ElfW(Shdr) shstrtab;
  size_t shstrtab_offset =
      (elf_header.e_shoff + static_cast<size_t>(elf_header.e_shentsize) *
                                static_cast<size_t>(elf_header.e_shstrndx));
  if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
    return false;
  }
  for (size_t i = 0; i < elf_header.e_shnum; ++i) {
    size_t section_header_offset =
        (elf_header.e_shoff + elf_header.e_shentsize * i);
    if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
      return false;
    }
    char header_name[kMaxSectionNameLen];
    if (sizeof(header_name) < name_len) {
      RAW_LOG(WARNING,
              "Section name '%s' is too long (%zu); "
              "section will not be found (even if present).",
              name, name_len);
      // No point in even trying; no real section name is that long.
      return false;
    }
    size_t name_offset = shstrtab.sh_offset + out->sh_name;
    ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
    if (n_read == -1) {
      return false;
    } else if (static_cast<size_t>(n_read) != name_len) {
      // Short read: this name cannot match; try the next section.
      continue;
    }
    if (memcmp(header_name, name, name_len) == 0) {
      return true;
    }
  }
  return false;
}
// Searches the symbol table `symtab` for a defined symbol whose
// [st_value + symbol_offset, +st_size) range covers `pc`, resolves its name
// through `strtab`, and copies it into `out`. Returns false when no
// covering symbol is found or the name cannot be read.
static ATTRIBUTE_NOINLINE bool FindSymbol(uint64_t pc, const int fd, char* out,
                                          size_t out_size,
                                          uint64_t symbol_offset,
                                          const ElfW(Shdr) * strtab,
                                          const ElfW(Shdr) * symtab) {
  if (symtab == nullptr) {
    return false;
  }
  const size_t num_symbols = symtab->sh_size / symtab->sh_entsize;
  for (unsigned i = 0; i < num_symbols;) {
    size_t offset = symtab->sh_offset + i * symtab->sh_entsize;
    // Batch size chosen so the on-stack buffer stays around 1 KiB on either
    // word size (64-bit ElfW(Sym) entries are larger).
# if defined(__WORDSIZE) && __WORDSIZE == 64
    const size_t NUM_SYMBOLS = 32U;
# else
    const size_t NUM_SYMBOLS = 64U;
# endif
    ElfW(Sym) buf[NUM_SYMBOLS];
    size_t num_symbols_to_read = std::min(NUM_SYMBOLS, num_symbols - i);
    const ssize_t len =
        ReadFromOffset(fd, &buf, sizeof(buf[0]) * num_symbols_to_read, offset);
    GLOG_SAFE_ASSERT(static_cast<size_t>(len) % sizeof(buf[0]) == 0);
    const size_t num_symbols_in_buf = static_cast<size_t>(len) / sizeof(buf[0]);
    GLOG_SAFE_ASSERT(num_symbols_in_buf <= num_symbols_to_read);
    for (unsigned j = 0; j < num_symbols_in_buf; ++j) {
      const ElfW(Sym)& symbol = buf[j];
      uint64_t start_address = symbol.st_value;
      start_address += symbol_offset;
      uint64_t end_address = start_address + symbol.st_size;
      if (symbol.st_value != 0 &&  // Skip null-valued symbols.
          symbol.st_shndx != 0 &&  // Skip undefined symbols.
          start_address <= pc && pc < end_address) {
        ssize_t len1 = ReadFromOffset(fd, out, out_size,
                                      strtab->sh_offset + symbol.st_name);
        if (len1 <= 0 || memchr(out, '\0', out_size) == nullptr) {
          // Name unreadable or not NUL-terminated within out_size.
          memset(out, 0, out_size);
          return false;
        }
        return true;  // Obtained the symbol name.
      }
    }
    i += num_symbols_in_buf;
  }
  return false;
}
// Resolves `pc` to a symbol name using the ELF object open on `fd`,
// consulting the full symbol table (.symtab) first and falling back to the
// dynamic table (.dynsym). Fills `out` and returns true on success.
static bool GetSymbolFromObjectFile(const int fd, uint64_t pc, char* out,
                                    size_t out_size, uint64_t base_address) {
  ElfW(Ehdr) elf_header;
  if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
    return false;
  }
  ElfW(Shdr) symtab, strtab;
  if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
                             SHT_SYMTAB, &symtab)) {
    // Load the string table the symbol table links to (sh_link index).
    if (!ReadFromOffsetExact(
            fd, &strtab, sizeof(strtab),
            elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
      return false;
    }
    if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
      return true;  // Found the symbol in the regular symbol table.
    }
  }
  if (GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
                             SHT_DYNSYM, &symtab)) {
    if (!ReadFromOffsetExact(
            fd, &strtab, sizeof(strtab),
            elf_header.e_shoff + symtab.sh_link * sizeof(symtab))) {
      return false;
    }
    if (FindSymbol(pc, fd, out, out_size, base_address, &strtab, &symtab)) {
      return true;  // Found the symbol in the dynamic symbol table.
    }
  }
  return false;
}
namespace {
// Buffered line reader over a file descriptor, built on the pread-based
// ReadFromOffset (so it never moves the fd's file position and performs no
// allocation). The caller supplies the buffer; lines longer than that
// buffer cannot be returned.
class LineReader {
 public:
  explicit LineReader(int fd, char* buf, size_t buf_len, size_t offset)
      : fd_(fd),
        buf_(buf),
        buf_len_(buf_len),
        offset_(offset),
        bol_(buf),
        eol_(buf),
        eod_(buf) {}
  // Advances to the next line, sets *bol/*eol to its bounds, and replaces
  // its '\n' with '\0' in place. Returns false at EOF, on read error, or
  // when a line does not fit in the buffer.
  bool ReadLine(const char** bol, const char** eol) {
    if (BufferIsEmpty()) {  // First call: fill the buffer.
      const ssize_t num_bytes = ReadFromOffset(fd_, buf_, buf_len_, offset_);
      if (num_bytes <= 0) {  // EOF or error.
        return false;
      }
      offset_ += static_cast<size_t>(num_bytes);
      eod_ = buf_ + num_bytes;
      bol_ = buf_;
    } else {
      bol_ = eol_ + 1;  // Skip the '\0' written over the previous '\n'.
      GLOG_SAFE_ASSERT(bol_ <= eod_);
      if (!HasCompleteLine()) {
        // Shift the trailing partial line to the front, then refill the
        // remaining capacity from the file.
        const auto incomplete_line_length = static_cast<size_t>(eod_ - bol_);
        memmove(buf_, bol_, incomplete_line_length);
        char* const append_pos = buf_ + incomplete_line_length;
        const size_t capacity_left = buf_len_ - incomplete_line_length;
        const ssize_t num_bytes =
            ReadFromOffset(fd_, append_pos, capacity_left, offset_);
        if (num_bytes <= 0) {  // EOF or error.
          return false;
        }
        offset_ += static_cast<size_t>(num_bytes);
        eod_ = append_pos + num_bytes;
        bol_ = buf_;
      }
    }
    eol_ = FindLineFeed();
    if (eol_ == nullptr) {  // No '\n': the line is too long for the buffer.
      return false;
    }
    *eol_ = '\0';  // Terminate the line in place.
    *bol = bol_;
    *eol = eol_;
    return true;
  }
  // Bounds of the current line; valid after a successful ReadLine().
  const char* bol() { return bol_; }
  const char* eol() { return eol_; }
 private:
  LineReader(const LineReader&) = delete;
  void operator=(const LineReader&) = delete;
  char* FindLineFeed() {
    return reinterpret_cast<char*>(
        memchr(bol_, '\n', static_cast<size_t>(eod_ - bol_)));
  }
  bool BufferIsEmpty() { return buf_ == eod_; }
  bool HasCompleteLine() {
    return !BufferIsEmpty() && FindLineFeed() != nullptr;
  }
  const int fd_;
  char* const buf_;
  const size_t buf_len_;
  size_t offset_;    // Next file offset to read from.
  char* bol_;        // Beginning of the current line.
  char* eol_;        // End of the current line ('\0' written over '\n').
  const char* eod_;  // One past the last valid byte in buf_.
};
}
// Parses a hexadecimal number starting at `start`, stopping at `end` or the
// first non-hex character. Stores the value in *hex and returns a pointer
// to the first unconsumed character.
static char* GetHex(const char* start, const char* end, uint64_t* hex) {
  uint64_t value = 0;
  const char* p = start;
  while (p < end) {
    const int ch = *p;
    const bool is_digit = ch >= '0' && ch <= '9';
    const bool is_upper_hex = ch >= 'A' && ch <= 'F';
    const bool is_lower_hex = ch >= 'a' && ch <= 'f';
    if (!is_digit && !is_upper_hex && !is_lower_hex) {
      break;
    }
    value = (value << 4U) |
            (ch < 'A' ? static_cast<uint64_t>(ch - '0') : (ch & 0xF) + 9U);
    ++p;
  }
  GLOG_SAFE_ASSERT(p <= end);
  *hex = value;
  return const_cast<char*>(p);
}
// Scans /proc/self/maps for the mapping that contains `pc`. On success,
// sets start_address to the mapping start, base_address to the module load
// bias (derived by reading the in-memory ELF headers via /proc/self/mem),
// copies the mapped file's path into out_file_name, and returns an open fd
// for that file. Returns a null FileDescriptor on any failure.
static ATTRIBUTE_NOINLINE FileDescriptor
OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
                                             uint64_t& start_address,
                                             uint64_t& base_address,
                                             char* out_file_name,
                                             size_t out_file_name_size) {
  FileDescriptor maps_fd{
      FailureRetry([] { return open("/proc/self/maps", O_RDONLY); })};
  if (!maps_fd) {
    return nullptr;
  }
  FileDescriptor mem_fd{
      FailureRetry([] { return open("/proc/self/mem", O_RDONLY); })};
  if (!mem_fd) {
    return nullptr;
  }
  char buf[1024];
  LineReader reader(maps_fd.get(), buf, sizeof(buf), 0);
  while (true) {
    const char* cursor;
    const char* eol;
    if (!reader.ReadLine(&cursor, &eol)) {
      return nullptr;
    }
    // Each maps line looks like: "start-end perms offset dev inode  path".
    cursor = GetHex(cursor, eol, &start_address);
    if (cursor == eol || *cursor != '-') {
      return nullptr;
    }
    ++cursor;  // Skip '-'.
    uint64_t end_address;
    cursor = GetHex(cursor, eol, &end_address);
    if (cursor == eol || *cursor != ' ') {
      return nullptr;
    }
    ++cursor;  // Skip ' '.
    const char* const flags_start = cursor;
    while (cursor < eol && *cursor != ' ') {
      ++cursor;
    }
    // Expect at least four permission letters (e.g. "r-xp").
    if (cursor == eol || cursor < flags_start + 4) {
      return nullptr;
    }
    // For every readable mapping that starts with an ELF header, compute the
    // module's base address (load bias).
    ElfW(Ehdr) ehdr;
    if (flags_start[0] == 'r' &&
        ReadFromOffsetExact(mem_fd.get(), &ehdr, sizeof(ElfW(Ehdr)),
                            start_address) &&
        memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0) {
      switch (ehdr.e_type) {
        case ET_EXEC:
          // Non-PIE executables are loaded at their link-time addresses.
          base_address = 0;
          break;
        case ET_DYN:
          // Shared object or PIE: bias is the mapping start minus the
          // virtual address of the PT_LOAD segment with file offset 0.
          base_address = start_address;
          for (unsigned i = 0; i != ehdr.e_phnum; ++i) {
            ElfW(Phdr) phdr;
            if (ReadFromOffsetExact(
                    mem_fd.get(), &phdr, sizeof(phdr),
                    start_address + ehdr.e_phoff + i * sizeof(phdr)) &&
                phdr.p_type == PT_LOAD && phdr.p_offset == 0) {
              base_address = start_address - phdr.p_vaddr;
              break;
            }
          }
          break;
        default:
          break;
      }
    }
    // Keep scanning until the mapping that actually contains pc and is
    // readable + executable.
    if (start_address > pc || pc >= end_address) {
      continue;
    }
    if (flags_start[0] != 'r' || flags_start[2] != 'x') {
      continue;
    }
    ++cursor;  // Skip ' ' after the permission flags.
    uint64_t file_offset;
    cursor = GetHex(cursor, eol, &file_offset);
    if (cursor == eol || *cursor != ' ') {
      return nullptr;
    }
    ++cursor;  // Skip ' '.
    // Skip the device and inode columns: the path begins at the first
    // non-space character after two more separators.
    int num_spaces = 0;
    while (cursor < eol) {
      if (*cursor == ' ') {
        ++num_spaces;
      } else if (num_spaces >= 2) {
        break;
      }
      ++cursor;
    }
    if (cursor == eol) {
      return nullptr;
    }
    strncpy(out_file_name, cursor, out_file_name_size);
    // Make sure |out_file_name| is always NUL-terminated.
    out_file_name[out_file_name_size - 1] = '\0';
    return FileDescriptor{
        FailureRetry([cursor] { return open(cursor, O_RDONLY); })};
  }
}
// Converts `i` to a NUL-terminated string in `buf` using `base` (2..16),
// zero-padding to at least `padding` digits. Returns `buf` on success and
// nullptr when the buffer is too small or the base is unsupported. Uses no
// library calls, so it is safe to use from a signal handler.
static char* itoa_r(uintptr_t i, char* buf, size_t sz, unsigned base,
                    size_t padding) {
  size_t used = 1;  // Reserve room for the trailing '\0'.
  if (used > sz) {
    return nullptr;
  }
  if (base < 2 || base > 16) {
    buf[0] = '\000';
    return nullptr;
  }
  char* const first = buf;
  char* out = first;
  // Emit digits least-significant first; reversed below.
  do {
    if (++used > sz) {
      buf[0] = '\000';
      return nullptr;
    }
    *out++ = "0123456789abcdef"[i % base];
    i /= base;
    if (padding > 0) {
      --padding;
    }
  } while (i > 0 || padding > 0);
  *out = '\000';
  // Reverse into most-significant-first order.
  for (char *lo = first, *hi = out - 1; lo < hi; ++lo, --hi) {
    const char tmp = *lo;
    *lo = *hi;
    *hi = tmp;
  }
  return buf;
}
static void SafeAppendString(const char* source, char* dest, size_t dest_size) {
size_t | #include "symbolize.h"
#include <csignal>
#include <iostream>
#include "config.h"
#include "glog/logging.h"
#include "googletest.h"
#include "utilities.h"
#include "stacktrace.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
using namespace std;
using namespace google;
#if defined(__GNUG__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wpedantic"
#endif
#if defined(HAVE_STACKTRACE)
# define always_inline
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H) || \
defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
// Wraps Symbolize() with a static result buffer: returns the symbol name
// for `pc` or nullptr on failure. Not reentrant (shared static buffer).
static const char* TrySymbolize(void* pc, google::SymbolizeOptions options =
                                              google::SymbolizeOptions::kNone) {
  static char symbol[4096];
  if (Symbolize(pc, symbol, sizeof(symbol), options)) {
    return symbol;
  } else {
    return nullptr;
  }
}
# endif
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H)
# if defined(__GNUC__) && !defined(__OPENCC__)
# if __GNUC__ >= 4
# define TEST_WITH_MODERN_GCC
# if defined(__i386__) && __i386__
# undef always_inline
# define always_inline __attribute__((always_inline))
# define HAVE_ALWAYS_INLINE
# endif
# else
# endif
# define TEST_WITH_LABEL_ADDRESSES
# endif
extern "C" {
// Symbolization targets: one with external linkage, one internal. The
// volatile stores keep the bodies from being optimized away.
void nonstatic_func();
void nonstatic_func() {
  volatile int a = 0;
  a = a + 1;
}
static void static_func() {
  volatile int a = 0;
  a = a + 1;
}
}
// Symbolizes extern and static functions by address; a null PC must fail.
TEST(Symbolize, Symbolize) {
  EXPECT_STREQ("nonstatic_func", TrySymbolize((void*)(&nonstatic_func)));
  const char* static_func_symbol =
      TrySymbolize(reinterpret_cast<void*>(&static_func));
  // The static-name checks are skipped on MSVC release (NDEBUG) builds.
# if !defined(_MSC_VER) || !defined(NDEBUG)
  CHECK(nullptr != static_func_symbol);
  EXPECT_TRUE(strcmp("static_func", static_func_symbol) == 0 ||
              strcmp("static_func()", static_func_symbol) == 0);
# endif
  EXPECT_TRUE(nullptr == TrySymbolize(nullptr));
}
// A class with a static member function, used to exercise demangling.
struct Foo {
  static void func(int x);
};
void ATTRIBUTE_NOINLINE Foo::func(int x) {
  volatile int a = x;
  a = a + 1;
}
# ifdef TEST_WITH_MODERN_GCC
// A C++ member function must symbolize to its demangled form — with the
// parameter list when __cxa_demangle is available, without it otherwise.
TEST(Symbolize, SymbolizeWithDemangling) {
  Foo::func(100);
# if !defined(_MSC_VER) || !defined(NDEBUG)
# if defined(HAVE___CXA_DEMANGLE)
  EXPECT_STREQ("Foo::func(int)", TrySymbolize((void*)(&Foo::func)));
# else
  EXPECT_STREQ("Foo::func()", TrySymbolize((void*)(&Foo::func)));
# endif
# endif
}
# endif
// Input/output slots for SymbolizeSignalHandler, which runs from signal
// context on the alternate stack.
static void* g_pc_to_symbolize;
static char g_symbolize_buffer[4096];
static char* g_symbolize_result;
// Baseline handler, used to measure the bare signal-delivery stack cost.
static void EmptySignalHandler(int ) {}
// Symbolizes g_pc_to_symbolize into g_symbolize_buffer from signal context.
static void SymbolizeSignalHandler(int ) {
  if (Symbolize(g_pc_to_symbolize, g_symbolize_buffer,
                sizeof(g_symbolize_buffer))) {
    g_symbolize_result = g_symbolize_buffer;
  } else {
    g_symbolize_result = nullptr;
  }
}
// Size of the alternate signal stack used by the stack-consumption tests,
// and the sentinel byte it is pre-filled with.
const int kAlternateStackSize = 8096;
const char kAlternateStackFillValue = 0x55;
// Returns true if the call stack grows toward lower addresses. `x` must be
// the address of a local in the caller's frame; noinline guarantees this
// function gets a frame of its own to compare against.
static ATTRIBUTE_NOINLINE bool StackGrowsDown(int* x) {
  int probe;
  return &probe < x;
}
// Scans the sentinel-filled alternate stack and returns how many bytes were
// clobbered (consumed) since it was filled, or -1 if it is untouched.
static int GetStackConsumption(const char* alt_stack) {
  int anchor;
  if (StackGrowsDown(&anchor)) {
    // Downward growth: usage starts at the buffer's top, so the first
    // clobbered byte from the bottom marks the high-water mark.
    int idx = 0;
    while (idx < kAlternateStackSize) {
      if (alt_stack[idx] != kAlternateStackFillValue) {
        return kAlternateStackSize - idx;
      }
      ++idx;
    }
  } else {
    // Upward growth: scan from the top for the last clobbered byte.
    int idx = kAlternateStackSize - 1;
    while (idx >= 0) {
      if (alt_stack[idx] != kAlternateStackFillValue) {
        return idx;
      }
      --idx;
    }
  }
  return -1;
}
# ifdef HAVE_SIGALTSTACK
// Measures the stack consumed by Symbolize() when invoked from a signal
// handler on an alternate stack: delivers SIGUSR1 (empty handler) and
// SIGUSR2 (symbolizing handler) and diffs the two high-water marks.
// Returns the symbol for `pc` (or nullptr) and writes the byte delta to
// *stack_consumed (-1 when either measurement failed).
static const char* SymbolizeStackConsumption(void* pc, int* stack_consumed) {
  g_pc_to_symbolize = pc;
  // Set up an alternate stack pre-filled with the sentinel byte.
  char altstack[kAlternateStackSize];
  memset(altstack, kAlternateStackFillValue, kAlternateStackSize);
  stack_t sigstk;
  memset(&sigstk, 0, sizeof(stack_t));
  stack_t old_sigstk;
  sigstk.ss_sp = altstack;
  sigstk.ss_size = kAlternateStackSize;
  sigstk.ss_flags = 0;
  CHECK_ERR(sigaltstack(&sigstk, &old_sigstk));
  // Route SIGUSR1/SIGUSR2 to the measurement handlers, on the alt stack.
  struct sigaction sa;
  memset(&sa, 0, sizeof(struct sigaction));
  struct sigaction old_sa1, old_sa2;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_ONSTACK;
  sa.sa_handler = EmptySignalHandler;
  CHECK_ERR(sigaction(SIGUSR1, &sa, &old_sa1));
  sa.sa_handler = SymbolizeSignalHandler;
  CHECK_ERR(sigaction(SIGUSR2, &sa, &old_sa2));
  CHECK_ERR(kill(getpid(), SIGUSR1));
  int stack_consumption1 = GetStackConsumption(altstack);
  CHECK_ERR(kill(getpid(), SIGUSR2));
  int stack_consumption2 = GetStackConsumption(altstack);
  // The difference attributes the extra consumption to Symbolize() itself.
  if (stack_consumption1 != -1 && stack_consumption2 != -1) {
    *stack_consumed = stack_consumption2 - stack_consumption1;
  } else {
    *stack_consumed = -1;
  }
  LOG(INFO) << "Stack consumption of empty signal handler: "
            << stack_consumption1;
  LOG(INFO) << "Stack consumption of symbolize signal handler: "
            << stack_consumption2;
  LOG(INFO) << "Stack consumption of Symbolize: " << *stack_consumed;
  // Restore the previous signal dispositions and signal stack.
  CHECK_ERR(sigaltstack(&old_sigstk, nullptr));
  CHECK_ERR(sigaction(SIGUSR1, &old_sa1, nullptr));
  CHECK_ERR(sigaction(SIGUSR2, &old_sa2, nullptr));
  return g_symbolize_result;
}
# if !defined(HAVE___CXA_DEMANGLE)
// Upper bound on Symbolize's signal-handler stack usage; ppc64 stack frames
// are larger, hence the bigger limit.
# ifdef __ppc64__
constexpr int kStackConsumptionUpperLimit = 4096;
# else
constexpr int kStackConsumptionUpperLimit = 2048;
# endif
# endif
// Symbolize must work from a signal handler and stay within the stack
// budget (the bound is only checked without __cxa_demangle, whose stack
// usage is unbounded).
TEST(Symbolize, SymbolizeStackConsumption) {
  int stack_consumed;
  const char* symbol;
  symbol = SymbolizeStackConsumption(reinterpret_cast<void*>(&nonstatic_func),
                                     &stack_consumed);
  EXPECT_STREQ("nonstatic_func", symbol);
  EXPECT_GT(stack_consumed, 0);
# if !defined(HAVE___CXA_DEMANGLE)
  EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
# endif
  symbol = SymbolizeStackConsumption(reinterpret_cast<void*>(&static_func),
                                     &stack_consumed);
  CHECK(nullptr != symbol);
  EXPECT_TRUE(strcmp("static_func", symbol) == 0 ||
              strcmp("static_func()", symbol) == 0);
  EXPECT_GT(stack_consumed, 0);
# if !defined(HAVE___CXA_DEMANGLE)
  EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
# endif
}
# if defined(TEST_WITH_MODERN_GCC) && !defined(HAVE___CXA_DEMANGLE)
// Same stack-consumption bound for a demangled C++ member function.
TEST(Symbolize, SymbolizeWithDemanglingStackConsumption) {
  Foo::func(100);
  int stack_consumed;
  const char* symbol;
  symbol = SymbolizeStackConsumption(reinterpret_cast<void*>(&Foo::func),
                                     &stack_consumed);
# if defined(HAVE___CXA_DEMANGLE)
  EXPECT_STREQ("Foo::func(int)", symbol);
# else
  EXPECT_STREQ("Foo::func()", symbol);
# endif
  EXPECT_GT(stack_consumed, 0);
  EXPECT_LT(stack_consumed, kStackConsumptionUpperLimit);
}
# endif
# endif
extern "C" {
// Returns a label address inside this always-inlined function (nullptr when
// label addresses are unavailable); used to check symbolization of a PC
// that lands inside inlined code.
inline void* always_inline inline_func() {
  void* pc = nullptr;
# ifdef TEST_WITH_LABEL_ADDRESSES
  pc = &&curr_pc;
  curr_pc:
# endif
  return pc;
}
// Same, but guaranteed not to be inlined.
void* ATTRIBUTE_NOINLINE non_inline_func();
void* ATTRIBUTE_NOINLINE non_inline_func() {
  void* pc = nullptr;
# ifdef TEST_WITH_LABEL_ADDRESSES
  pc = &&curr_pc;
  curr_pc:
# endif
  return pc;
}
// A PC inside a non-inlined function must symbolize to that function.
static void ATTRIBUTE_NOINLINE TestWithPCInsideNonInlineFunction() {
# if defined(TEST_WITH_LABEL_ADDRESSES) && defined(HAVE_ATTRIBUTE_NOINLINE)
  void* pc = non_inline_func();
  const char* symbol = TrySymbolize(pc);
# if !defined(_MSC_VER) || !defined(NDEBUG)
  CHECK(symbol != nullptr);
  CHECK_STREQ(symbol, "non_inline_func");
# endif
  cout << "Test case TestWithPCInsideNonInlineFunction passed." << endl;
# endif
}
// A PC inside an inlined function must symbolize to its caller (here, this
// function itself, hence __FUNCTION__).
static void ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
# if defined(TEST_WITH_LABEL_ADDRESSES) && defined(HAVE_ALWAYS_INLINE)
  void* pc = inline_func();
  const char* symbol = TrySymbolize(pc);
# if !defined(_MSC_VER) || !defined(NDEBUG)
  CHECK(symbol != nullptr);
  CHECK_STREQ(symbol, __FUNCTION__);
# endif
  cout << "Test case TestWithPCInsideInlineFunction passed." << endl;
# endif
}
}
// The caller's return address must symbolize to "main" (this function is
// called directly from main below).
static void ATTRIBUTE_NOINLINE TestWithReturnAddress() {
# if defined(HAVE_ATTRIBUTE_NOINLINE)
  void* return_address = __builtin_return_address(0);
  const char* symbol =
      TrySymbolize(return_address, google::SymbolizeOptions::kNoLineNumbers);
# if !defined(_MSC_VER) || !defined(NDEBUG)
  CHECK(symbol != nullptr);
  CHECK_STREQ(symbol, "main");
# endif
  cout << "Test case TestWithReturnAddress passed." << endl;
# endif
}
# elif defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
# ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_ReturnAddress)
# endif
// Windows/Cygwin variants of the demangling and return-address tests.
struct Foo {
  static void func(int x);
};
__declspec(noinline) void Foo::func(int x) {
  volatile int a = x;
  a = a + 1;
}
TEST(Symbolize, SymbolizeWithDemangling) {
  Foo::func(100);
  const char* ret = TrySymbolize((void*)(&Foo::func));
  // Only DbgHelp debug builds produce the fully decorated MSVC name.
# if defined(HAVE_DBGHELP) && !defined(NDEBUG)
  EXPECT_STREQ("public: static void __cdecl Foo::func(int)", ret);
# endif
}
// The caller's return address must symbolize to "main" (called from main).
__declspec(noinline) void TestWithReturnAddress() {
  void* return_address =
# ifdef __GNUC__
      __builtin_return_address(0)
# else
      _ReturnAddress()
# endif
      ;
  const char* symbol =
      TrySymbolize(return_address, google::SymbolizeOptions::kNoLineNumbers);
# if !defined(_MSC_VER) || !defined(NDEBUG)
  CHECK(symbol != nullptr);
  CHECK_STREQ(symbol, "main");
# endif
  cout << "Test case TestWithReturnAddress passed." << endl;
}
# endif
#endif
// Test driver: configures logging, runs the platform-specific direct
// symbolizer checks, then the gtest suite. Builds without symbolize support
// print PASS and exit.
int main(int argc, char** argv) {
  FLAGS_logtostderr = true;
  InitGoogleLogging(argv[0]);
  InitGoogleTest(&argc, argv);
#if defined(HAVE_SYMBOLIZE) && defined(HAVE_STACKTRACE)
# if defined(HAVE_ELF_H) || defined(HAVE_SYS_EXEC_ELF_H)
  // A null callback means: use the default symbolize implementation.
  InstallSymbolizeCallback(nullptr);
  TestWithPCInsideInlineFunction();
  TestWithPCInsideNonInlineFunction();
  TestWithReturnAddress();
  return RUN_ALL_TESTS();
# elif defined(GLOG_OS_WINDOWS) || defined(GLOG_OS_CYGWIN)
  TestWithReturnAddress();
  return RUN_ALL_TESTS();
# else
  printf("PASS (no symbolize_unittest support)\n");
  return 0;
# endif
#else
  printf("PASS (no symbolize support)\n");
  return 0;
#endif
}
#if defined(__GNUG__)
# pragma GCC diagnostic pop
#endif |
226 | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONVERT_ASSET_ARGS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONVERT_ASSET_ARGS_H_
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
FailureOr<SmallVector<tensorflow::AssetFileDef>> ConvertAssetArgs(
ModuleOp module_op);
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "absl/algorithm/container.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::mlir::tf_saved_model::AssetOp;
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::LookupBoundInputOfType;
using ::tensorflow::AssetFileDef;
// Builds a new argument-attribute list in which the
// `tf_saved_model.bound_input` attribute is removed and a
// `tf_saved_model.index_path` attribute (a one-element string array holding
// `index_path`) is appended.
SmallVector<NamedAttribute> ReplaceBoundInputAttrWithIndexPathAttr(
    const ArrayRef<NamedAttribute> arg_attrs, const StringRef index_path,
    Builder& builder) {
  SmallVector<NamedAttribute> updated_attrs;
  // Copy everything except the bound-input marker.
  for (const NamedAttribute& attr : arg_attrs) {
    if (attr.getName() == "tf_saved_model.bound_input") continue;
    updated_attrs.push_back(attr);
  }
  // Append the index-path attribute that takes the bound input's place.
  updated_attrs.push_back(
      NamedAttribute(builder.getStringAttr(kTfSavedModelIndexPathAttr),
                     builder.getStrArrayAttr({index_path})));
  return updated_attrs;
}
// Removes the conventional SavedModel "assets/" directory prefix from
// `filename` if present; otherwise returns `filename` unchanged.  The
// returned StringRef aliases the input's storage.
StringRef MaybeStripAssetDirectoryPrefix(const StringRef filename) {
  // Name the prefix once so the drop count can never drift out of sync with
  // the matched string (previously the length was a hard-coded `7`).
  const StringRef kAssetDirPrefix = "assets/";
  if (filename.starts_with(kAssetDirPrefix)) {
    return filename.drop_front(kAssetDirPrefix.size());
  }
  return filename;
}
// Assembles an AssetFileDef proto from the asset's filename (with any
// leading "assets/" stripped) and the name of the tensor it feeds.
AssetFileDef CreateAssetFileDef(const StringRef filename,
                                const StringRef tensor_name) {
  AssetFileDef asset_file_def{};
  asset_file_def.set_filename(MaybeStripAssetDirectoryPrefix(filename).str());
  // Populate the tensor info in place rather than through a temporary.
  asset_file_def.mutable_tensor_info()->set_name(tensor_name.str());
  return asset_file_def;
}
// Returns the comma-separated entries of the `inputs` field of `func_op`'s
// `tf.entry_function` attribute.  Returns an empty vector when the
// attribute — or its `inputs` entry — is missing or not a StringAttr; the
// previous version dereferenced a null attribute (crash) in both cases.
SmallVector<StringRef> GetEntryFunctionInputs(func::FuncOp func_op) {
  SmallVector<StringRef> inputs;
  auto entry_function_attr =
      func_op->getAttrOfType<DictionaryAttr>("tf.entry_function");
  if (!entry_function_attr) return inputs;
  auto inputs_attr =
      mlir::dyn_cast_or_null<StringAttr>(entry_function_attr.get("inputs"));
  if (!inputs_attr) return inputs;
  inputs_attr.strref().split(inputs, ",");
  return inputs;
}
// Rewrites the attributes of `main_func_op`'s argument `arg_idx`, swapping
// its `tf_saved_model.bound_input` attribute for a
// `tf_saved_model.index_path` attribute that holds `index_path`.
void ConvertMainArgAttrs(func::FuncOp main_func_op, const int arg_idx,
                         const StringRef index_path) {
  Builder builder(main_func_op.getContext());
  const ArrayRef<NamedAttribute> current_attrs =
      main_func_op.getArgAttrDict(arg_idx).getValue();
  main_func_op.setArgAttrs(
      arg_idx, ReplaceBoundInputAttrWithIndexPathAttr(current_attrs,
                                                      index_path, builder));
}
}
// Converts every main-function argument bound to a `tf_saved_model.asset`
// op into a plain input argument identified by an index path, returning one
// AssetFileDef per converted argument.  Fails when the module has no main
// function.  Arguments bound to other symbols (e.g. global tensors) are
// left untouched.
FailureOr<SmallVector<AssetFileDef>> ConvertAssetArgs(ModuleOp module_op) {
  func::FuncOp main_func_op = FindMainFuncOp(module_op);
  if (!main_func_op) return failure();
  SmallVector<StringRef> input_names = GetEntryFunctionInputs(main_func_op);
  SymbolTable symbol_table(module_op);
  SmallVector<AssetFileDef> asset_file_defs;
  for (BlockArgument argument : main_func_op.getArguments()) {
    const int arg_idx = argument.getArgNumber();
    auto asset_op =
        LookupBoundInputOfType<AssetOp>(main_func_op, arg_idx, symbol_table);
    // Only arguments bound to an AssetOp are converted.
    if (!asset_op) continue;
    // NOTE(review): assumes `tf.entry_function`'s `inputs` list has one
    // entry per argument; `input_names[arg_idx]` is unchecked — confirm.
    const StringRef input_name = input_names[arg_idx];
    ConvertMainArgAttrs(main_func_op, arg_idx, input_name);
    asset_file_defs.emplace_back(CreateAssetFileDef(
        asset_op.getFilenameAttr(), input_name));
  }
  return asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::tensorflow::AssetFileDef;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
// Fixture providing an MLIRContext loaded with the dialects needed to parse
// the SavedModel-flavored modules used by these tests.
class ConvertAssetArgsTest : public ::testing::Test {
 protected:
  ConvertAssetArgsTest() {
    ctx_.loadDialect<func::FuncDialect, TF::TensorFlowDialect,
                     tf_saved_model::TensorFlowSavedModelDialect>();
  }
  // Parses `module_op_str`; the returned ref is null (and the EXPECT records
  // a failure) when parsing fails.
  OwningOpRef<ModuleOp> ParseModuleOpString(
      const absl::string_view module_op_str) {
    auto module_op_ref = parseSourceString<ModuleOp>(module_op_str, &ctx_);
    EXPECT_TRUE(module_op_ref);
    return module_op_ref;
  }
  mlir::MLIRContext ctx_{};
};
// Returns the top-level function named "main" in `module_op`, or a null op
// when the module contains no such function.
func::FuncOp GetMainFuncOp(ModuleOp module_op) {
  func::FuncOp found{};
  for (func::FuncOp candidate : module_op.getOps<func::FuncOp>()) {
    if (candidate.getSymName() != "main") continue;
    found = candidate;
    break;
  }
  return found;
}
// An argument bound to an AssetOp is converted: the bound_input attribute
// is removed, an index_path attribute is added, and one AssetFileDef with
// the stripped filename and the entry-function input name is returned.
TEST_F(ConvertAssetArgsTest, ConvertsSingleAssetArg) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
    module {
      "tf_saved_model.asset"() {filename = "assets/file_0.txt", sym_name = "__tf_saved_model_asset0"} : () -> ()
      func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.bound_input = @__tf_saved_model_asset0}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
        return
      }
    }
  )mlir");
  FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
      ConvertAssetArgs(*module_op);
  EXPECT_TRUE(succeeded(asset_file_defs));
  EXPECT_THAT(*asset_file_defs, SizeIs(1));
  const AssetFileDef& asset_file_def = *asset_file_defs->begin();
  EXPECT_THAT(asset_file_def.filename(), Eq("file_0.txt"));
  EXPECT_THAT(asset_file_def.tensor_info().name(), Eq("arg_0:0"));
  func::FuncOp main_func_op = GetMainFuncOp(*module_op);
  DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
  EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
  const ArrayRef<Attribute> index_path_attrs =
      mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
          .getValue();
  EXPECT_THAT(index_path_attrs, SizeIs(1));
  StringAttr index_path =
      mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
  EXPECT_THAT(index_path, NotNull());
  EXPECT_THAT(index_path, Eq("arg_0:0"));
}
// An argument with no bound_input attribute is left untouched and no
// AssetFileDef is produced.
TEST_F(ConvertAssetArgsTest, NonBoundedArgsNotModified) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
    module {
      func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
        return
      }
    }
  )mlir");
  FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
      ConvertAssetArgs(*module_op);
  EXPECT_TRUE(succeeded(asset_file_defs));
  EXPECT_THAT(*asset_file_defs, IsEmpty());
  func::FuncOp main_func_op = GetMainFuncOp(*module_op);
  DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
  EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
  const ArrayRef<Attribute> index_path_attrs =
      mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
          .getValue();
  EXPECT_THAT(index_path_attrs, SizeIs(1));
  StringAttr index_path =
      mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
  EXPECT_THAT(index_path, NotNull());
  EXPECT_THAT(index_path, Eq("arg_0:0"));
}
// An argument bound to a global tensor (not an AssetOp) keeps its
// bound_input attribute and yields no AssetFileDef.
TEST_F(ConvertAssetArgsTest, ArgsBoundedToGlobalTensorNotModified) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
    module {
      "tf_saved_model.global_tensor"() {type = tensor<2xi32>, value = dense<2> : tensor<2xi32>, sym_name = "__tf_saved_model_x"} : () -> ()
      func.func @main(%arg_0: tensor<!tf_type.resource<tensor<2xi32>>> {tf_saved_model.bound_input = @__tf_saved_model_x}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
        return
      }
    }
  )mlir");
  FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
      ConvertAssetArgs(*module_op);
  EXPECT_TRUE(succeeded(asset_file_defs));
  EXPECT_THAT(*asset_file_defs, IsEmpty());
  func::FuncOp main_func_op = GetMainFuncOp(*module_op);
  DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
  EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), NotNull());
}
// A module without a main function makes ConvertAssetArgs fail.
TEST_F(ConvertAssetArgsTest, FailsWhenNoMain) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(module {})mlir");
  FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
      ConvertAssetArgs(*module_op);
  EXPECT_TRUE(failed(asset_file_defs));
}
}
} |
227 | #ifndef TENSORFLOW_LITE_CORE_ASYNC_BACKEND_ASYNC_KERNEL_INTERFACE_H_
#define TENSORFLOW_LITE_CORE_ASYNC_BACKEND_ASYNC_KERNEL_INTERFACE_H_
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#endif
#include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <vector>
#include "tensorflow/lite/async/c/async_kernel.h"
#include "tensorflow/lite/async/c/types.h"
namespace tflite {
namespace delegates {
namespace internal {
// The functions in this namespace are C-style trampolines installed on a
// TfLiteAsyncKernel: each recovers the owning C++
// BackendAsyncKernelInterface from the kernel's user data and forwards the
// call to the matching virtual method.
TfLiteStatus RegisterBuffer(TfLiteAsyncKernel* async_kernel,
                            TfLiteOpaqueContext* context, TfLiteIoType io_type,
                            const TfLiteBackendBuffer* buffer,
                            const TfLiteAttributeMap* attrs,
                            TfLiteBufferHandle handle) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->RegisterBuffer(context, io_type, buffer, attrs, handle);
}
TfLiteStatus RegisterBufferSlice(TfLiteAsyncKernel* async_kernel,
                                 TfLiteOpaqueContext* context,
                                 TfLiteBufferHandle buffer,
                                 const TfLiteAttributeMap* attrs,
                                 TfLiteBufferHandle handle) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->RegisterBufferSlice(context, buffer, attrs, handle);
}
TfLiteStatus UnregisterBuffer(TfLiteAsyncKernel* async_kernel,
                              TfLiteOpaqueContext* context,
                              const TfLiteBufferHandle handle) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->UnregisterBuffer(context, handle);
}
// Exposes the backend's supported buffer type names through the C ABI.
// The returned array aliases backend-owned storage.
void SupportedBufferTypes(const TfLiteAsyncKernel* async_kernel,
                          TfLiteIoType io_type, const char* const** types,
                          size_t* n_types) {
  // Null output pointers: nothing to report into.
  if (types == nullptr || n_types == nullptr) return;
  const auto& buf_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
                              TfLiteAsyncKernelGetKernelData(async_kernel))
                              ->SupportedBufferTypes(io_type);
  *types = buf_types.data();
  *n_types = buf_types.size();
}
// Same as SupportedBufferTypes, but for synchronization object type names.
void SupportedSynchronizations(const TfLiteAsyncKernel* async_kernel,
                               TfLiteIoType io_type, const char* const** types,
                               size_t* n_types) {
  if (types == nullptr || n_types == nullptr) return;
  const auto& sync_types = reinterpret_cast<const BackendAsyncKernelInterface*>(
                               TfLiteAsyncKernelGetKernelData(async_kernel))
                               ->SupportedSynchronizations(io_type);
  *types = sync_types.data();
  *n_types = sync_types.size();
}
bool ReconcileRestrictions(const TfLiteAsyncKernel* async_kernel,
                           const TfLiteOpaqueContext* context,
                           const TfLiteOpaqueNode* node, int tensor_index,
                           const TfLiteAttributeMap* user_provided_attributes,
                           TfLiteAttributeMap* merged,
                           TfLiteAttributeMap* conflict) {
  return reinterpret_cast<const BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->ReconcileRestrictions(context, node, tensor_index,
                              user_provided_attributes, merged, conflict);
}
TfLiteStatus SetAttributes(TfLiteAsyncKernel* async_kernel,
                           TfLiteOpaqueContext* context, TfLiteOpaqueNode* node,
                           int tensor_index, const TfLiteAttributeMap* attrs) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->SetAttributes(context, node, tensor_index, attrs);
}
TfLiteStatus Prepare(TfLiteAsyncKernel* async_kernel,
                     TfLiteOpaqueContext* context, TfLiteOpaqueNode* node) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->Prepare(context, node);
}
TfLiteStatus Eval(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
                  TfLiteOpaqueNode* node, TfLiteExecutionTask* task) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->Eval(context, node, task);
}
TfLiteStatus Wait(TfLiteAsyncKernel* async_kernel, TfLiteOpaqueContext* context,
                  TfLiteExecutionTask* task) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->Wait(context, task);
}
TfLiteStatus Finish(TfLiteAsyncKernel* async_kernel,
                    TfLiteOpaqueContext* context, TfLiteExecutionTask* task) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->Finish(context, task);
}
TfLiteStatus SetBufferAttributes(TfLiteAsyncKernel* async_kernel,
                                 const TfLiteBackendBuffer* buffer,
                                 const TfLiteAttributeMap* attrs) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->SetBufferAttributes(buffer, attrs);
}
TfLiteStatus GetBufferAttributes(TfLiteAsyncKernel* async_kernel,
                                 const TfLiteBackendBuffer* buffer,
                                 TfLiteAttributeMap* attrs) {
  return reinterpret_cast<BackendAsyncKernelInterface*>(
             TfLiteAsyncKernelGetKernelData(async_kernel))
      ->GetBufferAttributes(buffer, attrs);
}
}
// Creates the underlying TfLiteAsyncKernel with `this` as its kernel data
// and wires every C callback to the internal:: trampoline that forwards to
// this object's virtual methods.
BackendAsyncKernelInterface::BackendAsyncKernelInterface() {
  kernel_ = TfLiteAsyncKernelCreate(this);
  TfLiteAsyncKernelSetRegisterBuffer(kernel_, internal::RegisterBuffer);
  TfLiteAsyncKernelSetRegisterBufferSlice(kernel_,
                                          internal::RegisterBufferSlice);
  TfLiteAsyncKernelSetUnregisterBuffer(kernel_, internal::UnregisterBuffer);
  TfLiteAsyncKernelSetSupportedBufferTypes(kernel_,
                                           internal::SupportedBufferTypes);
  TfLiteAsyncKernelSetSupportedSynchronizations(
      kernel_, internal::SupportedSynchronizations);
  TfLiteAsyncKernelSetReconcileRestrictions(kernel_,
                                            internal::ReconcileRestrictions);
  TfLiteAsyncKernelSetSetAttributes(kernel_, internal::SetAttributes);
  TfLiteAsyncKernelSetSetBufferAttributes(kernel_,
                                          internal::SetBufferAttributes);
  TfLiteAsyncKernelSetGetBufferAttributes(kernel_,
                                          internal::GetBufferAttributes);
  TfLiteAsyncKernelSetPrepare(kernel_, internal::Prepare);
  TfLiteAsyncKernelSetEval(kernel_, internal::Eval);
  TfLiteAsyncKernelSetWait(kernel_, internal::Wait);
  TfLiteAsyncKernelSetFinish(kernel_, internal::Finish);
}
}
} | #include "tensorflow/lite/async/backend_async_kernel_interface.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/async/c/types.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/testing/mock_async_kernel.h"
using ::testing::_;
namespace tflite::delegates {
namespace {
// Invokes every C callback on the kernel once and verifies (via the strict
// mock) that each forwards to exactly the matching C++ virtual method.
TEST(BackendAsyncKernelInterfaceTest, BasicTest) {
  testing::StrictMock<async::testing::MockAsyncKernel> kernel;
  EXPECT_CALL(kernel, RegisterBuffer(_, _, _, _, _));
  EXPECT_CALL(kernel, RegisterBufferSlice(_, _, _, _));
  EXPECT_CALL(kernel, UnregisterBuffer(_, _));
  EXPECT_CALL(kernel, ReconcileRestrictions(_, _, _, _, _, _));
  EXPECT_CALL(kernel, SetAttributes(_, _, _, _));
  EXPECT_CALL(kernel, SetBufferAttributes(_, _));
  EXPECT_CALL(kernel, GetBufferAttributes(_, _));
  EXPECT_CALL(kernel, Prepare(_, _));
  EXPECT_CALL(kernel, Eval(_, _, _));
  EXPECT_CALL(kernel, Wait(_, _));
  EXPECT_CALL(kernel, Finish(_, _));
  auto* tflite_kernel = kernel.kernel();
  tflite_kernel->register_buffer(tflite_kernel, nullptr, kTfLiteIoTypeInput,
                                 nullptr, nullptr, 0);
  tflite_kernel->register_buffer_slice(tflite_kernel, nullptr, 0, nullptr, 0);
  tflite_kernel->unregister_buffer(tflite_kernel, nullptr, 0);
  tflite_kernel->reconcile_restrictions(tflite_kernel, nullptr, nullptr, 0,
                                        nullptr, nullptr, nullptr);
  tflite_kernel->set_attributes(tflite_kernel, nullptr, nullptr, 0, nullptr);
  tflite_kernel->set_buffer_attributes(tflite_kernel, nullptr, nullptr);
  tflite_kernel->get_buffer_attributes(tflite_kernel, nullptr, nullptr);
  tflite_kernel->prepare(tflite_kernel, nullptr, nullptr);
  tflite_kernel->eval(tflite_kernel, nullptr, nullptr, nullptr);
  tflite_kernel->wait(tflite_kernel, nullptr, nullptr);
  tflite_kernel->finish(tflite_kernel, nullptr, nullptr);
}
}
} |
228 | #ifndef TENSORFLOW_COMPILER_AOT_BENCHMARK_H_
#define TENSORFLOW_COMPILER_AOT_BENCHMARK_H_
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
// Limits for a benchmark run.
struct Options {
  // Default time budget (microseconds) used when neither limit is set.
  static constexpr int64_t kDefaultMicros = 3000000;
  // Maximum number of iterations; 0 means no iteration limit.
  int64_t max_iters = 0;
  // Maximum wall time in microseconds; 0 means use the default budget.
  int64_t max_micros = 0;
};
// Raw timing results of a benchmark run.
struct Stats {
  // Wall time of each iteration, in microseconds.
  std::vector<int64_t> per_iter_us;
  // Total wall time of the run, in microseconds.
  int64_t total_us;
  // Reserve up-front so push_back during timing rarely reallocates.
  Stats() : total_us(0) { per_iter_us.reserve(5000); }
};
void DumpStatsToStdout(const Stats& stats);
typedef std::function<void()> BenchmarkFn;
void Benchmark(const Options& options, const BenchmarkFn& fn, Stats* stats);
}
}
}
#endif
#include "tensorflow/compiler/aot/benchmark.h"
#include <sys/time.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
// Returns wall-clock time in microseconds since the Unix epoch.
static uint64 NowMicros() {
  struct timeval now;
  gettimeofday(&now, nullptr);
  const uint64 seconds_as_us = static_cast<uint64>(now.tv_sec) * 1000000;
  return seconds_as_us + static_cast<uint64>(now.tv_usec);
}
void DumpStatsToStdout(const Stats& stats) {
std::vector<int64_t> sorted_us(stats.per_iter_us);
std::sort(sorted_us.begin(), sorted_us.end());
const size_t count_us = sorted_us.size();
double sum_us = 0;
size_t count_us_trimmed = 0;
double sum_us_trimmed = 0;
size_t count_us_best = 0;
double sum_us_best = 0;
static constexpr float trim_ratio = 0.25;
static constexpr float best_ratio = 0.1;
const size_t count_trimmed = count_us * trim_ratio;
const size_t count_best = count_us * best_ratio;
for (size_t i = 0; i < sorted_us.size(); ++i) {
const int64_t us = sorted_us[i];
sum_us += us;
if (i >= count_trimmed && i < count_us - count_trimmed) {
sum_us_trimmed += us;
++count_us_trimmed;
}
if (i < count_best) {
sum_us_best += us;
++count_us_best;
}
}
const int kBufSize = 1000;
char buf[kBufSize];
snprintf(buf, kBufSize, "Mean with %2.0f%% trimmed:", trim_ratio * 100);
std::string label_trimmed(buf);
snprintf(buf, kBufSize, "Mean of %2.0f%% best:", best_ratio * 100);
std::string label_best(buf);
std::vector<std::pair<std::string, double>> groups = {
{"Best:", sorted_us.front()},
{"Worst:", sorted_us.back()},
{"Median:", sorted_us[count_us / 2]},
{"Mean:", sum_us / count_us},
{std::move(label_trimmed), sum_us_trimmed / count_us_trimmed},
{std::move(label_best), sum_us_best / count_us_best},
};
int max_label_size = 0;
double max_us = 0;
for (const auto& g : groups) {
if (g.first.size() > max_label_size) {
max_label_size = g.first.size();
}
if (g.second > max_us) {
max_us = g.second;
}
}
int max_digits = 1;
while (max_us >= 10.0) {
max_us /= 10.0;
++max_digits;
}
printf("Benchmark ran %zu iterations over %lld us\n", count_us,
static_cast<long long>(stats.total_us));
for (const auto& g : groups) {
printf(" %-*s %*.3f us\n", max_label_size, g.first.c_str(), max_digits + 4,
g.second);
}
}
// Runs `fn` repeatedly, recording each iteration's wall time into `stats`,
// until the time budget (`options.max_micros`, defaulting to
// Options::kDefaultMicros when neither limit is set) or the iteration
// budget (`options.max_iters`) is exhausted.  Always runs at least once.
void Benchmark(const Options& options, const BenchmarkFn& fn, Stats* stats) {
  // If neither limit is set, fall back to the default time budget.
  const int64_t max_us = (options.max_micros <= 0 && options.max_iters <= 0)
                             ? Options::kDefaultMicros
                             : options.max_micros;
  printf("Running benchmark for %lld us\n", static_cast<long long>(max_us));
  const int64_t start_us = NowMicros();
  int64_t iters = 0;
  while (true) {
    const int64_t iter_start_us = NowMicros();
    fn();
    const int64_t end_us = NowMicros();
    stats->per_iter_us.push_back(end_us - iter_start_us);
    const int64_t total_us = end_us - start_us;
    ++iters;
    // Stop once either budget is exhausted; total_us excludes bookkeeping
    // after the last iteration.
    if ((max_us > 0 && total_us >= max_us) ||
        (options.max_iters > 0 && iters >= options.max_iters)) {
      stats->total_us = total_us;
      break;
    }
  }
}
}
}
} | #include "tensorflow/compiler/aot/benchmark.h"
#include "tensorflow/compiler/aot/test_graph_tfadd.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace benchmark {
namespace {
// With an iteration cap set, the run records exactly that many samples.
TEST(Benchmark, Benchmark) {
  AddComp add;
  Options options;
  options.max_iters = 1;
  Stats stats1;
  Benchmark(options, [&] { add.Run(); }, &stats1);
  EXPECT_EQ(stats1.per_iter_us.size(), 1);
  options.max_iters = 5;
  Stats stats5;
  Benchmark(options, [&] { add.Run(); }, &stats5);
  EXPECT_EQ(stats5.per_iter_us.size(), 5);
}
}
}
}
} |
229 | #ifndef TENSORFLOW_TSL_PLATFORM_STRCAT_H_
#define TENSORFLOW_TSL_PLATFORM_STRCAT_H_
#include <string>
#include "tsl/platform/macros.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace strings {
// Zero-padding width for hexadecimal output via Hex: kNoPad emits the
// minimal number of digits; kZeroPadN left-pads with zeros to N digits.
// The enumerator's numeric value IS the pad width (kNoPad == 1).
enum PadSpec {
  kNoPad = 1,
  kZeroPad2,
  kZeroPad3,
  kZeroPad4,
  kZeroPad5,
  kZeroPad6,
  kZeroPad7,
  kZeroPad8,
  kZeroPad9,
  kZeroPad10,
  kZeroPad11,
  kZeroPad12,
  kZeroPad13,
  kZeroPad14,
  kZeroPad15,
  kZeroPad16
};
// Wraps an integer so StrCat formats it as lowercase hexadecimal,
// optionally zero-padded to the width given by `spec`.
struct Hex {
  uint64 value;
  enum PadSpec spec;
  // Zero-extends (never sign-extends) the input: the value is first cast
  // to the unsigned type of its own width before widening to uint64.
  template <class Int>
  explicit Hex(Int v, PadSpec s = kNoPad) : spec(s) {
    static_assert(
        sizeof(v) == 1 || sizeof(v) == 2 || sizeof(v) == 4 || sizeof(v) == 8,
        "Unknown integer type");
    value = sizeof(v) == 1   ? static_cast<uint8>(v)
            : sizeof(v) == 2 ? static_cast<uint16>(v)
            : sizeof(v) == 4 ? static_cast<uint32>(v)
                             : static_cast<uint64>(v);
  }
};
// Converts a single value (integer, float, Hex, or string-like) into a
// string piece for StrCat/StrAppend.  Numeric conversions write into the
// inline digits_ buffer; string-like inputs are referenced WITHOUT
// copying, so an AlphaNum must not outlive the argument it was built from
// — only use it as a function parameter.
class AlphaNum {
 public:
  // Integer overloads: format into digits_ with the fast to-buffer helpers.
  AlphaNum(int i32)
      : piece_(digits_, FastInt32ToBufferLeft(i32, digits_)) {}
  AlphaNum(unsigned int u32)
      : piece_(digits_, FastUInt32ToBufferLeft(u32, digits_)) {}
  AlphaNum(long x)
      : piece_(digits_, FastInt64ToBufferLeft(x, digits_)) {}
  AlphaNum(unsigned long x)
      : piece_(digits_, FastUInt64ToBufferLeft(x, digits_)) {}
  AlphaNum(long long int i64)
      : piece_(digits_, FastInt64ToBufferLeft(i64, digits_)) {}
  AlphaNum(unsigned long long int u64)
      : piece_(digits_, FastUInt64ToBufferLeft(u64, digits_)) {}
  // Floating-point overloads (bfloat16 is widened to float first).
  AlphaNum(float f)
      : piece_(digits_, FloatToBuffer(f, digits_)) {}
  AlphaNum(double f)
      : piece_(digits_, DoubleToBuffer(f, digits_)) {}
  AlphaNum(bfloat16 bf)
      : piece_(digits_, FloatToBuffer(static_cast<float>(bf), digits_)) {}
  // Hexadecimal formatting; defined out of line.
  AlphaNum(Hex hex);
  // String-like overloads: reference the caller's storage, no copy.
  AlphaNum(const char *c_str) : piece_(c_str) {}
  AlphaNum(const StringPiece &pc) : piece_(pc) {}
  AlphaNum(const std::string &str)
      : piece_(str) {}
  AlphaNum(const tstring &str)
      : piece_(str) {}
  template <typename A>
  AlphaNum(const std::basic_string<char, std::char_traits<char>, A> &str)
      : piece_(str) {}
  StringPiece::size_type size() const { return piece_.size(); }
  const char *data() const { return piece_.data(); }
  StringPiece Piece() const { return piece_; }

 private:
  StringPiece piece_;
  char digits_[kFastToBufferSize];
  // Deliberately undefined: a bare char would be ambiguous (character vs.
  // small integer).  Use a string, or add 0 to format the numeric value.
  AlphaNum(char c);
  AlphaNum(const AlphaNum &) = delete;
  void operator=(const AlphaNum &) = delete;
};
// StrCat concatenates the string forms of its arguments.  The 1-4 argument
// overloads are defined out of line; five or more arguments route through
// the variadic template below, which funnels into internal::CatPieces.
std::string StrCat(const AlphaNum &a) TF_MUST_USE_RESULT;
std::string StrCat(const AlphaNum &a, const AlphaNum &b) TF_MUST_USE_RESULT;
std::string StrCat(const AlphaNum &a, const AlphaNum &b,
                   const AlphaNum &c) TF_MUST_USE_RESULT;
std::string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
                   const AlphaNum &d) TF_MUST_USE_RESULT;
namespace internal {
// Shared implementation helpers for the variadic StrCat/StrAppend.
std::string CatPieces(std::initializer_list<StringPiece> pieces);
void AppendPieces(std::string *dest, std::initializer_list<StringPiece> pieces);
}
template <typename... AV>
std::string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
                   const AlphaNum &d, const AlphaNum &e,
                   const AV &...args) TF_MUST_USE_RESULT;
// Variadic StrCat: every extra argument is converted to AlphaNum at the
// call site, then all pieces are concatenated in one pass.
template <typename... AV>
std::string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
                   const AlphaNum &d, const AlphaNum &e, const AV &...args) {
  return internal::CatPieces({a.Piece(), b.Piece(), c.Piece(), d.Piece(),
                              e.Piece(),
                              static_cast<const AlphaNum &>(args).Piece()...});
}
// StrAppend appends the string forms of its arguments to *dest.  The
// pieces must not alias *dest (checked in debug builds), since appending
// may reallocate the destination.
void StrAppend(std::string *dest, const AlphaNum &a);
void StrAppend(std::string *dest, const AlphaNum &a, const AlphaNum &b);
void StrAppend(std::string *dest, const AlphaNum &a, const AlphaNum &b,
               const AlphaNum &c);
void StrAppend(std::string *dest, const AlphaNum &a, const AlphaNum &b,
               const AlphaNum &c, const AlphaNum &d);
template <typename... AV>
inline void StrAppend(std::string *dest, const AlphaNum &a, const AlphaNum &b,
                      const AlphaNum &c, const AlphaNum &d, const AlphaNum &e,
                      const AV &...args) {
  internal::AppendPieces(dest,
                         {a.Piece(), b.Piece(), c.Piece(), d.Piece(), e.Piece(),
                          static_cast<const AlphaNum &>(args).Piece()...});
}
}
}
#endif
#include "tsl/platform/strcat.h"
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include "absl/meta/type_traits.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace strings {
// Formats hex.value as lowercase hexadecimal, writing digits backwards
// from the end of digits_.  The mask seeds a 1-bit in the nibble at the
// requested pad width (PadSpec's value is the width, kNoPad == 1) ORed
// with the value itself, so the loop runs until both the value is consumed
// and the minimum number of digits has been emitted.
AlphaNum::AlphaNum(Hex hex) {
  char *const end = &digits_[kFastToBufferSize];
  char *writer = end;
  uint64 value = hex.value;
  uint64 width = hex.spec;
  uint64 mask = (static_cast<uint64>(1) << (width - 1) * 4) | value;
  static const char hexdigits[] = "0123456789abcdef";
  do {
    *--writer = hexdigits[value & 0xF];
    value >>= 4;
    mask >>= 4;
  } while (mask != 0);
  piece_ = StringPiece(writer, end - writer);
}
// Copies x's bytes to `out` and returns the position just past them.
// A null data() pointer contributes nothing.
static char *Append1(char *out, const AlphaNum &x) {
  if (x.data() != nullptr) {
    memcpy(out, x.data(), x.size());
    out += x.size();
  }
  return out;
}
static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) {
if (x1.data() != nullptr) {
memcpy(out, x1.data(), x1.size());
out += x1.size();
}
if (x2.data() == nullptr) return out;
memcpy(out, x2.data(), x2.size());
return out + x2.size();
}
static char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2,
const AlphaNum &x3, const AlphaNum &x4) {
if (x1.data() != nullptr) {
memcpy(out, x1.data(), x1.size());
out += x1.size();
}
if (x2.data() != nullptr) {
memcpy(out, x2.data(), x2.size());
out += x2.size();
}
if (x3.data() != nullptr) {
memcpy(out, x3.data(), x3.size());
out += x3.size();
}
if (x4.data() == nullptr) return out;
memcpy(out, x4.data(), x4.size());
return out + x4.size();
}
// Fixed-arity StrCat overloads: size the result exactly once, then copy
// each piece into place with the Append helpers (no reallocation).
string StrCat(const AlphaNum &a) { return string(a.data(), a.size()); }
string StrCat(const AlphaNum &a, const AlphaNum &b) {
  string result(a.size() + b.size(), '\0');
  char *const begin = &*result.begin();
  char *out = Append2(begin, a, b);
  DCHECK_EQ(out, begin + result.size());
  return result;
}
string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c) {
  string result(a.size() + b.size() + c.size(), '\0');
  char *const begin = &*result.begin();
  char *out = Append2(begin, a, b);
  out = Append1(out, c);
  DCHECK_EQ(out, begin + result.size());
  return result;
}
string StrCat(const AlphaNum &a, const AlphaNum &b, const AlphaNum &c,
              const AlphaNum &d) {
  string result(a.size() + b.size() + c.size() + d.size(), '\0');
  char *const begin = &*result.begin();
  char *out = Append4(begin, a, b, c, d);
  DCHECK_EQ(out, begin + result.size());
  return result;
}
namespace {
// Resizes a string without zero-filling the new tail when the standard
// library exposes __resize_default_init; falls back to plain resize()
// otherwise.  The specialization is selected by SFINAE on that member.
template <typename string_type, typename = void>
struct ResizeUninitializedTraits {
  using HasMember = std::false_type;
  static void Resize(string_type *s, size_t new_size) { s->resize(new_size); }
};
template <typename string_type>
struct ResizeUninitializedTraits<
    string_type, absl::void_t<decltype(std::declval<string_type &>()
                                           .__resize_default_init(237))> > {
  using HasMember = std::true_type;
  static void Resize(string_type *s, size_t new_size) {
    s->__resize_default_init(new_size);
  }
};
static inline void STLStringResizeUninitialized(string *s, size_t new_size) {
  ResizeUninitializedTraits<string>::Resize(s, new_size);
}
// Grows capacity geometrically (at least doubling) so repeated appends
// stay amortized O(1) instead of O(n) per call.
template <typename string_type>
void STLStringReserveAmortized(string_type *s, size_t new_size) {
  const size_t cap = s->capacity();
  if (new_size > cap) {
    s->reserve((std::max)(new_size, 2 * cap));
  }
}
template <typename string_type>
void STLStringResizeUninitializedAmortized(string_type *s, size_t new_size) {
  STLStringReserveAmortized(s, new_size);
  STLStringResizeUninitialized(s, new_size);
}
}
namespace internal {
// Concatenates all pieces into a freshly sized string in one pass.
string CatPieces(std::initializer_list<StringPiece> pieces) {
  size_t total_size = 0;
  for (const StringPiece piece : pieces) total_size += piece.size();
  string result(total_size, '\0');
  char *const begin = &*result.begin();
  char *out = begin;
  for (const StringPiece piece : pieces) {
    const size_t this_size = piece.size();
    memcpy(out, piece.data(), this_size);
    out += this_size;
  }
  DCHECK_EQ(out, begin + result.size());
  return result;
}
// Debug check that `src` does not alias `dest`'s buffer: appending may
// reallocate dest, which would invalidate an aliasing piece.
#define DCHECK_NO_OVERLAP(dest, src) \
  DCHECK_GE(uintptr_t((src).data() - (dest).data()), uintptr_t((dest).size()))
// Appends all pieces to *result, growing its capacity amortized.
void AppendPieces(string *result, std::initializer_list<StringPiece> pieces) {
  size_t old_size = result->size();
  size_t total_size = old_size;
  for (const StringPiece piece : pieces) {
    DCHECK_NO_OVERLAP(*result, piece);
    total_size += piece.size();
  }
  STLStringResizeUninitializedAmortized(result, total_size);
  char *const begin = &*result->begin();
  char *out = begin + old_size;
  for (const StringPiece piece : pieces) {
    const size_t this_size = piece.size();
    memcpy(out, piece.data(), this_size);
    out += this_size;
  }
  DCHECK_EQ(out, begin + result->size());
}
}
// Fixed-arity StrAppend overloads: verify the pieces do not alias the
// destination (debug builds), grow it once, then copy the pieces in place.
void StrAppend(string *result, const AlphaNum &a) {
  DCHECK_NO_OVERLAP(*result, a);
  result->append(a.data(), a.size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b) {
  DCHECK_NO_OVERLAP(*result, a);
  DCHECK_NO_OVERLAP(*result, b);
  string::size_type old_size = result->size();
  STLStringResizeUninitializedAmortized(result, old_size + a.size() + b.size());
  char *const begin = &*result->begin();
  char *out = Append2(begin + old_size, a, b);
  DCHECK_EQ(out, begin + result->size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,
               const AlphaNum &c) {
  DCHECK_NO_OVERLAP(*result, a);
  DCHECK_NO_OVERLAP(*result, b);
  DCHECK_NO_OVERLAP(*result, c);
  string::size_type old_size = result->size();
  STLStringResizeUninitializedAmortized(
      result, old_size + a.size() + b.size() + c.size());
  char *const begin = &*result->begin();
  char *out = Append2(begin + old_size, a, b);
  out = Append1(out, c);
  DCHECK_EQ(out, begin + result->size());
}
void StrAppend(string *result, const AlphaNum &a, const AlphaNum &b,
               const AlphaNum &c, const AlphaNum &d) {
  DCHECK_NO_OVERLAP(*result, a);
  DCHECK_NO_OVERLAP(*result, b);
  DCHECK_NO_OVERLAP(*result, c);
  DCHECK_NO_OVERLAP(*result, d);
  string::size_type old_size = result->size();
  STLStringResizeUninitializedAmortized(
      result, old_size + a.size() + b.size() + c.size() + d.size());
  char *const begin = &*result->begin();
  char *out = Append4(begin + old_size, a, b, c, d);
  DCHECK_EQ(out, begin + result->size());
}
}
} | #include "tsl/platform/strcat.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
#ifdef _MSC_VER
typedef ptrdiff_t ssize_t;
#endif
namespace tsl {
namespace strings {
// Every built-in integer width and signedness formats via StrCat.
TEST(StrCat, Ints) {
  const int16_t s = -1;
  const uint16 us = 2;
  const int i = -3;
  const unsigned int ui = 4;
  const int32_t l = -5;
  const uint32 ul = 6;
  const int64_t ll = -7;
  const uint64 ull = 8;
  const ptrdiff_t ptrdiff = -9;
  const size_t size = 10;
  const ssize_t ssize = -11;
  const intptr_t intptr = -12;
  const uintptr_t uintptr = 13;
  string answer;
  answer = StrCat(s, us);
  EXPECT_EQ(answer, "-12");
  answer = StrCat(i, ui);
  EXPECT_EQ(answer, "-34");
  answer = StrCat(l, ul);
  EXPECT_EQ(answer, "-56");
  answer = StrCat(ll, ull);
  EXPECT_EQ(answer, "-78");
  answer = StrCat(ptrdiff, size);
  EXPECT_EQ(answer, "-910");
  answer = StrCat(ssize, intptr);
  EXPECT_EQ(answer, "-11-12");
  answer = StrCat(uintptr, 0);
  EXPECT_EQ(answer, "130");
}
// float, double, and bfloat16 all format with minimal digits.
TEST(StrCat, Floats) {
  const int s = 0;
  const float f = 1.5f;
  const double d = 1.5;
  const bfloat16 bf(1.5f);
  string answer;
  answer = StrCat(s, f);
  EXPECT_EQ(answer, "01.5");
  answer = StrCat(s, d);
  EXPECT_EQ(answer, "01.5");
  answer = StrCat(s, bf);
  EXPECT_EQ(answer, "01.5");
}
// A default-constructed (null-data) string_view contributes nothing,
// regardless of its position among the arguments.
TEST(StrCat, Nulls) {
  string result;
  absl::string_view v;
  string strs[] = {"Hello", "Cruel", "World"};
  result = StrCat(v);
  EXPECT_EQ(result, "");
  result = StrCat(strs[0], v);
  EXPECT_EQ(result, "Hello");
  result = StrCat(v, strs[0]);
  EXPECT_EQ(result, "Hello");
  result = StrCat(v, strs[0], strs[1]);
  EXPECT_EQ(result, "HelloCruel");
  result = StrCat(strs[0], v, strs[1]);
  EXPECT_EQ(result, "HelloCruel");
  result = StrCat(strs[0], strs[1], v);
  EXPECT_EQ(result, "HelloCruel");
  result = StrCat(v, strs[0], strs[1], strs[2]);
  EXPECT_EQ(result, "HelloCruelWorld");
  result = StrCat(strs[0], v, strs[1], strs[2]);
  EXPECT_EQ(result, "HelloCruelWorld");
  result = StrCat(strs[0], strs[1], v, strs[2]);
  EXPECT_EQ(result, "HelloCruelWorld");
  result = StrCat(strs[0], strs[1], strs[2], v);
  EXPECT_EQ(result, "HelloCruelWorld");
}
// Smoke tests over mixed argument types: bools, strings, pieces, C
// strings, char-as-int, large integers, and floating point.
TEST(StrCat, Basics) {
  string result;
  string strs[] = {"Hello", "Cruel", "World"};
  StringPiece pieces[] = {"Hello", "Cruel", "World"};
  const char *c_strs[] = {"Hello", "Cruel", "World"};
  int32 i32s[] = {'H', 'C', 'W'};
  uint64 ui64s[] = {12345678910LL, 10987654321LL};
  result = StrCat(false, true, 2, 3);
  EXPECT_EQ(result, "0123");
  result = StrCat(-1);
  EXPECT_EQ(result, "-1");
  result = StrCat(0.5);
  EXPECT_EQ(result, "0.5");
  result = StrCat(strs[1], pieces[2]);
  EXPECT_EQ(result, "CruelWorld");
  result = StrCat(strs[0], ", ", pieces[2]);
  EXPECT_EQ(result, "Hello, World");
  result = StrCat(strs[0], ", ", strs[1], " ", strs[2], "!");
  EXPECT_EQ(result, "Hello, Cruel World!");
  result = StrCat(pieces[0], ", ", pieces[1], " ", pieces[2]);
  EXPECT_EQ(result, "Hello, Cruel World");
  result = StrCat(c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
  EXPECT_EQ(result, "Hello, Cruel World");
  result = StrCat("ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
  EXPECT_EQ(result, "ASCII 72, 67 87!");
  result = StrCat(ui64s[0], ", ", ui64s[1], "!");
  EXPECT_EQ(result, "12345678910, 10987654321!");
  string one = "1";
  result = StrCat("And a ", one.size(), " and a ", &result[2] - &result[0],
                  " and a ", one, " 2 3 4", "!");
  EXPECT_EQ(result, "And a 1 and a 2 and a 1 2 3 4!");
  result = StrCat("To output a char by ASCII/numeric value, use +: ", '!' + 0);
  EXPECT_EQ(result, "To output a char by ASCII/numeric value, use +: 33");
  float f = 100000.5;
  result = StrCat("A hundred K and a half is ", f);
  EXPECT_EQ(result, "A hundred K and a half is 100000.5");
  double d = f;
  d *= d;
  result = StrCat("A hundred K and a half squared is ", d);
  EXPECT_EQ(result, "A hundred K and a half squared is 10000100000.25");
  result = StrCat(1, 2, 333, 4444, 55555, 666666, 7777777, 88888888, 999999999);
  EXPECT_EQ(result, "12333444455555666666777777788888888999999999");
}
TEST(StrCat, MaxArgs) {
string result;
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a");
EXPECT_EQ(result, "123456789a");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b");
EXPECT_EQ(result, "123456789ab");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c");
EXPECT_EQ(result, "123456789abc");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d");
EXPECT_EQ(result, "123456789abcd");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e");
EXPECT_EQ(result, "123456789abcde");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f");
EXPECT_EQ(result, "123456789abcdef");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g");
EXPECT_EQ(result, "123456789abcdefg");
result =
StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g", "h");
EXPECT_EQ(result, "123456789abcdefgh");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i");
EXPECT_EQ(result, "123456789abcdefghi");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j");
EXPECT_EQ(result, "123456789abcdefghij");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k");
EXPECT_EQ(result, "123456789abcdefghijk");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l");
EXPECT_EQ(result, "123456789abcdefghijkl");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m");
EXPECT_EQ(result, "123456789abcdefghijklm");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n");
EXPECT_EQ(result, "123456789abcdefghijklmn");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o");
EXPECT_EQ(result, "123456789abcdefghijklmno");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p");
EXPECT_EQ(result, "123456789abcdefghijklmnop");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o", "p", "q");
EXPECT_EQ(result, "123456789abcdefghijklmnopq");
result = StrCat(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
"s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D",
"E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z");
EXPECT_EQ(result,
"12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ");
}
// Verifies StrAppend appends (rather than replaces) across the same mixed
// argument types exercised by the StrCat tests; each step checks only the
// newly appended suffix via substr(old_size).
TEST(StrAppend, Basics) {
  string result = "existing text";
  string strs[] = {"Hello", "Cruel", "World"};
  StringPiece pieces[] = {"Hello", "Cruel", "World"};
  const char *c_strs[] = {"Hello", "Cruel", "World"};
  int32 i32s[] = {'H', 'C', 'W'};
  uint64 ui64s[] = {12345678910LL, 10987654321LL};
  string::size_type old_size = result.size();
  StrAppend(&result, strs[0]);
  EXPECT_EQ(result.substr(old_size), "Hello");
  old_size = result.size();
  StrAppend(&result, strs[1], pieces[2]);
  EXPECT_EQ(result.substr(old_size), "CruelWorld");
  old_size = result.size();
  StrAppend(&result, strs[0], ", ", pieces[2]);
  EXPECT_EQ(result.substr(old_size), "Hello, World");
  old_size = result.size();
  StrAppend(&result, strs[0], ", ", strs[1], " ", strs[2], "!");
  EXPECT_EQ(result.substr(old_size), "Hello, Cruel World!");
  old_size = result.size();
  StrAppend(&result, pieces[0], ", ", pieces[1], " ", pieces[2]);
  EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
  old_size = result.size();
  StrAppend(&result, c_strs[0], ", ", c_strs[1], " ", c_strs[2]);
  EXPECT_EQ(result.substr(old_size), "Hello, Cruel World");
  old_size = result.size();
  StrAppend(&result, "ASCII ", i32s[0], ", ", i32s[1], " ", i32s[2], "!");
  EXPECT_EQ(result.substr(old_size), "ASCII 72, 67 87!");
  old_size = result.size();
  StrAppend(&result, ui64s[0], ", ", ui64s[1], "!");
  EXPECT_EQ(result.substr(old_size), "12345678910, 10987654321!");
  string one = "1";
  old_size = result.size();
  StrAppend(&result, "And a ", one.size(), " and a ", &result[2] - &result[0],
            " and a ", one, " 2 3 4", "!");
  EXPECT_EQ(result.substr(old_size), "And a 1 and a 2 and a 1 2 3 4!");
  old_size = result.size();
  StrAppend(&result,
            "To output a char by ASCII/numeric value, use +: ", '!' + 0);
  EXPECT_EQ(result.substr(old_size),
            "To output a char by ASCII/numeric value, use +: 33");
  float f = 100000.5;
  old_size = result.size();
  StrAppend(&result, "A hundred K and a half is ", f);
  EXPECT_EQ(result.substr(old_size), "A hundred K and a half is 100000.5");
  double d = f;
  d *= d;
  old_size = result.size();
  StrAppend(&result, "A hundred K and a half squared is ", d);
  EXPECT_EQ(result.substr(old_size),
            "A hundred K and a half squared is 10000100000.25");
  old_size = result.size();
  StrAppend(&result, 1, 22, 333, 4444, 55555, 666666, 7777777, 88888888, 9);
  EXPECT_EQ(result.substr(old_size), "1223334444555556666667777777888888889");
  old_size = result.size();
  StrAppend(&result, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, "a", "b", "c", "d", "e",
            "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r",
            "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E",
            "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R",
            "S", "T", "U", "V", "W", "X", "Y", "Z",
            "No limit thanks to C++11's variadic templates");
  EXPECT_EQ(result.substr(old_size),
            "12345678910abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "No limit thanks to C++11's variadic templates");
}
// Appending a piece of the destination string to itself is rejected by a
// debug-mode check (the buffer may be reallocated mid-append).
TEST(StrAppend, Death) {
  string s = "self";
  EXPECT_DEBUG_DEATH(StrAppend(&s, s.c_str() + 1), "Check failed:");
  EXPECT_DEBUG_DEATH(StrAppend(&s, s), "Check failed:");
}
// Cross-checks StrCat(Hex(...)) for a 64-bit value against printf-style
// formatting with 16-digit padding, 8-digit padding, and no padding.
static void CheckHex64(uint64 v) {
  const auto expect_match = [v](const string& actual, const string& expected) {
    EXPECT_EQ(expected, actual) << " decimal value " << v;
  };
  const auto ull = static_cast<unsigned long long>(v);
  expect_match(StrCat(Hex(v, kZeroPad16)), Printf("%016llx", ull));
  expect_match(StrCat(Hex(v, kZeroPad8)), Printf("%08llx", ull));
  expect_match(StrCat(Hex(v)), Printf("%llx", ull));
}
// Cross-checks StrCat(Hex(...)) for a 32-bit unsigned value against
// printf-style formatting, with and without 8-digit zero padding.
static void CheckHex32(uint32 v) {
  const auto expect_match = [v](const string& actual, const string& expected) {
    EXPECT_EQ(expected, actual) << " decimal value " << v;
  };
  expect_match(StrCat(Hex(v, kZeroPad8)), Printf("%08x", v));
  expect_match(StrCat(Hex(v)), Printf("%x", v));
}
// Cross-checks StrCat(Hex(...)) for a signed 32-bit value; negative values
// print as the hex of their 32-bit two's-complement bit pattern.
static void CheckHexSigned32(int32_t v) {
  const auto expect_match = [v](const string& actual, const string& expected) {
    EXPECT_EQ(expected, actual) << " decimal value " << v;
  };
  expect_match(StrCat(Hex(v, kZeroPad8)), Printf("%08x", v));
  expect_match(StrCat(Hex(v)), Printf("%x", v));
}
// Sweeps the hex-printing helpers over small values, boundary bit
// patterns, and negative narrow integers (which must print as the hex of
// their width-limited bit pattern: int8 -1 -> "ff", int16 -1 -> "ffff").
static void TestFastPrints() {
  for (int i = 0; i < 10000; i++) {
    CheckHex64(i);
    CheckHex32(i);
    CheckHexSigned32(i);
    CheckHexSigned32(-i);
  }
  CheckHex64(0x123456789abcdef0ull);
  CheckHex32(0x12345678);
  int8_t minus_one_8bit = -1;
  EXPECT_EQ("ff", StrCat(Hex(minus_one_8bit)));
  int16_t minus_one_16bit = -1;
  EXPECT_EQ("ffff", StrCat(Hex(minus_one_16bit)));
}
// Entry point for the fast-print checks above (historically part of the
// numbers test binary).
TEST(Numbers, TestFunctionsMovedOverFromNumbersMain) { TestFastPrints(); }
}
} |
230 | #ifndef TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
#define TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
#include "tensorflow/lite/core/c/common.h"
namespace EigenForTFLite {
struct ThreadPoolDevice;
}
namespace tflite {
namespace eigen_support {
// Adds a reference to the shared Eigen context attached to `context`,
// creating the context on first use.
void IncrementUsageCounter(TfLiteContext* context);
// Releases a reference; the shared Eigen context is destroyed when the
// count reaches zero. Must be paired with IncrementUsageCounter().
void DecrementUsageCounter(TfLiteContext* context);
// Returns the shared Eigen thread-pool device. Requires a prior call to
// IncrementUsageCounter().
const EigenForTFLite::ThreadPoolDevice* GetThreadPoolDevice(
    TfLiteContext* context);
}
}
#endif
#include "tensorflow/lite/kernels/eigen_support.h"
#include <functional>
#include <memory>
#include <utility>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/lite/kernels/op_macros.h"
#ifndef EIGEN_DONT_ALIGN
#include "tensorflow/lite/util.h"
#endif
namespace tflite {
namespace eigen_support {
namespace {
// Thread count used when the context expresses no preference
// (recommended_num_threads == -1).
constexpr int kDefaultNumThreadpoolThreads = 4;
// A recommendation is valid when it is -1 ("no preference") or any
// non-negative count.
bool IsValidNumThreads(int num_threads) { return !(num_threads < -1); }
// Resolves a recommendation to a concrete thread count, mapping the
// "no preference" sentinel (-1) to the default.
int GetNumThreads(int num_threads) {
  if (num_threads > -1) {
    return num_threads;
  }
  return kDefaultNumThreadpoolThreads;
}
#ifndef EIGEN_DONT_ALIGN
static_assert(
kDefaultTensorAlignment % EIGEN_MAX_ALIGN_BYTES == 0,
"kDefaultTensorAlignment doesn't comply with Eigen alignment requirement.");
#endif
// Forwards the thread count to Eigen's global pool. This only has an
// effect when Eigen is built with OpenMP; otherwise it is a no-op.
void SetEigenNbThreads(int threads) {
#if defined(EIGEN_HAS_OPENMP)
  Eigen::setNbThreads(threads);
#endif
}
// Adapts an Eigen::ThreadPool to the Eigen::ThreadPoolInterface expected by
// Eigen::ThreadPoolDevice. For num_threads <= 1 no pool is created and
// scheduled work runs inline on the calling thread.
class EigenThreadPoolWrapper : public Eigen::ThreadPoolInterface {
 public:
  explicit EigenThreadPoolWrapper(int num_threads) {
    // Only spin up a real pool when there is genuine parallelism.
    if (num_threads > 1) {
      pool_ = std::make_unique<Eigen::ThreadPool>(num_threads);
    }
  }
  ~EigenThreadPoolWrapper() override {}
  void Schedule(std::function<void()> fn) override {
    if (pool_) {
      pool_->Schedule(std::move(fn));
    } else {
      // Single-threaded fallback: execute synchronously.
      fn();
    }
  }
  int NumThreads() const override { return pool_ ? pool_->NumThreads() : 1; }
  int CurrentThreadId() const override {
    return pool_ ? pool_->CurrentThreadId() : 0;
  }

 private:
  // Null when running single-threaded.
  std::unique_ptr<Eigen::ThreadPool> pool_;
};
// Owns an Eigen ThreadPoolDevice that is created lazily on first use and
// rebuilt whenever the resolved thread count changes.
class LazyEigenThreadPoolHolder {
 public:
  explicit LazyEigenThreadPoolHolder(int num_threads) {
    SetNumThreads(num_threads);
  }
  // Returns the device, constructing pool and device on first call.
  const Eigen::ThreadPoolDevice* GetThreadPoolDevice() {
    if (!device_) {
      thread_pool_wrapper_ =
          std::make_unique<EigenThreadPoolWrapper>(target_num_threads_);
      device_ = std::make_unique<Eigen::ThreadPoolDevice>(
          thread_pool_wrapper_.get(), target_num_threads_);
    }
    return device_.get();
  }
  // Updates the target thread count (-1 resolves to the default); if it
  // changed, drops the existing device so it is rebuilt lazily.
  void SetNumThreads(int num_threads) {
    const int target_num_threads = GetNumThreads(num_threads);
    if (target_num_threads_ != target_num_threads) {
      target_num_threads_ = target_num_threads;
      // Destroy the device before the wrapper it points at.
      device_.reset();
      thread_pool_wrapper_.reset();
    }
  }

 private:
  int target_num_threads_ = kDefaultNumThreadpoolThreads;
  std::unique_ptr<Eigen::ThreadPoolDevice> device_;
  std::unique_ptr<Eigen::ThreadPoolInterface> thread_pool_wrapper_;
};
// External context registered on the TfLiteContext; reference-counted so
// multiple ops can share one lazily built Eigen thread pool.
struct RefCountedEigenContext : public TfLiteExternalContext {
  std::unique_ptr<LazyEigenThreadPoolHolder> thread_pool_holder;
  // Number of IncrementUsageCounter() calls not yet matched by decrements.
  int num_references = 0;
};
// Fetches the Eigen external context previously registered on `context`,
// or nullptr if none exists yet.
RefCountedEigenContext* GetEigenContext(TfLiteContext* context) {
  return reinterpret_cast<RefCountedEigenContext*>(
      context->GetExternalContext(context, kTfLiteEigenContext));
}
// Callback invoked by the runtime when recommended_num_threads changes;
// propagates the new count to Eigen's global pool and the lazy holder.
TfLiteStatus Refresh(TfLiteContext* context) {
  if (IsValidNumThreads(context->recommended_num_threads)) {
    SetEigenNbThreads(GetNumThreads(context->recommended_num_threads));
  }
  auto* ptr = GetEigenContext(context);
  if (ptr != nullptr) {
    // SetNumThreads maps -1 to the default itself, so the raw
    // recommendation is forwarded unmodified.
    ptr->thread_pool_holder->SetNumThreads(context->recommended_num_threads);
  }
  return kTfLiteOk;
}
}
// Adds a reference to the shared Eigen context, creating and registering
// it (with the Refresh callback installed) on first use.
void IncrementUsageCounter(TfLiteContext* context) {
  auto* ptr = GetEigenContext(context);
  if (ptr == nullptr) {
    if (IsValidNumThreads(context->recommended_num_threads)) {
      SetEigenNbThreads(context->recommended_num_threads);
    }
    ptr = new RefCountedEigenContext;
    ptr->type = kTfLiteEigenContext;
    ptr->Refresh = Refresh;
    ptr->thread_pool_holder = std::make_unique<LazyEigenThreadPoolHolder>(
        context->recommended_num_threads);
    ptr->num_references = 0;
    context->SetExternalContext(context, kTfLiteEigenContext, ptr);
  }
  ptr->num_references++;
}
// Releases a reference; when the count hits zero the context is destroyed
// and deregistered. Fatal if called without a matching increment.
void DecrementUsageCounter(TfLiteContext* context) {
  auto* ptr = GetEigenContext(context);
  if (ptr == nullptr) {
    TF_LITE_FATAL(
        "Call to DecrementUsageCounter() not preceded by "
        "IncrementUsageCounter()");
  }
  if (--ptr->num_references == 0) {
    delete ptr;
    context->SetExternalContext(context, kTfLiteEigenContext, nullptr);
  }
}
// Returns the lazily constructed Eigen thread-pool device for `context`.
// Fatal if no prior IncrementUsageCounter() call registered the context.
const Eigen::ThreadPoolDevice* GetThreadPoolDevice(TfLiteContext* context) {
  auto* ptr = GetEigenContext(context);
  if (ptr == nullptr) {
    // Fix: the message previously named a nonexistent "GetFromContext()",
    // which pointed users at the wrong API call.
    TF_LITE_FATAL(
        "Call to GetThreadPoolDevice() not preceded by "
        "IncrementUsageCounter()");
  }
  return ptr->thread_pool_holder->GetThreadPoolDevice();
}
}
} | #include "tensorflow/lite/kernels/eigen_support.h"
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
namespace tflite {
namespace eigen_support {
// Minimal TfLiteContext stub: stores a single external-context slot and
// wires Get/SetExternalContext to it so eigen_support can be exercised
// without a full interpreter.
struct TestTfLiteContext : public TfLiteContext {
  TestTfLiteContext() {
    // -1 means "no thread-count preference".
    recommended_num_threads = -1;
    external_context = nullptr;
    GetExternalContext = GetExternalContextImpl;
    SetExternalContext = SetExternalContextImpl;
  }
  static void SetExternalContextImpl(TfLiteContext* context,
                                     TfLiteExternalContextType type,
                                     TfLiteExternalContext* external_context) {
    static_cast<TestTfLiteContext*>(context)->external_context =
        external_context;
  }
  static TfLiteExternalContext* GetExternalContextImpl(
      TfLiteContext* context, TfLiteExternalContextType type) {
    return static_cast<TestTfLiteContext*>(context)->external_context;
  }
  // Single slot shared by all context types in this stub.
  TfLiteExternalContext* external_context;
};
// With no recommendation, the device reports the default of 4 threads.
TEST(EigenSupport, Default) {
  TestTfLiteContext context;
  IncrementUsageCounter(&context);
  ASSERT_NE(context.external_context, nullptr);
  EXPECT_EQ(context.external_context->type, kTfLiteEigenContext);
  auto thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 4);
  DecrementUsageCounter(&context);
}
// With one thread, work still executes (inline, no real pool).
TEST(EigenSupport, SingleThreaded) {
  TestTfLiteContext context;
  context.recommended_num_threads = 1;
  IncrementUsageCounter(&context);
  auto thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 1);
  EXPECT_EQ(thread_pool_device->numThreadsInPool(), 1);
  bool executed = false;
  auto notification =
      thread_pool_device->enqueue([&executed]() { executed = true; });
  ASSERT_NE(notification, nullptr);
  notification->Wait();
  delete notification;
  EXPECT_TRUE(executed);
  DecrementUsageCounter(&context);
}
// With multiple threads, enqueued work runs on a real pool.
TEST(EigenSupport, MultiThreaded) {
  TestTfLiteContext context;
  context.recommended_num_threads = 2;
  IncrementUsageCounter(&context);
  auto thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 2);
  bool executed = false;
  auto notification =
      thread_pool_device->enqueue([&executed]() { executed = true; });
  ASSERT_NE(notification, nullptr);
  notification->Wait();
  delete notification;
  EXPECT_TRUE(executed);
  DecrementUsageCounter(&context);
}
// Refresh() rebuilds the device on each thread-count change; -1 maps back
// to the default of 4, while an explicit 0 is honored as-is.
TEST(EigenSupport, NumThreadsChanged) {
  TestTfLiteContext context;
  context.recommended_num_threads = 1;
  IncrementUsageCounter(&context);
  auto thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 1);
  context.recommended_num_threads = 3;
  ASSERT_NE(context.external_context, nullptr);
  context.external_context->Refresh(&context);
  thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 3);
  context.recommended_num_threads = -1;
  ASSERT_NE(context.external_context, nullptr);
  context.external_context->Refresh(&context);
  thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 4);
  context.recommended_num_threads = 0;
  ASSERT_NE(context.external_context, nullptr);
  context.external_context->Refresh(&context);
  thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 0);
  context.recommended_num_threads = 3;
  ASSERT_NE(context.external_context, nullptr);
  context.external_context->Refresh(&context);
  thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 3);
  // Invalid recommendations (< -1) also fall back to the default.
  context.recommended_num_threads = -5;
  ASSERT_NE(context.external_context, nullptr);
  context.external_context->Refresh(&context);
  thread_pool_device = GetThreadPoolDevice(&context);
  ASSERT_NE(thread_pool_device, nullptr);
  EXPECT_EQ(thread_pool_device->numThreads(), 4);
  DecrementUsageCounter(&context);
}
// The shared context lives until the last reference is released.
TEST(EigenSupport, RefCounting) {
  TestTfLiteContext context;
  EXPECT_EQ(context.external_context, nullptr);
  IncrementUsageCounter(&context);
  EXPECT_NE(context.external_context, nullptr);
  IncrementUsageCounter(&context);
  EXPECT_NE(context.external_context, nullptr);
  DecrementUsageCounter(&context);
  EXPECT_NE(context.external_context, nullptr);
  DecrementUsageCounter(&context);
  EXPECT_EQ(context.external_context, nullptr);
}
}
} |
231 | #ifndef AROLLA_EXPR_OPERATORS_STD_FUNCTION_OPERATOR_H_
#define AROLLA_EXPR_OPERATORS_STD_FUNCTION_OPERATOR_H_
#include <functional>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
namespace arolla::expr_operators {
// Expression operator backed by arbitrary std::function callbacks: one that
// infers the output qtype from the input qtypes and one that evaluates the
// operator on concrete values.
class StdFunctionOperator : public expr::BasicExprOperator,
                            public expr::BuiltinExprOperatorTag {
 public:
  // Maps input qtypes to the operator's output qtype.
  using OutputQTypeFn =
      std::function<absl::StatusOr<QTypePtr>(absl::Span<const QTypePtr>)>;
  // Evaluates the operator on concrete input values.
  using EvalFn =
      std::function<absl::StatusOr<TypedValue>(absl::Span<const TypedRef>)>;
  StdFunctionOperator(absl::string_view name,
                      expr::ExprOperatorSignature signature,
                      absl::string_view doc, OutputQTypeFn output_qtype_fn,
                      EvalFn eval_fn);
  absl::StatusOr<QTypePtr> GetOutputQType(
      absl::Span<const QTypePtr> input_qtypes) const final;
  // Accessors for the stored callbacks.
  const OutputQTypeFn& GetOutputQTypeFn() const;
  const EvalFn& GetEvalFn() const;

 private:
  OutputQTypeFn output_qtype_fn_;
  EvalFn eval_fn_;
};
}
#endif
#include "arolla/expr/operators/std_function_operator.h"
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr_operators {
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
// Constructs the operator with a random fingerprint so that two
// instances never compare equal even with identical arguments (the
// callbacks cannot be fingerprinted).
StdFunctionOperator::StdFunctionOperator(absl::string_view name,
                                         ExprOperatorSignature signature,
                                         absl::string_view doc,
                                         OutputQTypeFn output_qtype_fn,
                                         EvalFn eval_fn)
    // Fix: move the by-value signature into the base instead of copying it
    // (clang-tidy performance-unnecessary-value-param).
    : BasicExprOperator(name, std::move(signature), doc, RandomFingerprint()),
      output_qtype_fn_(std::move(output_qtype_fn)),
      eval_fn_(std::move(eval_fn)) {}
// Delegates output-qtype inference to the user-supplied callback.
absl::StatusOr<QTypePtr> StdFunctionOperator::GetOutputQType(
    absl::Span<const QTypePtr> input_qtypes) const {
  return output_qtype_fn_(input_qtypes);
}
// Returns the stored output-qtype callback.
const StdFunctionOperator::OutputQTypeFn&
StdFunctionOperator::GetOutputQTypeFn() const {
  return output_qtype_fn_;
}
// Returns the stored evaluation callback.
const StdFunctionOperator::EvalFn& StdFunctionOperator::GetEvalFn() const {
  return eval_fn_;
}
} | #include "arolla/expr/operators/std_function_operator.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/array/qtype/types.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
// Fixture that ensures the Arolla runtime is initialized before each test.
class StdFunctionOperatorTest : public ::testing::Test {
 protected:
  void SetUp() override { ASSERT_OK(InitArolla()); }
};
// Eval callback: returns the first input unchanged.
absl::StatusOr<TypedValue> GetFirst(absl::Span<const TypedRef> inputs) {
  return TypedValue(inputs[0]);
}
// Output-qtype callback: the output type is the first input's type.
absl::StatusOr<QTypePtr> FirstQType(absl::Span<const QTypePtr> input_qtypes) {
  return input_qtypes[0];
}
// Eval callback: int32 addition of the first two inputs.
absl::StatusOr<TypedValue> Add(absl::Span<const TypedRef> inputs) {
  ASSIGN_OR_RETURN(int32_t x, inputs[0].As<int32_t>());
  ASSIGN_OR_RETURN(int32_t y, inputs[1].As<int32_t>());
  return TypedValue::FromValue(x + y);
}
// The operator reports the name it was constructed with.
TEST_F(StdFunctionOperatorTest, GetName) {
  StdFunctionOperator op("get_first_fn", ExprOperatorSignature{{"x"}, {"y"}},
                         "dummy op docstring", FirstQType, GetFirst);
  ASSERT_THAT(op.display_name(), "get_first_fn");
}
// The operator reports the docstring it was constructed with.
TEST_F(StdFunctionOperatorTest, GetDoc) {
  StdFunctionOperator op("get_first_fn", ExprOperatorSignature{{"x"}, {"y"}},
                         "dummy op docstring", FirstQType, GetFirst);
  ASSERT_THAT(op.GetDoc(), IsOkAndHolds("dummy op docstring"));
}
// The eval callback is stored unmodified and callable directly.
TEST_F(StdFunctionOperatorTest, GetEvalFn) {
  StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
                         "dummy op docstring", FirstQType, Add);
  int32_t x = 1;
  int32_t y = 2;
  auto res = op.GetEvalFn()({TypedRef::FromValue(x), TypedRef::FromValue(y)});
  EXPECT_OK(res);
  EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(x + y));
}
// The output-qtype callback is stored unmodified and callable directly.
TEST_F(StdFunctionOperatorTest, GetOutputQTypeFn) {
  StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
                         "dummy op docstring", FirstQType, Add);
  auto output_qtype_fn = op.GetOutputQTypeFn();
  auto res = output_qtype_fn({GetArrayQType<int32_t>(), GetQType<int32_t>()});
  EXPECT_THAT(res, IsOkAndHolds(GetArrayQType<int32_t>()));
}
// GetOutputQType delegates to the callback, including error propagation.
TEST_F(StdFunctionOperatorTest, GetOutputQType) {
  {
    StdFunctionOperator op("get_first_fn", ExprOperatorSignature{{"x"}, {"y"}},
                           "dummy op docstring", FirstQType, GetFirst);
    EXPECT_THAT(
        op.GetOutputQType({GetArrayQType<int32_t>(), GetQType<int32_t>()}),
        IsOkAndHolds(GetArrayQType<int32_t>()));
  }
  {
    auto get_snd =
        [](absl::Span<const QTypePtr> inputs) -> absl::StatusOr<QTypePtr> {
      return inputs[1];
    };
    StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
                           "dummy op docstring", std::move(get_snd), Add);
    EXPECT_THAT(
        op.GetOutputQType({GetArrayQType<int32_t>(), GetQType<float>()}),
        IsOkAndHolds(GetQType<float>()));
  }
  {
    // Errors from the callback surface through GetOutputQType unchanged.
    auto status_fn =
        [](absl::Span<const QTypePtr> inputs) -> absl::StatusOr<QTypePtr> {
      return absl::InvalidArgumentError("foo bar");
    };
    StdFunctionOperator op("add_fn", ExprOperatorSignature{{"x"}, {"y"}},
                           "dummy op docstring", std::move(status_fn), Add);
    EXPECT_THAT(
        op.GetOutputQType({GetArrayQType<int32_t>(), GetQType<float>()}),
        StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("foo bar")));
  }
}
// Output qtypes are inferred at expression-build time from literal inputs;
// leaves without qtypes yield an untyped (nullptr) expression.
TEST_F(StdFunctionOperatorTest, QTypeInference) {
  {
    auto op = std::make_shared<StdFunctionOperator>(
        "my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
        "dummy op docstring", FirstQType, GetFirst);
    ASSERT_OK_AND_ASSIGN(auto expr,
                         CallOp(op, {Literal(1.5f), Literal(kUnit)}));
    EXPECT_EQ(expr->qtype(), GetQType<float>());
  }
  {
    auto get_snd =
        [](absl::Span<const QTypePtr> inputs) -> absl::StatusOr<QTypePtr> {
      return inputs[1];
    };
    auto op = std::make_shared<StdFunctionOperator>(
        "my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
        "dummy op docstring", std::move(get_snd), GetFirst);
    ASSERT_OK_AND_ASSIGN(auto expr,
                         CallOp(op, {Literal(1.5f), Literal(kUnit)}));
    EXPECT_EQ(expr->qtype(), GetQType<Unit>());
  }
  {
    // Leaves carry no qtype, so inference cannot run yet.
    auto op = std::make_shared<StdFunctionOperator>(
        "my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
        "dummy op docstring", FirstQType, GetFirst);
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Leaf("y")}));
    EXPECT_EQ(expr->qtype(), nullptr);
  }
}
// End-to-end evaluation via Invoke, with literal and leaf inputs.
TEST_F(StdFunctionOperatorTest, Eval) {
  {
    auto op = std::make_shared<StdFunctionOperator>(
        "get_first", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
        FirstQType, GetFirst);
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1), Literal(2)}));
    auto res = Invoke(expr, {});
    EXPECT_OK(res.status());
    EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(1));
  }
  {
    auto op = std::make_shared<StdFunctionOperator>(
        "add", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
        FirstQType, Add);
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1), Literal(2)}));
    auto res = Invoke(expr, {});
    EXPECT_OK(res.status());
    EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(3));
  }
  {
    // Leaf inputs are bound at Invoke time.
    auto op = std::make_shared<StdFunctionOperator>(
        "add", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
        FirstQType, Add);
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Leaf("y")}));
    auto res = Invoke(expr, {{"x", TypedValue::FromValue(1)},
                             {"y", TypedValue::FromValue(2)}});
    EXPECT_OK(res.status());
    EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(3));
  }
}
// A variadic (*args) signature works like a fixed-arity one at eval time.
TEST_F(StdFunctionOperatorTest, VariadicInput) {
  ASSERT_OK_AND_ASSIGN(auto signature, ExprOperatorSignature::Make("*args"));
  auto op = std::make_shared<StdFunctionOperator>(
      "add", signature, "dummy op docstring", FirstQType, Add);
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1), Literal(2)}));
  auto res = Invoke(expr, {});
  EXPECT_OK(res.status());
  EXPECT_THAT(res.value().As<int32_t>(), IsOkAndHolds(3));
}
// If the eval result's qtype contradicts the declared output qtype, the
// runtime rejects it.
TEST_F(StdFunctionOperatorTest, IncorrectFnOutput) {
  auto op = std::make_shared<StdFunctionOperator>(
      "get_first", ExprOperatorSignature{{"x"}}, "dummy op docstring",
      [](absl::Span<const QTypePtr> input_qtypes) {
        return GetQType<int32_t>();
      },
      GetFirst);
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1.0)}));
  EXPECT_THAT(
      Invoke(expr, {}),
      StatusIs(
          absl::StatusCode::kInvalidArgument,
          HasSubstr("expected the result to have qtype INT32, got FLOAT64")));
}
// Errors returned by the eval callback propagate out of Invoke.
TEST_F(StdFunctionOperatorTest, FnRaises) {
  auto op = std::make_shared<StdFunctionOperator>(
      "get_first", ExprOperatorSignature{{"x"}}, "dummy op docstring",
      FirstQType, [](absl::Span<const TypedRef> inputs) {
        return absl::InvalidArgumentError("foo bar");
      });
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1)}));
  EXPECT_THAT(Invoke(expr, {}), StatusIs(absl::StatusCode::kInvalidArgument,
                                         HasSubstr("foo bar")));
}
// Fingerprints are randomized per instance, so no two operators compare
// equal -- even when constructed with identical arguments.
TEST_F(StdFunctionOperatorTest, Fingerprint) {
  StdFunctionOperator op1("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
                          "dummy op docstring", FirstQType, GetFirst);
  {
    StdFunctionOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
                            "dummy op docstring", FirstQType, GetFirst);
    EXPECT_NE(op1.fingerprint(), op2.fingerprint());
  }
  {
    StdFunctionOperator op2("another_name", ExprOperatorSignature{{"x"}, {"y"}},
                            "dummy op docstring", FirstQType, GetFirst);
    EXPECT_NE(op1.fingerprint(), op2.fingerprint());
  }
  {
    StdFunctionOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}},
                            "dummy op docstring", FirstQType, GetFirst);
    EXPECT_NE(op1.fingerprint(), op2.fingerprint());
  }
  {
    StdFunctionOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
                            "another docstring", FirstQType, GetFirst);
    EXPECT_NE(op1.fingerprint(), op2.fingerprint());
  }
  {
    StdFunctionOperator op2(
        "my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
        "dummy op docstring",
        [](absl::Span<const QTypePtr> input_qtypes) {
          return GetQType<float>();
        },
        GetFirst);
    EXPECT_NE(op1.fingerprint(), op2.fingerprint());
  }
  {
    StdFunctionOperator op2(
        "my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
        "dummy op docstring", FirstQType,
        [](absl::Span<const TypedRef> inputs) -> absl::StatusOr<TypedValue> {
          return TypedValue(inputs[1]);
        });
    EXPECT_NE(op1.fingerprint(), op2.fingerprint());
  }
}
}
} |
232 | #ifndef XLA_SERVICE_GPU_GPU_SPMD_PIPELINE_H_
#define XLA_SERVICE_GPU_GPU_SPMD_PIPELINE_H_
#include <optional>
#include "absl/functional/function_ref.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_pass_pipeline.h"
namespace xla {
namespace gpu {
// Appends the SPMD partitioning pass sequence to `spmd_pipeline`:
// a fixed-point simplification sub-pipeline, optional auto-sharding,
// sharding propagation, and the stateful-RNG SPMD partitioner.
void AddSPMDPasses(
    const HloModule* hlo_module,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
    const se::GpuComputeCapability& compute_capability,
    HloPassPipeline& spmd_pipeline,
    std::optional<const absl::FunctionRef<void(HloPassPipeline&)>>
        auto_sharding_func = std::nullopt);
}
}
#endif
#include "xla/service/gpu/gpu_spmd_pipeline.h"
#include <cstdint>
#include <optional>
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/spmd/collective_permute_motion.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
// Builds the GPU SPMD pass sequence. Pass ordering here is deliberate:
// simplification runs to a fixed point before sharding propagation and
// partitioning.
void AddSPMDPasses(
    const HloModule* hlo_module,
    const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
    const se::GpuComputeCapability& compute_capability,
    HloPassPipeline& spmd_pipeline,
    std::optional<const absl::FunctionRef<void(HloPassPipeline&)>>
        auto_sharding_func) {
  const int64_t num_partitions = hlo_module->config().num_partitions();
  CHECK_GE(num_partitions, 1);
  // Sub-pipeline re-run until no pass reports a change.
  HloPassPipeline& spmd_simplify =
      spmd_pipeline.AddPass<HloPassFix<HloPassPipeline>>("spmd-simplify");
  spmd_simplify.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
                                                compute_capability);
  spmd_simplify.AddPass<SortSimplifier>();
  spmd_simplify.AddPass<TupleSimplifier>();
  spmd_simplify.AddPass<ScatterExpander>(
      ScatterExpander::kEliminateSimpleScatters);
  spmd_simplify.AddPass<GatherExpander>(
      GatherExpander::kEliminateSimpleGathers);
  spmd_simplify.AddPass<WhileLoopConstantSinking>();
  spmd_simplify.AddPass<WhileLoopSimplifier>();
  ReshapeMoverOptions reshape_mover_options;
  reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
  spmd_simplify.AddPass<ReshapeMover>(reshape_mover_options);
  // Run the algebraic simplifier again to a fixed point after reshapes move.
  spmd_simplify.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
      layout_insensitive_algsimp_opts, compute_capability);
  spmd_simplify.AddPass<HloConstantFolding>();
  spmd_simplify.AddPass<ConditionalSimplifier>();
  // NOTE(review): added to the OUTER pipeline while the surrounding passes
  // go to spmd_simplify -- looks intentional (split constants once, not per
  // fixed-point iteration), but confirm.
  spmd_pipeline.AddPass<HloConstantSplitter>();
  spmd_simplify.AddPass<HloDCE>();
  if (auto_sharding_func.has_value()) {
    (*auto_sharding_func)(spmd_pipeline);
  }
  spmd_pipeline.AddPass<ShardingPropagation>(
      true, false,
      hlo_module->config().allow_spmd_sharding_propagation_to_output());
  spmd_pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
      num_partitions, hlo_module->config().replica_count(),
      hlo_module->config()
          .debug_options()
          .xla_gpu_threshold_for_windowed_einsum_mib(),
      hlo_module->config()
          .debug_options()
          .xla_gpu_multi_streamed_windowed_einsum(),
      true,
      true);
  spmd_pipeline.AddPass<CollectivePermuteMotion>();
}
}
} | #include "xla/service/gpu/gpu_spmd_pipeline.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/client/executable_build_options.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
// Fixture that runs the GPU SPMD partitioning pipeline on parsed HLO.
// Parameterized on a bool selecting Shardonnay (true) vs. GSPMD (false).
class GpuSpmdPartitioningTest : public HloTestBase,
                                public ::testing::WithParamInterface<bool> {
 public:
  // Parses `hlo_module`, builds the SPMD pipeline for an Ampere (8.0) GPU
  // with no auto-sharding, runs it, and returns the partitioned module.
  absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
      const char* hlo_module, int64_t num_devices) {
    HloModuleConfig config = GetModuleConfigForTest(
        1, num_devices);
    config.set_num_partitions(num_devices);
    TF_ASSIGN_OR_RETURN(auto module,
                        ParseAndReturnVerifiedModule(hlo_module, config));
    EXPECT_FALSE(config.debug_options().xla_use_shardonnay())
        << "Shardonnay not supported yet";
    HloPassPipeline spmd_pipeline("spmd-partitioner");
    se::CudaComputeCapability ampere(8, 0);
    AlgebraicSimplifierOptions alg_simplifier_options;
    AddSPMDPasses(module.get(), alg_simplifier_options, ampere, spmd_pipeline,
                  std::nullopt);
    TF_RETURN_IF_ERROR(spmd_pipeline.Run(module.get()).status());
    XLA_VLOG_LINES(10, module->ToString());
    return module;
  }
 protected:
  // True when this instantiation runs with Shardonnay enabled.
  bool UseShardonnay() const { return GetParam(); }
  // Propagates the Shardonnay test parameter into the debug options.
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
    debug_options.set_xla_use_shardonnay(UseShardonnay());
    return debug_options;
  }
};
// Partitioning a sharded dot across 8 devices must shrink the per-device
// entry-computation parameter shapes while keeping the declared layouts.
TEST_P(GpuSpmdPartitioningTest, DotWithEntryComputationLayout) {
  if (UseShardonnay()) {
    GTEST_SKIP() << "Shardonnay not supported yet";
  }
  const char* const kHloModule = R"(
  HloModule module,
    entry_computation_layout={(f32[8,16]{0,1}, f32[16,24]{1,0})
                              ->f32[8,24]{1,0}}

  ENTRY main {
    %p0 = f32[8,16] parameter(0), sharding={devices=[1,8]<=[8]}
    %p1 = f32[16,24] parameter(1), sharding={devices=[8,1]<=[8]}
    ROOT %dot = f32[8,24] dot(%p0, %p1), lhs_contracting_dims={1},
                                         rhs_contracting_dims={0}
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          PartitionComputation(kHloModule, 8));
  // Contracting dimension 16 is split 8 ways -> 2 per device.
  EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(0),
            ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 2}, {0, 1}));
  EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(1),
            ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {1, 0}));
  // The result shape is replicated, hence unchanged.
  EXPECT_EQ(module->config().entry_computation_layout().result_shape(),
            ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 24}, {1, 0}));
}
// Names each parameterized instantiation after the partitioner it selects.
std::string TestParamToString(
    const ::testing::TestParamInfo<bool>& param_info) {
  return param_info.param ? "Shardonnay" : "GSPMD";
}
INSTANTIATE_TEST_SUITE_P(All, GpuSpmdPartitioningTest,
                         ::testing::Values(true, false), TestParamToString);
}
}
} |
233 | #ifndef XLA_TSL_UTIL_PROTO_PROTO_UTILS_H_
#define XLA_TSL_UTIL_PROTO_PROTO_UTILS_H_
#include "google/protobuf/duration.pb.h"
#include "absl/time/time.h"
namespace tsl {
namespace proto_utils {
// Converts an absl::Duration into a protobuf Duration message by splitting
// it into whole seconds plus the remaining nanoseconds.
inline google::protobuf::Duration ToDurationProto(absl::Duration duration) {
  google::protobuf::Duration proto;
  // IDivDuration leaves the remainder in `duration` for the next split.
  const auto seconds =
      absl::IDivDuration(duration, absl::Seconds(1), &duration);
  const auto nanos =
      absl::IDivDuration(duration, absl::Nanoseconds(1), &duration);
  proto.set_seconds(seconds);
  proto.set_nanos(nanos);
  return proto;
}
// Converts a protobuf Duration message back into an absl::Duration by
// summing its seconds and nanoseconds components.
inline absl::Duration FromDurationProto(google::protobuf::Duration proto) {
  return absl::Nanoseconds(proto.nanos()) + absl::Seconds(proto.seconds());
}
}
}
#endif
#include "tensorflow/core/util/proto/proto_utils.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace proto_utils {
using tensorflow::protobuf::FieldDescriptor;
using tensorflow::protobuf::internal::WireFormatLite;
// Returns whether a proto field of wire type `field_type` may be decoded
// into a tensor of `dtype`. Narrow integral/float field types may also be
// widened into a larger compatible DataType (e.g. int32 field -> DT_INT64).
bool IsCompatibleType(FieldDescriptor::Type field_type, DataType dtype) {
  switch (field_type) {
    case WireFormatLite::TYPE_DOUBLE:
      return dtype == tensorflow::DT_DOUBLE;
    case WireFormatLite::TYPE_FLOAT:
      // A float field may be widened into a double tensor.
      return dtype == tensorflow::DT_FLOAT || dtype == tensorflow::DT_DOUBLE;
    case WireFormatLite::TYPE_INT64:
      return dtype == tensorflow::DT_INT64;
    case WireFormatLite::TYPE_UINT64:
      return dtype == tensorflow::DT_UINT64;
    case WireFormatLite::TYPE_INT32:
      return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
    case WireFormatLite::TYPE_FIXED64:
      return dtype == tensorflow::DT_UINT64;
    case WireFormatLite::TYPE_FIXED32:
      return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
    case WireFormatLite::TYPE_BOOL:
      return dtype == tensorflow::DT_BOOL;
    case WireFormatLite::TYPE_STRING:
      return dtype == tensorflow::DT_STRING;
    // Groups, messages and bytes are all surfaced as serialized strings.
    case WireFormatLite::TYPE_GROUP:
      return dtype == tensorflow::DT_STRING;
    case WireFormatLite::TYPE_MESSAGE:
      return dtype == tensorflow::DT_STRING;
    case WireFormatLite::TYPE_BYTES:
      return dtype == tensorflow::DT_STRING;
    case WireFormatLite::TYPE_UINT32:
      return dtype == tensorflow::DT_UINT32 || dtype == tensorflow::DT_UINT64;
    case WireFormatLite::TYPE_ENUM:
      return dtype == tensorflow::DT_INT32;
    case WireFormatLite::TYPE_SFIXED32:
      return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
    case WireFormatLite::TYPE_SFIXED64:
      return dtype == tensorflow::DT_INT64;
    case WireFormatLite::TYPE_SINT32:
      return dtype == tensorflow::DT_INT32 || dtype == tensorflow::DT_INT64;
    case WireFormatLite::TYPE_SINT64:
      return dtype == tensorflow::DT_INT64;
  }
  // Bug fix: previously control could fall off the end of the function
  // (undefined behavior) if `field_type` held a value outside the handled
  // enumerators. Treat any unknown wire type as incompatible.
  return false;
}
// Parses `input` as a textproto into `output`. On parse failure returns
// kInvalidArgument with the collected, 1-indexed parser diagnostics.
Status ParseTextFormatFromString(absl::string_view input,
                                 protobuf::Message* output) {
  DCHECK(output != nullptr) << "output must be non NULL";
  // Defensive double-check so release builds fail gracefully too.
  if (output == nullptr) {
    LOG(ERROR) << "output must be non NULL";
    return Status(absl::StatusCode::kInvalidArgument,
                  "output must be non NULL");
  }
  string errors;
  StringErrorCollector collector(&errors, /*one_indexing=*/true);
  protobuf::TextFormat::Parser parser;
  parser.RecordErrorsTo(&collector);
  if (parser.ParseFromString(string(input), output)) {
    return absl::OkStatus();
  }
  return Status(absl::StatusCode::kInvalidArgument, errors);
}
// Convenience constructor: collects errors with 0-based line/column indices.
StringErrorCollector::StringErrorCollector(string* error_text)
    : StringErrorCollector(error_text, false) {}
// Collects errors into `*error_text`; when `one_indexing` is set, reported
// line/column numbers are shifted from 0-based to 1-based.
StringErrorCollector::StringErrorCollector(string* error_text,
                                           bool one_indexing)
    : error_text_(error_text), index_offset_(one_indexing ? 1 : 0) {
  DCHECK(error_text_ != nullptr) << "error_text must be non NULL";
  // In release builds a null pointer is tolerated: AddError() becomes a
  // no-op rather than crashing.
  if (error_text_ == nullptr) {
    LOG(ERROR) << "error_text must be non NULL";
  }
}
// Appends one "line(column): message" diagnostic, applying the configured
// index offset. No-op when the collector was constructed with a null sink.
void StringErrorCollector::AddError(int line, int column,
                                    const string& message) {
  if (error_text_ == nullptr) {
    return;
  }
  absl::SubstituteAndAppend(error_text_, "$0($1): $2\n", line + index_offset_,
                            column + index_offset_, message);
}
// Warnings are recorded identically to errors.
void StringErrorCollector::AddWarning(int line, int column,
                                      const string& message) {
  AddError(line, column, message);
}
}
} | #include "tensorflow/core/util/proto/proto_utils.h"
#include <gmock/gmock.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
using proto_utils::ParseTextFormatFromString;
using proto_utils::StringErrorCollector;
using ::testing::ContainsRegex;
// Valid textproto input populates the output message.
TEST(ParseTextFormatFromStringTest, Success) {
  protobuf::DescriptorProto output;
  TF_ASSERT_OK(ParseTextFormatFromString("name: \"foo\"", &output));
  EXPECT_EQ(output.name(), "foo");
}
// Syntax errors surface as kInvalidArgument with the offending token.
TEST(ParseTextFormatFromStringTest, ErrorOnInvalidSyntax) {
  protobuf::DescriptorProto output;
  Status status = ParseTextFormatFromString("name: foo", &output);
  EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
  EXPECT_THAT(status.message(), ContainsRegex("foo"));
  EXPECT_FALSE(output.has_name());
}
// Unknown field names are rejected with the field name in the message.
TEST(ParseTextFormatFromStringTest, ErrorOnUnknownFieldName) {
  protobuf::DescriptorProto output;
  Status status = ParseTextFormatFromString("badname: \"foo\"", &output);
  EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
  EXPECT_THAT(status.message(), ContainsRegex("badname"));
  EXPECT_FALSE(output.has_name());
}
// Debug builds DCHECK on a null output; release builds return an error.
TEST(ParseTextFormatFromStringTest, DiesOnNullOutputPointer) {
#ifndef NDEBUG
  ASSERT_DEATH(ParseTextFormatFromString("foo", nullptr).IgnoreError(),
               "output.*non NULL");
#else
  Status status = ParseTextFormatFromString("foo", nullptr);
  EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
  EXPECT_THAT(status.message(), ContainsRegex("output.*non NULL"));
#endif
}
TEST(StringErrorCollectorTest, AppendsError) {
  string err;
  StringErrorCollector collector(&err);
  collector.AddError(1, 2, "foo");
  EXPECT_EQ("1(2): foo\n", err);
}
TEST(StringErrorCollectorTest, AppendsWarning) {
  string err;
  StringErrorCollector collector(&err);
  collector.AddWarning(1, 2, "foo");
  EXPECT_EQ("1(2): foo\n", err);
}
TEST(StringErrorCollectorTest, AppendsMultipleError) {
  string err;
  StringErrorCollector collector(&err);
  collector.AddError(1, 2, "foo");
  collector.AddError(3, 4, "bar");
  EXPECT_EQ("1(2): foo\n3(4): bar\n", err);
}
TEST(StringErrorCollectorTest, AppendsMultipleWarning) {
  string err;
  StringErrorCollector collector(&err);
  collector.AddWarning(1, 2, "foo");
  collector.AddWarning(3, 4, "bar");
  EXPECT_EQ("1(2): foo\n3(4): bar\n", err);
}
// one_indexing=true shifts both line and column by +1.
TEST(StringErrorCollectorTest, OffsetWorks) {
  string err;
  StringErrorCollector collector(&err, true);
  collector.AddError(1, 2, "foo");
  collector.AddWarning(3, 4, "bar");
  EXPECT_EQ("2(3): foo\n4(5): bar\n", err);
}
// Debug builds DCHECK on a null sink; release builds tolerate it silently.
TEST(StringErrorCollectorTest, DiesOnNullErrorText) {
#ifndef NDEBUG
  ASSERT_DEATH(StringErrorCollector(nullptr), "error_text.*non NULL");
#else
  StringErrorCollector collector(nullptr);
  collector.AddError(1, 2, "foo");
  collector.AddWarning(3, 4, "bar");
#endif
}
} |
234 | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_TPU_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_TPU_H_
#include "tensorflow/core/framework/graph.pb.h"
namespace tensorflow {
namespace grappler {
bool IsLegacyTPUBridgeGraphDef(const GraphDef& def);
}
}
#endif
#include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace tensorflow {
namespace grappler {
// Returns true if `def` contains ops emitted by the legacy TPU bridge
// (TPUCompile / TPUPartitionedCall), either in the main graph or in any
// function of its library.
bool IsLegacyTPUBridgeGraphDef(const GraphDef& def) {
  const auto is_tpu_bridge_op = [](const auto& node) {
    return node.op() == "TPUCompile" || node.op() == "TPUPartitionedCall";
  };
  for (const auto& node : def.node()) {
    if (is_tpu_bridge_op(node)) {
      return true;
    }
  }
  if (!def.has_library()) {
    return false;
  }
  for (const auto& function_def : def.library().function()) {
    for (const auto& node : function_def.node_def()) {
      if (is_tpu_bridge_op(node)) {
        return true;
      }
    }
  }
  return false;
}
}
} | #include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class TpuTest : public ::testing::Test {};
// Non-TPU ops in both the graph and its library must not be detected.
TEST_F(TpuTest, NotTpuGraph) {
  {
    GraphDef tpu_graph;
    tpu_graph.add_node()->set_op("Add");
    FunctionDefLibrary* library = tpu_graph.mutable_library();
    FunctionDef* function_def = library->add_function();
    function_def->add_node_def()->set_op("Mul");
    EXPECT_FALSE(IsLegacyTPUBridgeGraphDef(tpu_graph));
  }
}
// A TPU op in the main graph is detected.
TEST_F(TpuTest, TpuMainGraph) {
  {
    GraphDef tpu_graph;
    tpu_graph.add_node()->set_op("TPUPartitionedCall");
    EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
  }
}
// A TPU op nested inside a library function is also detected.
TEST_F(TpuTest, TpuLibraryGraph) {
  {
    GraphDef tpu_graph;
    tpu_graph.add_node()->set_op("BatchFunction");
    FunctionDefLibrary* library = tpu_graph.mutable_library();
    FunctionDef* function_def = library->add_function();
    function_def->add_node_def()->set_op("TPUPartitionedCall");
    EXPECT_TRUE(IsLegacyTPUBridgeGraphDef(tpu_graph));
  }
}
}
} |
235 | #ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
#include <string>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
class ScopedSetEnv {
public:
ScopedSetEnv(const char* var_name, const char* new_value);
~ScopedSetEnv();
private:
std::string var_name_;
std::string old_value_;
bool was_unset_;
};
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/base/internal/scoped_set_env.h"
#ifdef _WIN32
#include <windows.h>
#endif
#include <cstdlib>
#include "absl/base/internal/raw_logging.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
#ifdef _WIN32
const int kMaxEnvVarValueSize = 1024;
#endif
// Sets environment variable `name` to `value`, or removes it entirely when
// `value` is nullptr, using the appropriate platform API.
void SetEnvVar(const char* name, const char* value) {
#ifdef _WIN32
  // On Windows a null value deletes the variable.
  SetEnvironmentVariableA(name, value);
#else
  if (value != nullptr) {
    ::setenv(name, value, 1);
  } else {
    ::unsetenv(name);
  }
#endif
}
}
// Saves the current value (or unset state) of `var_name`, then sets it to
// `new_value` (nullptr unsets it). The destructor restores the old state.
ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
    : var_name_(var_name), was_unset_(false) {
#ifdef _WIN32
  char buf[kMaxEnvVarValueSize];
  auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
  ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
  if (get_res == 0) {
    // A zero-length result is ambiguous: distinguish "unset" from "empty".
    was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
  } else {
    old_value_.assign(buf, get_res);
  }
  // Bug fix: the Windows branch previously called SetEnvironmentVariableA
  // here AND fell through to SetEnvVar below, setting the variable twice.
  // SetEnvVar handles both platforms, so the extra call is removed.
#else
  const char* val = ::getenv(var_name_.c_str());
  if (val == nullptr) {
    was_unset_ = true;
  } else {
    old_value_ = val;
  }
#endif
  SetEnvVar(var_name_.c_str(), new_value);
}
// Restores the variable to its pre-construction state: unsets it if it was
// originally unset, otherwise reinstates the saved value.
ScopedSetEnv::~ScopedSetEnv() {
  SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
}
}
ABSL_NAMESPACE_END
} | #ifdef _WIN32
#include <windows.h>
#endif
#include "gtest/gtest.h"
#include "absl/base/internal/scoped_set_env.h"
namespace {
using absl::base_internal::ScopedSetEnv;
// Test helper: returns the value of environment variable `name`, the
// sentinel "UNSET" when it is absent, or (Windows only) "TOO_BIG" when the
// value exceeds the local buffer.
std::string GetEnvVar(const char* name) {
#ifdef _WIN32
  char buf[1024];
  auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
  if (get_res >= sizeof(buf)) {
    return "TOO_BIG";
  }
  if (get_res == 0) {
    return "UNSET";
  }
  return std::string(buf, get_res);
#else
  const char* val = ::getenv(name);
  if (val == nullptr) {
    return "UNSET";
  }
  return val;
#endif
}
// Setting a previously-unset variable, then restoring the unset state.
TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
  {
    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
  }
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
// nullptr on an unset variable is a no-op both inside and after the scope.
TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
  {
    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
  }
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
}
// Overwriting an existing value is reverted when the inner scope exits.
TEST(ScopedSetEnvTest, SetExistingVarToString) {
  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
  {
    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
  }
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
// Unsetting an existing value is reverted when the inner scope exits.
TEST(ScopedSetEnvTest, SetExistingVarToNull) {
  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
  {
    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
  }
  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
}
} |
236 | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_PIN_TO_HOST_OPTIMIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_PIN_TO_HOST_OPTIMIZER_H_
#include <unordered_set>
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
namespace internal {
string TryFindHostDevice(const gtl::FlatSet<string>& devices,
bool has_device_cpu, const string& device);
}
class PinToHostOptimizer : public GraphOptimizer {
public:
PinToHostOptimizer() {}
explicit PinToHostOptimizer(RewriterConfig::Toggle opt_level) {}
~PinToHostOptimizer() override {}
string name() const override { return "pin_to_host_optimizer"; };
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace grappler {
namespace internal {
constexpr int64_t kTensorMaxSize = 64;
// Ops that must never be pinned to the host: collectives (device
// placement is semantically meaningful), control flow, and NoOp.
bool IsDenylisted(const NodeDef& node) {
  if (IsCollective(node)) {
    return true;
  }
  if (IsControlFlow(node)) {
    return true;
  }
  return IsNoOp(node);
}
// Returns true if a tensor with these properties is small enough to keep
// on the host: strings always qualify; int32/int64/float qualify when the
// (statically known) element count is within kTensorMaxSize.
bool IsTensorSmall(const OpInfo::TensorProperties& prop) {
  if (prop.dtype() == DataType::DT_STRING) {
    return true;
  }
  const bool supported_dtype = prop.dtype() == DataType::DT_INT32 ||
                               prop.dtype() == DataType::DT_INT64 ||
                               prop.dtype() == DataType::DT_FLOAT;
  if (!supported_dtype) {
    return false;
  }
  // A negative count means the shape is not fully known statically.
  const int64_t num_elements = NumCoefficients(prop.shape());
  return num_elements >= 0 && num_elements <= kTensorMaxSize;
}
// Looks for a registered kernel for `node` on each device type in order.
// On the first hit, optionally stores the KernelDef into `*kdef` and
// returns OK; returns NotFound if no device has a matching kernel.
Status TryFindKernelDef(const std::vector<DeviceType>& devices,
                        const NodeDef& node, const KernelDef** kdef) {
  for (const DeviceType& device_type : devices) {
    const KernelDef* found = nullptr;
    if (FindKernelDef(device_type, node, &found, nullptr).ok()) {
      if (kdef != nullptr) {
        *kdef = found;
      }
      return absl::OkStatus();
    }
  }
  return errors::NotFound("Could not find KernelDef for op: ", node.op());
}
// Determines whether output `port_id` of `node` is "host friendly", i.e.
// whether consuming it from host (CPU) memory would not force an extra
// device-to-host transfer. Result is written to `*is_candidate`.
Status IsNodeOutputPortHostFriendly(const GraphView& graph,
                                    GraphProperties* properties,
                                    const NodeDef& node, int port_id,
                                    bool* is_candidate) {
  *is_candidate = false;
  // Denylisted ops (collectives, control flow, NoOp) are never candidates.
  if (IsDenylisted(node)) {
    return absl::OkStatus();
  }
  // Lazily run static shape inference; results are cached in `properties`.
  if (!properties->has_properties()) {
    TF_RETURN_IF_ERROR(properties->InferStatically(
        false, false,
        false));
  }
  const auto& output_properties = properties->GetOutputProperties(node.name());
  int output_properties_size = output_properties.size();
  if (port_id >= output_properties_size) {
    LOG(WARNING) << "port_id=" << port_id
                 << " but output_properties.size()=" << output_properties.size()
                 << "\n"
                 << node.DebugString();
    return absl::OkStatus();
  }
  // Only small tensors are worth keeping in host memory.
  if (!IsTensorSmall(output_properties[port_id])) {
    return absl::OkStatus();
  }
  // Identity forwards its input, so recurse: the output is host friendly
  // iff every fanin output is host friendly.
  if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
    for (const auto& fanin : graph.GetFanins(node, false)) {
      bool fanin_candidate = false;
      TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
          graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
      if (!fanin_candidate) {
        return absl::OkStatus();
      }
    }
    *is_candidate = true;
    return absl::OkStatus();
  }
  // A node already placed on a CPU device produces host-resident outputs.
  if (absl::StrContains(node.device(), DEVICE_CPU)) {
    *is_candidate = true;
    return absl::OkStatus();
  }
  // Otherwise consult the kernel registration for this output.
  const OpDef* op = nullptr;
  Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
  if (!s.ok()) {
    LOG(WARNING) << "Could not find OpDef for : " << node.op();
    return absl::OkStatus();
  }
  const int output_arg_id = OpOutputPortIdToArgId(node, *op, port_id);
  if (output_arg_id < 0) {
    LOG(WARNING) << "Invalid port: " << port_id << "!\n"
                 << node.DebugString() << "\n"
                 << op->DebugString();
    return absl::OkStatus();
  }
  const KernelDef* kernel = nullptr;
  s = TryFindKernelDef({node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node,
                       &kernel);
  if (!s.ok()) {
    LOG(INFO) << "Could not find KernelDef for: " << node.op();
    return absl::OkStatus();
  }
  // Host friendly if the kernel declares this output arg in HostMemory.
  for (const string& host_memory_arg : kernel->host_memory_arg()) {
    if (op->output_arg(output_arg_id).name() == host_memory_arg) {
      *is_candidate = true;
      break;
    }
  }
  return absl::OkStatus();
}
bool IsNodeInputPortHostFriendly(const NodeDef& node, int port_id) {
if (absl::StrContains(node.device(), DEVICE_CPU)) {
return true;
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return false;
}
const int input_arg_id = OpInputPortIdToArgId(node, *op, port_id);
const KernelDef* kernel = nullptr;
s = internal::TryFindKernelDef(
{node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node, &kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return false;
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->input_arg(input_arg_id).name() == host_memory_arg) {
return true;
}
}
return false;
}
// Determines whether `node` as a whole may be pinned to the host: it must
// have a CPU kernel, every fanin output must be host friendly, and every
// output must be small. Nodes already on a CPU device qualify directly.
Status IsNodeHostCandidate(const GraphView& graph, GraphProperties* properties,
                           const NodeDef& node, bool* is_candidate) {
  *is_candidate = false;
  // Already on CPU: trivially a candidate.
  if (absl::StrContains(node.device(), DEVICE_CPU)) {
    *is_candidate = true;
    return absl::OkStatus();
  }
  // Denylisted ops (collectives, control flow, NoOp) are never moved.
  if (IsDenylisted(node)) {
    return absl::OkStatus();
  }
  // The op must have a registered CPU kernel to run on the host at all.
  Status s = TryFindKernelDef({DEVICE_CPU}, node, nullptr);
  if (!s.ok()) {
    return absl::OkStatus();
  }
  // All inputs must be producible in host memory.
  for (const GraphView::OutputPort& fanin :
       graph.GetFanins(node, false)) {
    bool fanin_candidate = false;
    TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
        graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
    if (!fanin_candidate) {
      return absl::OkStatus();
    }
  }
  // Lazily run static shape inference (results cached in `properties`).
  if (!properties->has_properties()) {
    TF_RETURN_IF_ERROR(properties->InferStatically(
        false, false,
        false));
  }
  // All outputs must be small enough to keep on the host.
  for (const auto& prop : properties->GetOutputProperties(node.name())) {
    if (!IsTensorSmall(prop)) {
      return absl::OkStatus();
    }
  }
  *is_candidate = true;
  return absl::OkStatus();
}
// Picks a host (CPU) device for a node currently assigned to `device`.
// An empty assignment maps to the default CPU device (when one exists);
// a GPU assignment is rewritten to a CPU device that is present in
// `devices`. Returns "" when no suitable host device is found.
string TryFindHostDevice(const gtl::FlatSet<string>& devices,
                         bool has_device_cpu, const string& device) {
  if (device.empty()) {
    return has_device_cpu ? "/device:CPU:0" : "";
  }
  if (!absl::StrContains(device, DEVICE_GPU)) {
    return "";
  }
  // Try progressively coarser rewrites of the GPU device string; keep the
  // first rewritten device that actually exists.
  for (const auto& [search, replacement] :
       {std::pair<string, string>("GPU", "CPU:0"),
        std::pair<string, string>("/device", "/device:CPU:0")}) {
    const string host_device =
        strings::StrCat(device.substr(0, device.rfind(search)), replacement);
    if (devices.find(host_device) != devices.end()) {
      return host_device;
    }
  }
  return "";
}
}
// Moves ops producing small int32/int64/float/string tensors onto the host
// (CPU). Constants that were moved are swapped back if any consumer cannot
// read the value from host memory.
Status PinToHostOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
                                    GraphDef* optimized_graph) {
  *optimized_graph = item.graph;
  // Legacy TPU-bridge graphs rely on their placements; leave them alone.
  if (IsLegacyTPUBridgeGraphDef(*optimized_graph)) {
    return absl::OkStatus();
  }
  GraphProperties properties(item);
  GraphView graph(optimized_graph);
  gtl::FlatSet<string> devices;
  if (cluster) {
    const std::vector<string> device_names = cluster->GetDeviceNames();
    devices.insert(device_names.begin(), device_names.end());
  } else {
    devices = {"/device:CPU:0"};
  }
  const bool has_device_cpu = devices.find("/device:CPU:0") != devices.end();
  // Topologically sort so fanins are processed before their consumers.
  TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
  // Constants moved to the host, paired with their original device so the
  // second pass can restore them if needed.
  std::vector<std::pair<NodeDef*, string>> const_nodes;
  for (auto& node : *optimized_graph->mutable_node()) {
    GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
    bool is_candidate = false;
    TF_RETURN_IF_ERROR(
        internal::IsNodeHostCandidate(graph, &properties, node, &is_candidate));
    if (!is_candidate) {
      continue;
    }
    string device =
        internal::TryFindHostDevice(devices, has_device_cpu, node.device());
    if (!device.empty()) {
      if (IsConstant(node)) {
        const_nodes.emplace_back(&node, node.device());
      }
      VLOG(2) << "Moving node " << node.name() << " to device " << device;
      *node.mutable_device() = std::move(device);
    }
  }
  // Second pass: revert constants whose consumers cannot take the input
  // from host memory (moving them would force a host-to-device copy).
  for (auto& it : const_nodes) {
    GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
    NodeDef* node = it.first;
    const string& device = it.second;
    for (const GraphView::InputPort& fanout : graph.GetFanouts(*node, false)) {
      if (!internal::IsNodeInputPortHostFriendly(*fanout.node,
                                                 fanout.port_id)) {
        VLOG(2) << "Swapping node " << node->name() << " back to device "
                << device;
        node->set_device(device);
        break;
      }
    }
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class PinToHostOptimizerTest : public GrapplerTest {};
// With no devices, no host device can be chosen.
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceNoDevices) {
  gtl::FlatSet<string> devices = {};
  EXPECT_EQ(internal::TryFindHostDevice(devices, false, "ABC"), "");
}
// XLA_GPU assignments (including wildcard ordinals) map to the CPU device.
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceCpuXlaGpu) {
  gtl::FlatSet<string> devices = {"/device:CPU:0", "/device:XLA_GPU:0"};
  EXPECT_EQ(internal::TryFindHostDevice(devices, true, ""), "/device:CPU:0");
  EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:0"),
            "/device:CPU:0");
  EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:*"),
            "/device:CPU:0");
}
// Small int/string ops are pinned to CPU; large ones keep their placement.
TEST_F(PinToHostOptimizerTest, OptimizeSmallOpsToHost) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
  Output c = ops::Shape(s.WithOpName("c"), a);
  Output d = ops::Const(s.WithOpName("d"), 0, {1});
  Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
  int num_int32 = 4;
  Output f = ops::Const(s.WithOpName("f"), {"test"});
  GrapplerItem item;
  item.fetch = {"a", "c", "d", "e", "f"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  PinToHostOptimizer optimizer(RewriterConfig::ON);
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  auto tensors = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(tensors_expected.size(), tensors.size());
  for (int i = 0; i < tensors.size(); ++i) {
    if (i < num_int32) {
      test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
    } else {
      test::ExpectTensorEqual<tstring>(tensors[i], tensors_expected[i]);
    }
  }
  int found = 0;
  for (const NodeDef& node : output.node()) {
    if (node.name() == "a" || node.name() == "c") {
      EXPECT_TRUE(node.device().empty());
    } else if (node.name() == "d" || node.name() == "e" || node.name() == "f") {
      EXPECT_EQ(node.device(), "/device:CPU:0");
    }
    ++found;
  }
  EXPECT_EQ(found, 5);
}
// Small float ops are only pinned when a GPU build makes it worthwhile.
TEST_F(PinToHostOptimizerTest, OptimizeSmallFloatOpsToHost) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 0.0f, {1024, 1024});
  Output input_min = ops::Const(s.WithOpName("input_min"), 0.0f);
  Output input_max = ops::Const(s.WithOpName("input_max"), 6.0f);
  Output b =
      ops::QuantizeAndDequantizeV2(s.WithOpName("b"), a, input_min, input_max);
  GrapplerItem item;
  item.fetch = {"b"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  PinToHostOptimizer optimizer(RewriterConfig::ON);
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  auto tensors = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(tensors_expected.size(), tensors.size());
  for (int i = 0; i < tensors.size(); ++i) {
    test::ExpectTensorEqual<float>(tensors[i], tensors_expected[i]);
  }
  for (const NodeDef& node : output.node()) {
    if (node.name() == "input_min" || node.name() == "input_max") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
      EXPECT_EQ(node.device(), "/device:CPU:0");
#else
      EXPECT_TRUE(node.device().empty());
#endif
    }
  }
}
// The optimizer must handle graphs whose nodes arrive in reverse order.
TEST_F(PinToHostOptimizerTest, TopologicalSort) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
  Output c = ops::Shape(s.WithOpName("c"), a);
  Output d = ops::Const(s.WithOpName("d"), 0, {1});
  Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
  GrapplerItem item;
  item.fetch = {"a", "c", "d", "e"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  std::reverse(item.graph.mutable_node()->begin(),
               item.graph.mutable_node()->end());
  GraphDef output;
  PinToHostOptimizer optimizer(RewriterConfig::ON);
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  auto tensors = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(tensors_expected.size(), tensors.size());
  for (int i = 0; i < tensors.size(); ++i) {
    test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
  }
  int found = 0;
  for (const NodeDef& node : output.node()) {
    if (node.name() == "a" || node.name() == "c") {
      EXPECT_TRUE(node.device().empty());
    } else if (node.name() == "d" || node.name() == "e") {
      EXPECT_EQ(node.device(), "/device:CPU:0");
    }
    ++found;
  }
  EXPECT_EQ(found, 4);
}
// Large tensors must never be moved to the host.
TEST_F(PinToHostOptimizerTest, NoSwap) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 1, {1, 1});
  Output b = ops::Const(s.WithOpName("b"), 1, {1, 1024 * 1024});
  Output c = ops::MatMul(s.WithOpName("c"), a, b);
  GrapplerItem item;
  item.fetch = {"a", "b", "c"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  PinToHostOptimizer optimizer(RewriterConfig::ON);
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  auto tensors = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(tensors_expected.size(), tensors.size());
  for (int i = 0; i < tensors.size(); ++i) {
    test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
  }
  int found = 0;
  for (const NodeDef& node : output.node()) {
    EXPECT_TRUE(node.device().empty());
    ++found;
  }
  EXPECT_EQ(found, 3);
}
// Identity chains preserve explicit placements of their producers.
TEST_F(PinToHostOptimizerTest, Identity) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a =
      ops::Const(s.WithOpName("a").WithDevice("/device:GPU:0"), 1, {64, 64});
  Output b = ops::Const(s.WithOpName("b"), {0, 1}, {2});
  Output c =
      ops::ReduceProd(s.WithOpName("c").WithDevice("/device:GPU:0"), a, b);
  Output d = ops::Identity(s.WithDevice("/device:CPU:0").WithOpName("d"), c);
  Output e = ops::Multiply(s.WithOpName("e"), d, d);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphDef output;
  PinToHostOptimizer optimizer(RewriterConfig::ON);
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  int found = 0;
  for (const NodeDef& node : output.node()) {
    if (node.name() == "a" || node.name() == "c") {
      EXPECT_EQ(node.device(), "/device:GPU:0");
    } else if (node.name() == "b") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
      EXPECT_EQ(node.device(), "/device:CPU:0");
#else
      EXPECT_TRUE(node.device().empty());
#endif
    } else if (node.name() == "d") {
      EXPECT_EQ(node.device(), "/device:CPU:0");
    } else if (node.name() == "e") {
      EXPECT_TRUE(node.device().empty());
    }
    ++found;
  }
  EXPECT_EQ(found, 5);
}
// Multi-output ops (ShapeN) exercise the port-id to arg-id mapping.
TEST_F(PinToHostOptimizerTest, PortIdToArgId) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  Output a = ops::Const(s.WithOpName("a"), 1, {1, 2, 3});
  ops::ShapeN b(s.WithOpName("b"), {a, a, a});
  GrapplerItem item;
  item.fetch = {"a", "b"};
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
  GraphDef output;
  PinToHostOptimizer optimizer(RewriterConfig::ON);
  TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
  auto tensors = EvaluateNodes(item.graph, item.fetch);
  EXPECT_EQ(tensors_expected.size(), tensors.size());
  for (int i = 0; i < tensors.size(); ++i) {
    test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
  }
  int found = 0;
  for (const NodeDef& node : output.node()) {
    EXPECT_EQ(node.device(), "/device:CPU:0");
    ++found;
  }
  EXPECT_EQ(found, 2);
}
}
}
} |
237 | #ifndef AROLLA_EXPR_EVAL_SIDE_OUTPUT_H_
#define AROLLA_EXPR_EVAL_SIDE_OUTPUT_H_
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr_node.h"
#include "arolla/io/slot_listener.h"
namespace arolla::expr {
// An expression together with its extracted "export" side outputs, keyed by
// export tag. Produced by ExtractSideOutputs() below.
struct ExprWithSideOutputs {
  ExprNodePtr expr;  // the expression with export annotations stripped
  absl::flat_hash_map<std::string, ExprNodePtr> side_outputs;
};
// Strips export annotations from `expr`, collecting the annotated values into
// the returned side_outputs map. Fails on duplicate export names.
absl::StatusOr<ExprWithSideOutputs> ExtractSideOutputs(ExprNodePtr expr);
// Returns a copy of `side_outputs` in which every entry whose name has a
// known QType in `slot_listener` is wrapped in a cast to that QType; entries
// the listener does not know are passed through unchanged.
absl::StatusOr<absl::flat_hash_map<std::string, ExprNodePtr>>
PrepareSideOutputsForListener(
    const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs,
    const SlotListenerBase& slot_listener);
}
#endif
#include "arolla/expr/eval/side_output.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operators/bootstrap_operators.h"
#include "arolla/io/slot_listener.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
// Walks `expr` bottom-up, replacing every export annotation with its wrapped
// subexpression and recording the exported value under its tag. A tag that
// appears twice is a FailedPreconditionError.
absl::StatusOr<ExprWithSideOutputs> ExtractSideOutputs(ExprNodePtr expr) {
  ExprWithSideOutputs stripped;
  auto strip_export = [&](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
    if (!IsExportAnnotation(node)) {
      return node;
    }
    // Export annotations carry at least (wrapped_expr, tag).
    DCHECK_GE(node->node_deps().size(), 2);
    ExprNodePtr wrapped = node->node_deps()[0];
    auto tag = ReadExportAnnotationTag(node);
    auto value = ReadExportAnnotationValue(node);
    DCHECK_NE(wrapped, nullptr);
    DCHECK_NE(value, nullptr);
    auto [it, inserted] = stripped.side_outputs.emplace(tag, value);
    if (!inserted) {
      return absl::FailedPreconditionError(absl::StrCat(
          "duplicated export name ", tag, ": ", GetDebugSnippet(value),
          " vs ", GetDebugSnippet(it->second)));
    }
    // The annotation node itself disappears from the transformed expression.
    return wrapped;
  };
  ASSIGN_OR_RETURN(stripped.expr, Transform(expr, strip_export));
  return stripped;
}
// Builds the listener-ready side-output map: each entry is copied, and any
// entry whose name has a known QType in `slot_listener` is wrapped via
// CoreCast so the produced value matches the listener's expected type.
absl::StatusOr<absl::flat_hash_map<std::string, ExprNodePtr>>
PrepareSideOutputsForListener(
    const absl::flat_hash_map<std::string, ExprNodePtr>& side_outputs,
    const SlotListenerBase& slot_listener) {
  absl::flat_hash_map<std::string, ExprNodePtr> prepared;
  prepared.reserve(side_outputs.size());
  for (const auto& [name, side_output] : side_outputs) {
    ExprNodePtr expr = side_output;
    if (auto qtype = slot_listener.GetQTypeOf(name); qtype != nullptr) {
      ASSIGN_OR_RETURN(expr, expr_operators::CoreCast(expr, Literal(qtype)));
    }
    prepared.emplace(name, std::move(expr));
  }
  return prepared;
}
} | #include "arolla/expr/eval/side_output.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::WithExportAnnotation;
using ::arolla::testing::WithExportValueAnnotation;
using ::testing::Field;
using ::testing::MatchesRegex;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// Fixture that initializes the Arolla runtime before every test case runs.
class SideOutputTest : public ::testing::Test {
 protected:
  void SetUp() override { ASSERT_OK(InitArolla()); }
};
// Verifies that ExtractSideOutputs strips both export-value and export
// annotations and records the exported expressions under their tags.
TEST_F(SideOutputTest, ExtractSideOutputs) {
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add",
             {WithExportAnnotation(
                  CallOp("math.add", {WithExportValueAnnotation(
                                          Leaf("x"), "out_z", Leaf("z")),
                                      Leaf("y")}),
                  "out_xpy"),
              Leaf("y")}));
  // After stripping, the annotations vanish from the main expression.
  ASSERT_OK_AND_ASSIGN(
      auto expected_expr,
      CallOp("math.add",
             {CallOp("math.add", {Leaf("x"), Leaf("y")}), Leaf("y")}));
  auto expected_out_z = Leaf("z");
  ASSERT_OK_AND_ASSIGN(auto expected_out_xpy,
                       CallOp("math.add", {Leaf("x"), Leaf("y")}));
  EXPECT_THAT(ExtractSideOutputs(expr),
              IsOkAndHolds(AllOf(
                  Field(&ExprWithSideOutputs::expr, EqualsExpr(expected_expr)),
                  Field(&ExprWithSideOutputs::side_outputs,
                        UnorderedElementsAre(
                            Pair("out_z", EqualsExpr(expected_out_z)),
                            Pair("out_xpy", EqualsExpr(expected_out_xpy)))))));
}
// Duplicate tag via two export-value annotations must fail.
TEST_F(SideOutputTest, ExtractSideOutputsExportValueDuplicateNamesError) {
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add",
             {WithExportValueAnnotation(Leaf("x"), "out_z", Leaf("z")),
              WithExportValueAnnotation(Leaf("y"), "out_z", Leaf("x"))}));
  EXPECT_THAT(
      ExtractSideOutputs(expr),
      testing::StatusIs(absl::StatusCode::kFailedPrecondition,
                        MatchesRegex("duplicated export name.*out_z.*")));
}
// Duplicate tag via two export annotations must fail.
TEST_F(SideOutputTest, ExtractSideOutputsExportDuplicateNamesError) {
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add", {WithExportAnnotation(Leaf("x"), "out_z"),
                          WithExportAnnotation(Leaf("y"), "out_z")}));
  EXPECT_THAT(
      ExtractSideOutputs(expr),
      testing::StatusIs(absl::StatusCode::kFailedPrecondition,
                        MatchesRegex("duplicated export name.*out_z.*")));
}
// Mixing export and export-value annotations with the same tag must fail.
TEST_F(SideOutputTest,
       ExtractSideOutputsExportVsExportValueDuplicateNamesError) {
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add",
             {WithExportValueAnnotation(Leaf("x"), "out_z", Leaf("z")),
              WithExportAnnotation(Leaf("y"), "out_z")}));
  EXPECT_THAT(
      ExtractSideOutputs(expr),
      testing::StatusIs(absl::StatusCode::kFailedPrecondition,
                        MatchesRegex("duplicated export name.*out_z.*")));
}
// Duplicate tags fail even when both annotations export the same expression.
TEST_F(SideOutputTest,
       ExtractSideOutputsExportVsExportValueDuplicateNamesSameExprError) {
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add",
             {WithExportValueAnnotation(Leaf("x"), "out_z", Leaf("z")),
              WithExportAnnotation(Leaf("z"), "out_z")}));
  // NOTE(review): expected_expr is unused below; presumably a leftover from
  // an earlier version of this test — confirm before removing.
  ASSERT_OK_AND_ASSIGN(auto expected_expr,
                       CallOp("math.add", {Leaf("x"), Leaf("z")}));
  EXPECT_THAT(
      ExtractSideOutputs(expr),
      testing::StatusIs(absl::StatusCode::kFailedPrecondition,
                        MatchesRegex("duplicated export name.*out_z.*")));
}
}
} |
238 | #ifndef TENSORSTORE_INTERNAL_OAUTH2_OAUTH_UTILS_H_
#define TENSORSTORE_INTERNAL_OAUTH2_OAUTH_UTILS_H_
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_oauth2 {
// Signs `to_sign` with the PEM-encoded RSA `private_key` using SHA-256 and
// returns the web-safe base64 signature.
Result<std::string> SignWithRSA256(std::string_view private_key,
                                   std::string_view to_sign);
// Returns the web-safe base64-encoded JOSE header for `key_id`.
std::string BuildJWTHeader(std::string_view key_id);
// Returns the web-safe base64-encoded JWT claim set. `lifetime` is the token
// validity window in seconds, measured from `now`.
std::string BuildJWTClaimBody(std::string_view client_email,
                              std::string_view scope,
                              std::string_view audience, absl::Time now,
                              int64_t lifetime = 3600);
// Assembles "grant_type=...&assertion=<header>.<body>.<signature>".
Result<std::string> BuildSignedJWTRequest(std::string_view private_key,
                                          std::string_view header,
                                          std::string_view body);
// Fields parsed from a Google service account JSON key file.
struct GoogleServiceAccountCredentials {
  std::string private_key_id;
  std::string private_key;
  std::string token_uri;
  std::string client_email;
};
Result<GoogleServiceAccountCredentials>
ParseGoogleServiceAccountCredentialsImpl(const ::nlohmann::json& credentials);
Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentials(
    std::string_view source);
// Overload restricted to ::nlohmann::json so string literals pick the
// string_view overload above.
// NOTE(review): relies on <type_traits> being transitively available for
// std::enable_if_t — confirm it is included.
template <typename T>
std::enable_if_t<std::is_same_v<T, ::nlohmann::json>,
                 Result<GoogleServiceAccountCredentials>>
ParseGoogleServiceAccountCredentials(const T& json) {
  return ParseGoogleServiceAccountCredentialsImpl(json);
}
// Fields of a stored OAuth2 refresh-token credential.
struct RefreshToken {
  std::string client_id;
  std::string client_secret;
  std::string refresh_token;
};
Result<RefreshToken> ParseRefreshTokenImpl(const ::nlohmann::json& credentials);
Result<RefreshToken> ParseRefreshToken(std::string_view source);
template <typename T>
std::enable_if_t<std::is_same_v<T, ::nlohmann::json>, Result<RefreshToken>>
ParseRefreshToken(const T& json) {
  return ParseRefreshTokenImpl(json);
}
// Fields of an OAuth2 token-endpoint success response.
struct OAuthResponse {
  int64_t expires_in;  // seconds until the access token expires
  std::string token_type;
  std::string access_token;
};
Result<OAuthResponse> ParseOAuthResponseImpl(
    const ::nlohmann::json& credentials);
Result<OAuthResponse> ParseOAuthResponse(std::string_view source);
template <typename T>
std::enable_if_t<std::is_same_v<T, ::nlohmann::json>, Result<OAuthResponse>>
ParseOAuthResponse(const T& json) {
  return ParseOAuthResponseImpl(json);
}
// Fields of an OAuth2 error response body.
struct ErrorResponse {
  std::string error;
  std::string error_description;
  std::string error_uri;
  std::string error_subtype;
};
Result<ErrorResponse> ParseErrorResponse(const ::nlohmann::json& error);
}
}
#endif
#include "tensorstore/internal/oauth2/oauth_utils.h"
#include <stddef.h>
#include <memory>
#include <optional>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/time/time.h"
#include <openssl/bio.h>
#include <openssl/evp.h>
#include <openssl/pem.h>
#include <openssl/rsa.h>
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace jb = tensorstore::internal_json_binding;
namespace tensorstore {
namespace {
constexpr char kCryptoAlgorithm[] = "RS256";
constexpr char kJwtType[] = "JWT";
constexpr char kGrantType[] =
"urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer";
}
namespace internal_oauth2 {
// Computes the RS256 (RSA + SHA-256) signature of `to_sign` and returns it
// web-safe base64 encoded. The OpenSSL objects are owned by unique_ptrs so
// every early return releases them.
Result<std::string> SignWithRSA256(std::string_view private_key,
                                   std::string_view to_sign) {
  if (private_key.empty()) {
    return absl::InternalError("No private key provided.");
  }
  const auto md = EVP_sha256();
  assert(md != nullptr);
  auto md_ctx = std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)>(
      EVP_MD_CTX_create(), &EVP_MD_CTX_free);
  assert(md_ctx != nullptr);
  // Wrap the PEM text in a memory BIO so OpenSSL can parse it.
  auto pem_buffer = std::unique_ptr<BIO, decltype(&BIO_free)>(
      BIO_new_mem_buf(static_cast<const char*>(private_key.data()),
                      static_cast<int>(private_key.length())),
      &BIO_free);
  if (!pem_buffer) {
    return absl::InternalError("Could not create the PEM buffer.");
  }
  auto key = std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)>(
      PEM_read_bio_PrivateKey(
          static_cast<BIO*>(pem_buffer.get()),
          nullptr,
          nullptr,
          nullptr),
      &EVP_PKEY_free);
  if (!key) {
    return absl::InternalError("Could not load the private key.");
  }
  // Standard EVP signing sequence: init, update, then final twice (first to
  // obtain the signature length, then to write the signature).
  if (EVP_DigestSignInit(md_ctx.get(), nullptr, md, nullptr, key.get()) != 1) {
    return absl::InternalError("DigestInit failed.");
  }
  if (EVP_DigestSignUpdate(md_ctx.get(), to_sign.data(), to_sign.size()) != 1) {
    return absl::InternalError("DigestUpdate failed.");
  }
  size_t sig_len = 0;
  if (EVP_DigestSignFinal(md_ctx.get(), nullptr, &sig_len) != 1) {
    return absl::InternalError("DigestFinal (get signature length) failed.");
  }
  std::unique_ptr<unsigned char[]> sig(new unsigned char[sig_len]);
  if (EVP_DigestSignFinal(md_ctx.get(), sig.get(), &sig_len) != 1) {
    return absl::InternalError("DigestFinal (signature compute) failed.");
  }
  std::string signature;
  absl::WebSafeBase64Escape(
      std::string_view(reinterpret_cast<char*>(sig.get()), sig_len),
      &signature);
  // std::move enables the implicit conversion into Result<std::string> to
  // take the string by rvalue.
  return std::move(signature);
}
std::string BuildJWTHeader(std::string_view key_id) {
::nlohmann::json assertion_header = {
{"alg", kCryptoAlgorithm},
{"typ", kJwtType},
{"kid", std::string(key_id)},
};
std::string encoded_header;
absl::WebSafeBase64Escape(assertion_header.dump(), &encoded_header);
return encoded_header;
}
std::string BuildJWTClaimBody(std::string_view client_email,
std::string_view scope,
std::string_view audience, absl::Time now,
std::int64_t lifetime) {
const std::int64_t request_timestamp_sec = absl::ToUnixSeconds(now);
const std::int64_t expiration_timestamp_sec =
request_timestamp_sec + lifetime;
::nlohmann::json assertion_payload = {
{"iss", std::string(client_email)}, {"scope", std::string(scope)},
{"aud", std::string(audience)}, {"iat", request_timestamp_sec},
{"exp", expiration_timestamp_sec},
};
std::string encoded_payload;
absl::WebSafeBase64Escape(assertion_payload.dump(), &encoded_payload);
return encoded_payload;
}
// Signs "<header>.<body>" and assembles the urlencoded token-request payload
// "grant_type=...&assertion=<header>.<body>.<signature>".
Result<std::string> BuildSignedJWTRequest(std::string_view private_key,
                                          std::string_view header,
                                          std::string_view body) {
  const auto claim = tensorstore::StrCat(header, ".", body);
  auto signature = SignWithRSA256(private_key, claim);
  if (!signature) {
    return signature.status();
  }
  return tensorstore::StrCat("grant_type=", kGrantType, "&assertion=", claim,
                             ".", *signature);
}
// JSON binder for OAuth2 error bodies; every field must be non-empty and
// unknown members are ignored.
constexpr static auto ErrorResponseBinder = jb::Object(
    jb::Member("error",
               jb::Projection(&ErrorResponse::error, jb::NonEmptyStringBinder)),
    jb::Member("error_description",
               jb::Projection(&ErrorResponse::error_description,
                              jb::NonEmptyStringBinder)),
    jb::Member("error_uri", jb::Projection(&ErrorResponse::error_uri,
                                           jb::NonEmptyStringBinder)),
    jb::Member("error_subtype", jb::Projection(&ErrorResponse::error_subtype,
                                               jb::NonEmptyStringBinder)),
    jb::DiscardExtraMembers);
// Parses an OAuth2 error response; a discarded (unparseable) JSON value is
// rejected up front.
Result<ErrorResponse> ParseErrorResponse(const ::nlohmann::json& error) {
  if (error.is_discarded()) {
    return absl::InvalidArgumentError("Invalid ErrorResponse");
  }
  return jb::FromJson<ErrorResponse>(error, ErrorResponseBinder);
}
// JSON binder for service account key files: key material and client email
// are required non-empty; token_uri is optional (defaults to empty); extra
// members (project_id, auth_uri, ...) are ignored.
constexpr static auto GoogleServiceAccountCredentialsBinder = jb::Object(
    jb::Member("private_key",
               jb::Projection(&GoogleServiceAccountCredentials::private_key,
                              jb::NonEmptyStringBinder)),
    jb::Member("private_key_id",
               jb::Projection(&GoogleServiceAccountCredentials::private_key_id,
                              jb::NonEmptyStringBinder)),
    jb::Member("client_email",
               jb::Projection(&GoogleServiceAccountCredentials::client_email,
                              jb::NonEmptyStringBinder)),
    jb::Member("token_uri",
               jb::Projection(&GoogleServiceAccountCredentials::token_uri,
                              jb::DefaultInitializedValue())),
    jb::DiscardExtraMembers);
// Parses an already-decoded JSON credentials object, wrapping binder errors
// in an InvalidArgument status.
Result<GoogleServiceAccountCredentials>
ParseGoogleServiceAccountCredentialsImpl(const ::nlohmann::json& credentials) {
  if (credentials.is_discarded()) {
    return absl::InvalidArgumentError(
        "Invalid GoogleServiceAccountCredentials token");
  }
  auto creds_token = jb::FromJson<GoogleServiceAccountCredentials>(
      credentials, GoogleServiceAccountCredentialsBinder);
  if (!creds_token.ok()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid GoogleServiceAccountCredentials: ", creds_token.status()));
  }
  return creds_token;
}
// String entry point: JSON-decodes `source` then delegates to the Impl above.
Result<GoogleServiceAccountCredentials> ParseGoogleServiceAccountCredentials(
    std::string_view source) {
  auto credentials = internal::ParseJson(source);
  if (credentials.is_discarded()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Invalid GoogleServiceAccountCredentials: ", source));
  }
  return ParseGoogleServiceAccountCredentialsImpl(credentials);
}
// JSON binder for refresh-token credentials; all three fields are required
// non-empty strings and extra members are ignored.
constexpr static auto RefreshTokenBinder = jb::Object(
    jb::Member("client_id", jb::Projection(&RefreshToken::client_id,
                                           jb::NonEmptyStringBinder)),
    jb::Member("client_secret", jb::Projection(&RefreshToken::client_secret,
                                               jb::NonEmptyStringBinder)),
    jb::Member("refresh_token", jb::Projection(&RefreshToken::refresh_token,
                                               jb::NonEmptyStringBinder)),
    jb::DiscardExtraMembers);
// Parses an already-decoded refresh-token JSON object. Failures are reported
// as Unauthenticated (not InvalidArgument) since they indicate bad
// credentials.
Result<RefreshToken> ParseRefreshTokenImpl(
    const ::nlohmann::json& credentials) {
  if (credentials.is_discarded()) {
    return absl::UnauthenticatedError("Invalid RefreshToken token");
  }
  auto refresh_token =
      jb::FromJson<RefreshToken>(credentials, RefreshTokenBinder);
  if (!refresh_token.ok()) {
    return absl::UnauthenticatedError(
        tensorstore::StrCat("Invalid RefreshToken: ", credentials.dump()));
  }
  return refresh_token;
}
// String entry point: JSON-decodes `source` then delegates to the Impl above.
Result<RefreshToken> ParseRefreshToken(std::string_view source) {
  auto credentials = internal::ParseJson(source);
  if (credentials.is_discarded()) {
    return absl::UnauthenticatedError(
        tensorstore::StrCat("Invalid RefreshToken: ", source));
  }
  return ParseRefreshTokenImpl(credentials);
}
// JSON binder for token-endpoint success responses; expires_in accepts
// loosely-typed integers (e.g. a numeric string) and must be >= 1.
constexpr static auto OAuthResponseBinder = jb::Object(
    jb::Member("token_type", jb::Projection(&OAuthResponse::token_type,
                                            jb::NonEmptyStringBinder)),
    jb::Member("access_token", jb::Projection(&OAuthResponse::access_token,
                                              jb::NonEmptyStringBinder)),
    jb::Member("expires_in", jb::Projection(&OAuthResponse::expires_in,
                                            jb::LooseInteger<int64_t>(1))),
    jb::DiscardExtraMembers);
// Parses an already-decoded OAuth response object; failures are reported as
// Unauthenticated.
Result<OAuthResponse> ParseOAuthResponseImpl(
    const ::nlohmann::json& credentials) {
  if (credentials.is_discarded()) {
    return absl::UnauthenticatedError("Invalid OAuthResponse token");
  }
  auto response_token =
      jb::FromJson<OAuthResponse>(credentials, OAuthResponseBinder);
  if (!response_token.ok()) {
    return absl::UnauthenticatedError(
        tensorstore::StrCat("Invalid OAuthResponse: ", credentials.dump()));
  }
  return response_token;
}
// String entry point: JSON-decodes `source` then delegates to the Impl above.
Result<OAuthResponse> ParseOAuthResponse(std::string_view source) {
  auto credentials = internal::ParseJson(source);
  if (credentials.is_discarded()) {
    return absl::UnauthenticatedError(
        tensorstore::StrCat("Invalid OAuthResponse: ", source));
  }
  return ParseOAuthResponseImpl(credentials);
}
}
} | #include "tensorstore/internal/oauth2/oauth_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/oauth2/fake_private_key.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::internal_oauth2::GetFakePrivateKey;
using ::tensorstore::internal_oauth2::ParseGoogleServiceAccountCredentials;
using ::tensorstore::internal_oauth2::ParseOAuthResponse;
using ::tensorstore::internal_oauth2::ParseRefreshToken;
// Assembles a realistic service-account key file: a fixed JSON prefix plus a
// C-escaped fake private key appended as the "private_key" member.
// NOTE(review): the bare "https:" values look like URLs scrubbed/truncated in
// this copy of the file — confirm against the original source.
std::string GetJsonKeyFileContents() {
  constexpr char kJsonKeyfilePrefix[] = R"""({
      "type": "service_account",
      "project_id": "foo-project",
      "private_key_id": "a1a111aa1111a11a11a11aa111a111a1a1111111",
      "client_email": "[email protected]",
      "client_id": "100000000000000000001",
      "auth_uri": "https:
      "token_uri": "https:
      "auth_provider_x509_cert_url": "https:
      "client_x509_cert_url": "https:
  )""";
  return tensorstore::StrCat(kJsonKeyfilePrefix, "    \"private_key\": \"",
                             absl::CEscape(GetFakePrivateKey()), "\" }");
}
// Each case below is missing, empties, or malforms a required field, so
// parsing must fail.
TEST(OAuthUtilTest, GoogleServiceAccountCredentials_Invalid) {
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials("{ }").ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "",
      "private_key_id": "",
      "client_email": "",
      "token_uri": ""
  })")
                   .ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "",
      "private_key_id": "abc",
      "client_email": "456"
  })")
                   .ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "private_key_id": "",
      "client_email": "456"
  })")
                   .ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "private_key_id": "abc",
      "client_email": ""
  })")
                   .ok());
  // Malformed JSON: missing comma after "client_email".
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "private_key_id": "abc",
      "client_email": "456"
      "token_uri": ""
  })")
                   .ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key_id": "abc",
      "client_email": "456",
  })")
                   .ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "client_email": "456",
  })")
                   .ok());
  EXPECT_FALSE(ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "private_key_id": "abc",
  })")
                   .ok());
}
// Valid credentials parse; token_uri is optional and defaults to "".
TEST(OAuthUtilTest, GoogleServiceAccountCredentials) {
  auto result = ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "private_key_id": "abc",
      "client_email": "456",
      "token_uri": "wxy"
  })");
  ASSERT_TRUE(result.ok()) << result.status();
  EXPECT_EQ("123", result.value().private_key);
  EXPECT_EQ("abc", result.value().private_key_id);
  EXPECT_EQ("456", result.value().client_email);
  EXPECT_EQ("wxy", result.value().token_uri);
  result = ParseGoogleServiceAccountCredentials(R"({
      "private_key" : "123",
      "private_key_id": "abc",
      "client_email": "456"
  })");
  ASSERT_TRUE(result.ok()) << result.status();
  EXPECT_EQ("123", result.value().private_key);
  EXPECT_EQ("abc", result.value().private_key_id);
  EXPECT_EQ("456", result.value().client_email);
  EXPECT_EQ("", result.value().token_uri);
}
// A full key file (with extra members) parses successfully.
TEST(OAuthUtilTest, GoogleServiceAccountCredentialsFile) {
  auto result = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
  ASSERT_TRUE(result.ok()) << result.status();
  EXPECT_EQ("[email protected]",
            result->client_email);
}
// Missing, empty, or wrongly-typed fields — and an error-response body —
// must all be rejected.
TEST(OAuthUtilTest, ParseRefreshToken_Invalid) {
  EXPECT_FALSE(ParseRefreshToken("{ }").ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "",
      "client_secret": "",
      "refresh_token": ""
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "",
      "client_secret": "abc",
      "refresh_token": "456"
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "123",
      "client_secret": "",
      "refresh_token": "456"
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "123",
      "client_secret": "abc",
      "refresh_token": ""
  })")
                   .ok());
  // refresh_token must be a string, not a number.
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "123",
      "client_secret": "abc",
      "refresh_token": 456
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_secret": "abc",
      "refresh_token": "456"
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "123",
      "refresh_token": "456"
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"({
      "client_id" : "123",
      "client_secret": "abc",
  })")
                   .ok());
  EXPECT_FALSE(ParseRefreshToken(R"json({
      "error": "invalid_grant",
      "error_description": "reauth related error (invalid_rapt)",
      "error_uri": "https:
      "error_subtype": "invalid_rapt"
  })json")
                   .ok());
}
// A complete refresh token parses and round-trips its fields.
TEST(OAuthUtilTest, ParseRefreshToken) {
  auto result = ParseRefreshToken(R"({
      "client_id" : "123",
      "client_secret": "abc",
      "refresh_token": "456"
  })");
  ASSERT_TRUE(result.ok()) << result.status();
  EXPECT_EQ("123", result.value().client_id);
  EXPECT_EQ("abc", result.value().client_secret);
  EXPECT_EQ("456", result.value().refresh_token);
}
// Empty/missing fields and error-response bodies must be rejected.
TEST(OAuthUtilTest, ParseOAuthResponse_Invalid) {
  EXPECT_FALSE(ParseOAuthResponse("{ }").ok());
  EXPECT_FALSE(ParseOAuthResponse(R"json({
      "token_type" : "",
      "access_token": "abc",
      "expires_in": 456
  })json")
                   .ok());
  EXPECT_FALSE(ParseOAuthResponse(R"json({
      "token_type" : "123",
      "access_token": "",
      "expires_in": 456
  })json")
                   .ok());
  EXPECT_FALSE(ParseOAuthResponse(R"json({
      "token_type" : "123",
      "access_token": "abc",
  })json")
                   .ok());
  EXPECT_FALSE(ParseOAuthResponse(R"json({
      "error": "invalid_grant",
      "error_description": "reauth related error (invalid_rapt)",
      "error_uri": "https:
      "error_subtype": "invalid_rapt"
  })json")
                   .ok());
}
// expires_in is a "loose" integer: both 456 and "456" are accepted; extra
// members are ignored.
TEST(OAuthUtilTest, ParseOAuthResponse) {
  EXPECT_TRUE(ParseOAuthResponse(R"({
      "token_type" : "123",
      "access_token": "abc",
      "expires_in": "456"
  })")
                  .ok());
  auto result = ParseOAuthResponse(R"({
      "token_type" : "123",
      "access_token": "abc",
      "expires_in": 456
  })");
  ASSERT_TRUE(result.ok()) << result.status();
  EXPECT_EQ("123", result.value().token_type);
  EXPECT_EQ("abc", result.value().access_token);
  EXPECT_EQ(456, result.value().expires_in);
  result = ParseOAuthResponse(R"({
      "token_type" : "123",
      "access_token": "abc",
      "expires_in": 456,
      "extra_fields": "are ignored"
  })");
  ASSERT_TRUE(result.ok()) << result.status();
}
// Golden-value checks for the base64-encoded JWT header and claim body.
TEST(OAuthUtilTest, BuildJWTClaimTest) {
  using ::tensorstore::internal_oauth2::BuildJWTClaimBody;
  using ::tensorstore::internal_oauth2::BuildJWTHeader;
  EXPECT_EQ("eyJhbGciOiJSUzI1NiIsImtpZCI6ImEiLCJ0eXAiOiJKV1QifQ",
            BuildJWTHeader("a"));
  EXPECT_EQ(
      "eyJhdWQiOiI0IiwiZXhwIjoxNTQ3NjY5NzAzLCJpYXQiOjE1NDc2NjYxMDMsImlzcyI6ImIi"
      "LCJzY29wZSI6ImMifQ",
      BuildJWTClaimBody("b", "c", "4", absl::FromUnixSeconds(1547666103),
                        3600));
}
// Signing fails for an empty or corrupt key and produces a stable golden
// signature for the fake test key (RS256 is deterministic for RSA+PKCS1).
TEST(OAuthUtilTest, Sign) {
  using ::tensorstore::internal_oauth2::SignWithRSA256;
  {
    auto result = SignWithRSA256("", "something");
    EXPECT_FALSE(result.ok());
  }
  {
    // Structurally valid PEM wrapper around garbage key bytes.
    constexpr char kBadKey[] =
        "-----BEGIN PRIVATE KEY-----\n"
        "Z23x2ZUyar6i0BQ8eJFAEN+IiUapEeCVazuxJSt4RjYfwSa/"
        "p117jdZGEWD0GxMC\nlUtj+/nH3HDQjM4ltYfTPUg=\n"
        "-----END PRIVATE KEY-----\n";
    auto result = SignWithRSA256(kBadKey, "something");
    EXPECT_FALSE(result.ok());
  }
  auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
  ASSERT_TRUE(creds.ok());
  {
    auto result = SignWithRSA256(creds->private_key, "something");
    ASSERT_TRUE(result.ok());
    EXPECT_EQ(
        "A-sH4BVqtxu-6LECWJCb0VKGDj46pnpBpZB1KViuhG2CwugRVR6V3-"
        "w8eBvAUbIRewSnXp_lWkxdy_rZBMau9VuILnLOC0t692-"
        "L8WEqHsoFYBWvTZGCT5XkslVXhxt4d8jgM6U_8If4Cf3fGA4XAxpP-pyrbPGz-"
        "VXn6R7jcLGOLsFtcuAXpJ9zkwYE72pGUtI_hiU-"
        "tquIEayOQW9frXJlxt2oR4ld1l3p0FWibkNY8OfYPdTlRS0WcsgpWngTamHEBplJ5xNLD5"
        "Ye5bG1DFqBJn0evxW0btbcfKCYuyirvgvHPsTt-"
        "YMcPGo1xtlhT5c4ycEHOObFUGDpKPjljw",
        *result);
  }
}
// Golden-value check for the fully-assembled signed JWT request body.
TEST(OAuthUtilTest, BuildJWTRequestBody) {
  using ::tensorstore::internal_oauth2::BuildSignedJWTRequest;
  auto creds = ParseGoogleServiceAccountCredentials(GetJsonKeyFileContents());
  ASSERT_TRUE(creds.ok());
  auto result =
      BuildSignedJWTRequest(creds->private_key, "header", "something");
  ASSERT_TRUE(result.ok());
  EXPECT_EQ(
      "grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&"
      "assertion=header.something.LyvY9ZVG6tL34g5Wji--3G5JGQP-"
      "fza47yBQIrRHJqecVUTVGuEXti_deBjSbB36gvpBOE67-U9h1wgD2VR_"
      "MDx8JaQHGct04gVZdKC7m4uqu5lI8u0jqXGG4UbRwfUMZ0UCjxJfyUbg6KUR7iyiqoH5szZv"
      "31rJISnM4RQvH-lQFrE6BuXpvB09Hve4T3q5mtq7E9pd5rXz_"
      "vlqL5ib5tkdBEg2cbydDZHeCx5uA9qcg3hGidrU1fLgreFKu3dSvzu4qFZL3-"
      "0Pnt4XMqwslx2vBbFQB7_K8Dnz10F1TA5njOvwFRWNjKM1I0cRZ5N3O1CnGv1wyAz-"
      "FIcKdk5_7Q",
      *result);
}
} |
239 | #ifndef XLA_SERVICE_REDUCE_SCATTER_DECOMPOSER_H_
#define XLA_SERVICE_REDUCE_SCATTER_DECOMPOSER_H_
#include <functional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Pass that rewrites each array-shaped reduce-scatter into an equivalent
// all-reduce followed by a dynamic-slice of this device's shard.
class ReduceScatterDecomposer : public HloModulePass {
 public:
  // `update_layout` (optional) adjusts shapes created during decomposition;
  // `should_decompose` (optional) filters which reduce-scatters to rewrite.
  explicit ReduceScatterDecomposer(
      std::function<void(Shape&)> update_layout = nullptr,
      std::function<bool(const HloInstruction*)> should_decompose = nullptr)
      : update_layout_(update_layout), should_decompose_(should_decompose) {}
  absl::string_view name() const override {
    return "reduce-scatter-decomposer";
  }
  using HloPassInterface::Run;
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
  // NOTE(review): these members are public despite the trailing-underscore
  // naming — presumably for test access; confirm before tightening.
  std::function<void(Shape&)> update_layout_;
  std::function<bool(const HloInstruction*)> should_decompose_;
};
}
#endif
#include "xla/service/reduce_scatter_decomposer.h"
#include <sys/types.h>
#include <limits>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape_util.h"
namespace xla {
// Rewrites every array-shaped reduce-scatter (that passes the optional
// should_decompose_ filter) into all-reduce + dynamic-slice. Returns true if
// any instruction was rewritten.
absl::StatusOr<bool> ReduceScatterDecomposer::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  bool changed = false;
  // Fresh channel ids for synthesized all-reduces start after every id
  // already present in the module.
  int64_t next_channel_id = hlo_query::NextChannelId(*module);
  for (HloComputation *computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (HloInstruction *instruction :
         computation->MakeInstructionPostOrder()) {
      auto *rs = DynCast<HloReduceScatterInstruction>(instruction);
      if (!rs || !rs->shape().IsArray()) {
        continue;
      }
      // Apply the user-provided filter before doing any other work. (The
      // original code allocated the channel id first, which consumed an id
      // even for instructions it then skipped.)
      if (should_decompose_ && !should_decompose_(rs)) {
        continue;
      }
      VLOG(2) << "Decompose: " << rs->ToString();
      // Cross-module reduce-scatters keep a (fresh) channel id on the
      // replacement all-reduce.
      std::optional<int64_t> channel_id;
      if (rs->channel_id()) {
        channel_id = next_channel_id++;
      }
      // The all-reduce needs its own reduction computation instance.
      HloComputation *apply_clone = module->AddComputationAndUnifyNamesAndIds(
          rs->to_apply()->Clone(), false);
      HloInstruction *ar =
          computation->AddInstruction(HloInstruction::CreateAllReduce(
              rs->operand(0)->shape(), rs->operands(), apply_clone,
              rs->device_list(), rs->constrain_layout(), channel_id,
              rs->use_global_device_ids()));
      apply_clone->SetCollectiveCallInstruction(ar);
      // Slice out this participant's shard of the all-reduced result.
      TF_ASSIGN_OR_RETURN(
          CollectiveOpGroupMode group_mode,
          GetCollectiveOpGroupMode(rs->channel_id().has_value(),
                                   rs->use_global_device_ids()));
      TF_ASSIGN_OR_RETURN(
          std::vector<HloInstruction *> start_indices,
          CreateStartIndicesForCollectiveDecomposition(
              group_mode, rs->replica_groups(), rs->shape(),
              rs->scatter_dimension(), computation, update_layout_));
      HloInstruction *ds =
          computation->AddInstruction(HloInstruction::CreateDynamicSlice(
              rs->shape(), ar, start_indices, rs->shape().dimensions()));
      TF_RETURN_IF_ERROR(rs->ReplaceAllUsesWith(ds));
      TF_RETURN_IF_ERROR(computation->RemoveInstruction(rs));
      changed = true;
    }
  }
  return changed;
}
} | #include "xla/service/reduce_scatter_decomposer.h"
#include <utility>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
// Fixture that runs ReduceScatterDecomposer on an HLO snippet and, when a
// rewrite is expected, matches the resulting all-reduce + dynamic-slice
// pattern including the participant-id arithmetic for each group mode.
class ReduceScatterDecomposerTest : public HloTestBase {
 public:
  enum class PassAction {
    kNoChange,        // pass must leave the module untouched
    kTrivialGroups,   // slice index derives directly from replica/partition id
    kTableLookup,     // slice index goes through a constant-table lookup
  };
  void RunPass(
      absl::string_view hlo_module, PassAction action,
      CollectiveOpGroupMode mode = CollectiveOpGroupMode::kCrossReplica,
      int64_t shard_size = 0, int64_t shard_dimension = 0,
      int64_t replica_count = 2,
      std::function<bool(const HloInstruction *)> should_decompose =
          [](const HloInstruction *) { return true; }) {
    const int64_t partition_count = 2;
    TF_ASSERT_OK_AND_ASSIGN(
        auto module, ParseAndReturnVerifiedModule(hlo_module, replica_count,
                                                  partition_count));
    TF_ASSERT_OK_AND_ASSIGN(
        bool changed,
        ReduceScatterDecomposer(nullptr,
                                should_decompose)
            .Run(module.get()));
    if (action == PassAction::kNoChange) {
      ASSERT_FALSE(changed);
      return;
    }
    ASSERT_TRUE(changed);
    Literal multiplier = LiteralUtil::CreateR0<uint32_t>(shard_size);
    // The participant id expression depends on the collective group mode.
    ::testing::Matcher<const ::xla::HloInstruction *> id_matcher = [&]() {
      switch (mode) {
        case CollectiveOpGroupMode::kCrossPartition:
          return op::PartitionId();
        case CollectiveOpGroupMode::kCrossReplica:
          return op::ReplicaId();
        case CollectiveOpGroupMode::kCrossReplicaAndPartition:
          return op::ReplicaId();
        case CollectiveOpGroupMode::kFlattenedID: {
          // flattened id = replica_id * partition_count + partition_id
          return op::Add(
              op::Multiply(op::ReplicaId(),
                           op::Constant(LiteralUtil::CreateR0<uint32_t>(
                               partition_count))),
              op::PartitionId());
        }
      }
    }();
    auto root = module->entry_computation()->root_instruction();
    const Shape &shape = root->shape();
    ::testing::Matcher<const ::xla::HloInstruction *> slice_index = id_matcher;
    if (action == PassAction::kTableLookup) {
      slice_index = op::Reshape(op::DynamicSlice(op::Constant(), id_matcher));
    }
    if (mode == CollectiveOpGroupMode::kCrossReplicaAndPartition) {
      slice_index = op::Add(
          op::Multiply(
              slice_index,
              op::Constant(LiteralUtil::CreateR0<uint32_t>(partition_count))),
          op::PartitionId());
    }
    // Expected root: dynamic-slice(all-reduce(param0), idx*shard_size on the
    // scatter dimension, 0 elsewhere).
    auto zero_matcher = op::Constant(LiteralUtil::Zero(U32));
    std::vector<::testing::Matcher<const ::xla::HloInstruction *>> ds_operands(
        shape.rank() + 1, zero_matcher);
    ds_operands[0] = op::AllReduce(op::Parameter(0));
    ds_operands[shard_dimension + 1] =
        op::Multiply(slice_index, op::Constant(std::move(multiplier)));
    EXPECT_THAT(root, op::DynamicSlice(ds_operands));
  }
};
// Identity replica group {0,1}: slice index comes straight from replica-id.
TEST_F(ReduceScatterDecomposerTest, TrivialReplicaID) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{0,1}}, dimensions={0}, to_apply=sum
}
)";
  RunPass(hlo_string, PassAction::kTrivialGroups,
          CollectiveOpGroupMode::kCrossReplica,
          4);
}
// Permuted group {1,0}: slice index requires a table lookup by replica-id.
TEST_F(ReduceScatterDecomposerTest, TableLookupReplicaId) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[8] parameter(0)
  ROOT rs = f32[4] reduce-scatter(p0), replica_groups={{1, 0}}, dimensions={0}, to_apply=sum
}
)";
  RunPass(hlo_string, PassAction::kTableLookup,
          CollectiveOpGroupMode::kCrossReplica,
          4);
}
// channel_id without global ids => cross-replica-and-partition mode.
TEST_F(ReduceScatterDecomposerTest, TrivialCrossReplicaAndPartition) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[4, 8] parameter(0)
  ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{0, 1}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
  RunPass(hlo_string, PassAction::kTrivialGroups,
          CollectiveOpGroupMode::kCrossReplicaAndPartition,
          2, 1);
}
// Single replica with channel_id degenerates to cross-partition mode.
TEST_F(ReduceScatterDecomposerTest,
       TrivialCrossReplicaAndPartition_SingleReplica) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[4, 8] parameter(0)
  ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0}}, channel_id=1, dimensions={1}, to_apply=sum
}
)";
  RunPass(hlo_string, PassAction::kTrivialGroups,
          CollectiveOpGroupMode::kCrossPartition,
          4, 1, 1);
}
// use_global_device_ids with a permuted group => flattened-id table lookup.
TEST_F(ReduceScatterDecomposerTest, TableLookupFlattenedId) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[4, 8] parameter(0)
  ROOT rs = f32[4, 2] reduce-scatter(p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
  RunPass(hlo_string, PassAction::kTableLookup,
          CollectiveOpGroupMode::kFlattenedID,
          2, 1);
}
// Tuple-shaped reduce-scatter is not array-shaped, so the pass skips it.
TEST_F(ReduceScatterDecomposerTest, NoChange) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[4, 8] parameter(0)
  ROOT rs = (f32[4, 2], f32[4,2]) reduce-scatter(p0, p0), replica_groups={{1,0, 3, 2}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
  RunPass(hlo_string, PassAction::kNoChange);
}
// A should_decompose filter returning false must suppress the rewrite.
TEST_F(ReduceScatterDecomposerTest, NoChangeWithShouldDecompose) {
  absl::string_view hlo_string = R"(
HloModule m
sum {
  a = f32[] parameter(0)
  b = f32[] parameter(1)
  ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
  p0 = f32[4, 8] parameter(0)
  ROOT rs = f32[4, 4] reduce-scatter(p0), replica_groups={{0,1}, {2,3}}, channel_id=1, dimensions={1}, to_apply=sum, use_global_device_ids=true
}
)";
  RunPass(hlo_string, PassAction::kNoChange,
          CollectiveOpGroupMode::kCrossReplica,
          0, 0,
          2, [](const HloInstruction *) { return false; });
}
}
} |
240 | #ifndef TENSORFLOW_TSL_LIB_CORE_BITMAP_H_
#define TENSORFLOW_TSL_LIB_CORE_BITMAP_H_
#include <string>
#include "tsl/platform/logging.h"
namespace tsl {
namespace core {
// A simple heap-backed bitmap: a fixed number of bits supporting get/set/
// clear of individual positions and a scan for the first unset bit.
// Not thread-safe; copying is disabled (owns a raw array).
class Bitmap {
 public:
  // Creates an empty bitmap (zero bits, no storage allocated).
  Bitmap();

  // Creates a bitmap holding "n" bits, all initialized to zero.
  explicit Bitmap(size_t n);

  ~Bitmap();

  Bitmap(const Bitmap&) = delete;
  Bitmap& operator=(const Bitmap&) = delete;

  // Returns the number of bits in the bitmap.
  size_t bits() const;

  // Replaces the contents with "n" zero bits, reallocating only when the
  // required word count changes.
  void Reset(size_t n);

  // Returns the value of bit i.  REQUIRES: i < bits()
  bool get(size_t i) const;

  // Sets bit i to one.  REQUIRES: i < bits()
  void set(size_t i);

  // Sets bit i to zero.  REQUIRES: i < bits()
  void clear(size_t i);

  // Returns the index of the first unset bit at or after "start", or bits()
  // when every bit from "start" onward is set.
  size_t FirstUnset(size_t start) const;

  // Returns a string of '0'/'1' characters of length bits(), bit 0 first.
  std::string ToString() const;

 private:
  typedef uint32_t Word;               // storage unit
  static constexpr size_t kBits = 32;  // bits per Word
  // Number of Words needed to hold n bits (rounded up).
  static size_t NumWords(size_t n) { return (n + kBits - 1) / kBits; }
  // Single-bit mask for position i within a Word (0 <= i < kBits).
  static Word Mask(size_t i) { return 1ull << i; }
  size_t nbits_;  // length of the bitmap in bits
  Word* word_;    // owned backing array of NumWords(nbits_) entries
};
// Empty bitmap: no bits, no allocation.
inline Bitmap::Bitmap() : nbits_(0), word_(nullptr) {}
// Delegates to Reset() so allocation/zeroing logic lives in one place.
inline Bitmap::Bitmap(size_t n) : Bitmap() { Reset(n); }
inline Bitmap::~Bitmap() { delete[] word_; }
inline size_t Bitmap::bits() const { return nbits_; }
// Reads bit i: locate the containing word, then test the bit within it.
inline bool Bitmap::get(size_t i) const {
  DCHECK_LT(i, nbits_);
  return word_[i / kBits] & Mask(i % kBits);
}
inline void Bitmap::set(size_t i) {
  DCHECK_LT(i, nbits_);
  word_[i / kBits] |= Mask(i % kBits);
}
inline void Bitmap::clear(size_t i) {
  DCHECK_LT(i, nbits_);
  word_[i / kBits] &= ~Mask(i % kBits);
}
}
}
#endif
#include "tsl/lib/core/bitmap.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include "absl/numeric/bits.h"
namespace tsl {
namespace core {
void Bitmap::Reset(size_t n) {
const size_t num_words = NumWords(n);
if (num_words != NumWords(nbits_)) {
Word* w = new Word[num_words];
delete[] word_;
word_ = w;
}
memset(word_, 0, sizeof(word_[0]) * num_words);
nbits_ = n;
}
// Returns 1 + the index of the least-significant set bit of w, or 0 when no
// bit is set (same contract as POSIX ffs()).
static size_t FindFirstSet(uint32_t w) {
  if (w == 0) return 0;
  return absl::countr_zero(w) + 1;
}
// Scans word by word for the first zero bit at or after `start`.
size_t Bitmap::FirstUnset(size_t start) const {
  if (start >= nbits_) {
    return nbits_;
  }
  // Mask of the bit positions below `start` within its word; OR-ing it in
  // makes those positions look "set" so the scan cannot report a bit
  // before `start`. (start % kBits < 32, so the mask fits in a Word.)
  size_t mask = (1ull << (start % kBits)) - 1;
  const size_t nwords = NumWords(nbits_);
  for (size_t i = start / kBits; i < nwords; i++) {
    // Only the first word keeps the mask; later words scan from bit 0.
    Word word = word_[i] | mask;
    mask = 0;
    // A set bit in ~word corresponds to an unset bit in word.
    size_t r = FindFirstSet(~word);
    if (r) {
      size_t result = i * kBits + (r - 1);
      // The last word may contain padding bits past nbits_ (always zero);
      // clamp so such a padding bit is reported as "no unset bit".
      if (result > nbits_) result = nbits_;
      return result;
    }
  }
  return nbits_;
}
// Renders the bitmap as '0'/'1' characters with bit 0 first.
std::string Bitmap::ToString() const {
  std::string rep(bits(), '0');
  for (size_t pos = 0; pos < nbits_; ++pos) {
    if (get(pos)) rep[pos] = '1';
  }
  return rep;
}
}
} | #include "tsl/lib/core/bitmap.h"
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace core {
namespace {
// Step function for test sizes: exhaustive (+1) below 75, then sparse (+25).
size_t NextSize(size_t n) {
  if (n < 75) return n + 1;
  return n + 25;
}
// Resets *bitmap to a random size in [0, 200) and sets each bit
// independently with probability 1/2, using the supplied PRNG.
static void MakeRandomBitmap(random::SimplePhilox* rnd, Bitmap* bitmap) {
  size_t n = rnd->Uniform(200);
  bitmap->Reset(n);
  for (size_t i = 0; i < n; i++) {
    if (rnd->OneIn(2)) bitmap->set(i);
  }
}
// Round-trips get/set/clear for every bit position over a range of sizes.
TEST(BitmapTest, Basic) {
  for (size_t n = 0; n < 200; n = NextSize(n)) {
    Bitmap bits(n);
    for (size_t i = 0; i < n; i++) {
      EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
      bits.set(i);
      EXPECT_TRUE(bits.get(i)) << n << " " << i << " " << bits.ToString();
      bits.clear(i);
      EXPECT_FALSE(bits.get(i)) << n << " " << i << " " << bits.ToString();
    }
  }
}

// Spot-checks the string rendering: bit 0 is the first character.
TEST(BitmapTest, ToString) {
  Bitmap bits(10);
  bits.set(1);
  bits.set(3);
  EXPECT_EQ(bits.ToString(), "0101000000");
}

// Builds bitmaps with a deterministic pattern (p leading ones, then repeating
// "one zero followed by q ones") and verifies that iterating FirstUnset
// visits exactly the zero bits.
TEST(BitmapTest, FirstUnset) {
  for (size_t n = 0; n < 200; n = NextSize(n)) {
    for (size_t p = 0; p <= 100; p++) {
      for (size_t q = 0; q <= 100; q++) {
        Bitmap bitmap(n);
        int one_count = 0;
        size_t i = 0;
        while (i < p && i < n) {
          one_count++;
          bitmap.set(i);
          i++;
        }
        while (i < n) {
          i++;  // leave a single zero bit
          for (size_t j = 0; j < q && i < n; j++, i++) {
            one_count++;
            bitmap.set(i);
          }
        }
        int seen = 0;
        size_t pos = 0;
        while (true) {
          pos = bitmap.FirstUnset(pos);
          if (pos == n) break;
          ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
          seen++;
          pos++;
        }
        EXPECT_EQ(seen, n - one_count) << " " << bitmap.ToString();
      }
    }
  }
}

// Cross-checks FirstUnset against a direct count of zero bits on random
// bitmaps; the fixed Philox seed makes the run deterministic.
TEST(BitmapTest, FirstUnsetRandom) {
  random::PhiloxRandom philox(301, 17);
  random::SimplePhilox rnd(&philox);
  for (int iter = 0; iter < 10000; iter++) {
    Bitmap bitmap;
    MakeRandomBitmap(&rnd, &bitmap);
    size_t zero_bits = 0;
    for (size_t i = 0; i < bitmap.bits(); i++) {
      if (!bitmap.get(i)) zero_bits++;
    }
    int seen = 0;
    size_t pos = 0;
    while (true) {
      pos = bitmap.FirstUnset(pos);
      if (pos == bitmap.bits()) break;
      ASSERT_FALSE(bitmap.get(pos)) << pos << " " << bitmap.ToString();
      seen++;
      pos++;
    }
    EXPECT_EQ(seen, zero_bits) << " " << bitmap.ToString();
  }
}
}
}
} |
241 | #ifndef XLA_SERVICE_ALL_GATHER_DECOMPOSER_H_
#define XLA_SERVICE_ALL_GATHER_DECOMPOSER_H_
#include <cstdint>
#include <functional>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
namespace xla {
// Pass that rewrites all-gather ops into an equivalent pattern: each operand
// is dynamic-update-sliced into a zero tensor of the output shape at the
// participant's offset, and the results are combined with an all-reduce.
class AllGatherDecomposer : public HloModulePass {
 public:
  // `should_decompose` selects which all-gather instructions to rewrite.
  explicit AllGatherDecomposer(
      std::function<bool(const HloAllGatherInstruction&)> should_decompose)
      : should_decompose_(std::move(should_decompose)) {}
  // Default-constructed pass decomposes every all-gather.
  AllGatherDecomposer()
      : should_decompose_(
            [](const HloAllGatherInstruction& ag) { return true; }) {}
  absl::string_view name() const override { return "all_gather_decomposer"; }
  using HloPassInterface::Run;
  // Runs the pass over all non-fusion computations; returns true if the
  // module changed.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;

 protected:
  // Builds the replacement subgraph (broadcast zero + dynamic-update-slice +
  // all-reduce) for one operand of `ag`; virtual so backends can customize.
  virtual HloInstruction* TranslateAllGatherToAllReducePerOperand(
      CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag,
      const Shape& output_shape, HloInstruction* operand, HloComputation* comp,
      int64_t ag_dim);
  virtual bool ShouldDecompose(const HloAllGatherInstruction& ag) const {
    return should_decompose_(ag);
  }
  // Rewrites one all-gather in place and removes the original instruction.
  absl::Status DecomposeAllGather(HloAllGatherInstruction* ag,
                                  HloComputation* comp);

 private:
  std::function<bool(const HloAllGatherInstruction&)> should_decompose_;
};
}
#endif
#include "xla/service/all_gather_decomposer.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Builds and registers a two-parameter scalar reduction computation for the
// given element type: logical OR for PRED, arithmetic ADD otherwise.
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {
  HloComputation::Builder builder("add");
  auto lhs = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(type, {}), "x"));
  auto rhs = builder.AddInstruction(HloInstruction::CreateParameter(
      1, ShapeUtil::MakeShape(type, {}), "y"));
  // Booleans are combined with OR; every other type with ADD.
  const HloOpcode opcode = (type == PRED) ? HloOpcode::kOr : HloOpcode::kAdd;
  builder.AddInstruction(HloInstruction::CreateBinary(
      ShapeUtil::MakeShape(type, {}), opcode, lhs, rhs));
  return module->AddEmbeddedComputation(builder.Build());
}
}
// Emits the replacement subgraph for one all-gather operand: place the local
// shard into a zero tensor of the full output shape at this participant's
// offset, then all-reduce (sum) so every participant ends up with the
// concatenation — semantically identical to the all-gather.
HloInstruction* AllGatherDecomposer::TranslateAllGatherToAllReducePerOperand(
    CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag,
    const Shape& output_shape, HloInstruction* operand, HloComputation* comp,
    int64_t ag_dim) {
  // Per-participant start offsets along the all-gather dimension.
  std::vector<HloInstruction*> start_indices =
      CreateStartIndicesForCollectiveDecomposition(
          group_mode, ag.replica_groups(), operand->shape(), ag_dim, comp)
          .value();
  // Zero-filled tensor of the full output shape to scatter the shard into.
  auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::Zero(output_shape.element_type())));
  zero = comp->AddInstruction(
      HloInstruction::CreateBroadcast(output_shape, zero, {}));
  auto dus = comp->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
      zero->shape(), zero, operand, start_indices));
  // The all-reduce reuses the original op's groups/channel/global-id config.
  auto ar = comp->AddInstruction(HloInstruction::CreateAllReduce(
      dus->shape(), {dus},
      MakeBinaryAdd(dus->shape().element_type(), comp->parent()),
      ag.device_list(),
      ag.constrain_layout(), ag.channel_id(),
      ag.use_global_device_ids()));
  return ar;
}
// Replaces one all-gather with its decomposed form and deletes the original.
// Variadic all-gathers are decomposed per operand and re-tupled.
absl::Status AllGatherDecomposer::DecomposeAllGather(
    HloAllGatherInstruction* ag, HloComputation* comp) {
  // Group mode depends on whether a channel id / global device ids are used.
  TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
                      GetCollectiveOpGroupMode(ag->channel_id().has_value(),
                                               ag->use_global_device_ids()));
  if (ag->operand_count() > 1) {
    // Variadic all-gather: decompose each operand, then rebuild the tuple.
    std::vector<HloInstruction*> tuple_inputs;
    for (int i = 0; i < ag->operand_count(); ++i) {
      auto* input_operand = ag->mutable_operand(i);
      const auto& output_shape = ag->shape().tuple_shapes(i);
      auto* ar = TranslateAllGatherToAllReducePerOperand(
          group_mode, *ag, output_shape, input_operand, comp,
          ag->all_gather_dimension());
      tuple_inputs.push_back(ar);
    }
    auto tup = comp->AddInstruction(HloInstruction::CreateTuple(tuple_inputs));
    TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(tup));
  } else {
    auto* ar = TranslateAllGatherToAllReducePerOperand(
        group_mode, *ag, ag->shape(), ag->mutable_operand(0), comp,
        ag->all_gather_dimension());
    TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(ar));
  }
  // Remove the now-unused all-gather (and any dead operands).
  TF_RETURN_IF_ERROR(comp->RemoveInstructionAndUnusedOperands(ag));
  return absl::OkStatus();
}
// Walks all non-fusion computations and decomposes every all-gather accepted
// by ShouldDecompose(). Returns true iff any instruction was rewritten.
absl::StatusOr<bool> AllGatherDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool modified = false;
  for (auto* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    for (auto* instruction : computation->MakeInstructionPostOrder()) {
      if (instruction->opcode() != HloOpcode::kAllGather) continue;
      auto* all_gather = Cast<HloAllGatherInstruction>(instruction);
      if (!ShouldDecompose(*all_gather)) continue;
      TF_RETURN_IF_ERROR(DecomposeAllGather(all_gather, computation));
      modified = true;
    }
  }
  return modified;
}
} | #include "xla/service/all_gather_decomposer.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllGatherDecomposerTest = HloTestBase;
// Cross-replica all-gather (no channel): the shard offset is derived from
// the replica id.
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGather) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  param0 = f32[10,20] parameter(0)
  ROOT ag = f32[10,80] all-gather(param0), replica_groups={}, dimensions={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  AllGatherDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::AllReduce(op::DynamicUpdateSlice(
          op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
          op::Multiply(op::ReplicaId(), op::Constant()))));
}

// With a channel id (cross-partition semantics) the offset is derived from
// the partition id instead of the replica id.
TEST_F(AllGatherDecomposerTest, CrossReplicaAndPartitionAllGather) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  param0 = f32[10,20] parameter(0)
  ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0}}, channel_id=1,
    dimensions={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  AllGatherDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::AllReduce(op::DynamicUpdateSlice(
          op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
          op::Multiply(op::PartitionId(), op::Constant()))));
}

// A single monotonically ordered group behaves like the empty (all-replicas)
// group: offset is replica-id times shard size, no table lookup needed.
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTrivialGroup) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  param0 = f32[10,20] parameter(0)
  ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0,1,2,3}},
    dimensions={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  AllGatherDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::AllReduce(op::DynamicUpdateSlice(
          op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
          op::Multiply(op::ReplicaId(), op::Constant()))));
}
// Permuted subgroups: the participant's position within its group must come
// from a constant-table lookup indexed by replica id.
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroups) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  param0 = f32[10,20] parameter(0)
  ROOT ag = f32[10,80] all-gather(param0),
    replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  AllGatherDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  auto id =
      AllOf(op::Shape("u32[]"),
            op::Reshape(op::DynamicSlice(op::Constant(), op::ReplicaId())));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllReduce(op::DynamicUpdateSlice(
                  op::Broadcast(op::Constant()), op::Parameter(0),
                  op::Constant(), op::Multiply(id, op::Constant()))));
}

// With use_global_device_ids the table is indexed by the flattened global id
// (replica_id * partition_count + partition_id).
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroupsGlobalIds) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  param0 = f32[10,20] parameter(0)
  ROOT ag = f32[10,80] all-gather(param0),
    replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}, channel_id=1,
    use_global_device_ids=true
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  AllGatherDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  auto global_id =
      op::Add(op::Multiply(op::ReplicaId(), op::Constant()), op::PartitionId());
  auto id = AllOf(op::Shape("u32[]"),
                  op::Reshape(op::DynamicSlice(op::Constant(), global_id)));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::AllReduce(op::DynamicUpdateSlice(
                  op::Broadcast(op::Constant()), op::Parameter(0),
                  op::Constant(), op::Multiply(id, op::Constant()))));
}

// Variadic all-gather: each operand is decomposed independently and the
// results are re-tupled.
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTuple) {
  const std::string module_str = R"(
HloModule module
ENTRY entry {
  param0 = f32[10,20] parameter(0)
  param1 = f32[10,16] parameter(1)
  ROOT ag = (f32[10,80], f32[10,64]) all-gather(param0, param1),
    replica_groups={}, dimensions={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule((module_str)));
  AllGatherDecomposer decomposer;
  TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(
          op::AllReduce(op::DynamicUpdateSlice(
              op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
              op::Multiply(op::ReplicaId(), op::Constant()))),
          op::AllReduce(op::DynamicUpdateSlice(
              op::Broadcast(op::Constant()), op::Parameter(1), op::Constant(),
              op::Multiply(op::ReplicaId(), op::Constant())))));
}
}
} |
242 | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TF_OP_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TEST_OP_TMPL_TF_OP_H_
#include "tensorflow/lite/kernels/shim/test_op/tmpl_op.h"
#include "tensorflow/lite/kernels/shim/tf_op_shim.h"
namespace tflite {
namespace shim {
// TensorFlow kernel shim for the templatized test op; AType/BType are the
// attribute-selected input element types. Inherits all behavior from the
// generic TfOpKernel wrapper.
template <typename AType, typename BType>
class TmplOpKernel : public TfOpKernel<TmplOp, AType, BType> {
 public:
  using TfOpKernel<TmplOp, AType, BType>::TfOpKernel;
};
}
}
#endif
#include "tensorflow/lite/kernels/shim/test_op/tmpl_tf_op.h"
#include <cstdint>
#include "tensorflow/core/framework/types.h"
namespace tflite {
namespace shim {
// Any concrete instantiation can supply OpName(); this one is reused for the
// op-definition registration and both kernel registrations below.
using TmplOpKernelInstance = TmplOpKernel<float, int32_t>;
// Registers the op definition (shape/attr metadata) once.
REGISTER_TF_OP_SHIM(TmplOpKernelInstance);
// CPU kernel for AType=float, BType=int32.
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
                            .Device(::tensorflow::DEVICE_CPU)
                            .TypeConstraint<float>("AType")
                            .TypeConstraint<int32_t>("BType"),
                        TmplOpKernel<float, int32_t>);
// CPU kernel for AType=int32, BType=int64.
REGISTER_KERNEL_BUILDER(Name(TmplOpKernelInstance::OpName())
                            .Device(::tensorflow::DEVICE_CPU)
                            .TypeConstraint<int32_t>("AType")
                            .TypeConstraint<int64_t>("BType"),
                        TmplOpKernel<int32_t, int64_t>);
}
} | #include <cstdint>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
namespace tflite {
namespace shim {
namespace {
using ::tensorflow::DT_FLOAT;
using ::tensorflow::DT_INT32;
using ::tensorflow::DT_INT64;
using ::tensorflow::FakeInput;
using ::tensorflow::NodeDefBuilder;
using ::tensorflow::TensorShape;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectTensorEqual;
// Fixture providing OpsTestBase helpers (node_def(), InitOp(), inputs).
class TmplOpTfTest : public ::tensorflow::OpsTestBase {};

// float + int32 inputs: 10.5 + 20 -> 30.5 (float output).
TEST_F(TmplOpTfTest, float_int32) {
  TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
                   .Attr("AType", DT_FLOAT)
                   .Attr("BType", DT_INT32)
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_INT32))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({}), {10.5});
  AddInputFromArray<int32_t>(TensorShape({}), {20});
  TF_ASSERT_OK(RunOpKernel());
  ExpectTensorEqual<float>(*GetOutput(0),
                           AsTensor<float>({30.5}, {}));
}

// int32 + int64 inputs: 10 + 20 -> 30.
// NOTE(review): the output is compared as float even though both inputs are
// integral — presumably the op always produces a float output regardless of
// AType/BType; confirm against the TmplOp shim definition.
TEST_F(TmplOpTfTest, int32_int64) {
  TF_ASSERT_OK(NodeDefBuilder("tmpl_op", "TemplatizedOperation")
                   .Attr("AType", DT_INT32)
                   .Attr("BType", DT_INT64)
                   .Input(FakeInput(DT_INT32))
                   .Input(FakeInput(DT_INT64))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<int32_t>(TensorShape({}), {10});
  AddInputFromArray<int64_t>(TensorShape({}), {20});
  TF_ASSERT_OK(RunOpKernel());
  ExpectTensorEqual<float>(*GetOutput(0), AsTensor<float>({30}, {}));
}
}
}
} |
243 | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_TOPOLOGY_VIEW_H_
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
namespace tensorflow {
namespace grappler {
// An immutable fanin/fanout index over a GraphDef: node names and indices
// map both ways, and per-node fanin/fanout lists are precomputed. The view
// borrows the GraphDef (and its node-name strings), which must outlive it.
class GraphTopologyView {
 public:
  GraphTopologyView() = default;
  // When skip_invalid_edges is true, edges referencing unknown nodes are
  // logged and skipped instead of failing initialization.
  explicit GraphTopologyView(bool skip_invalid_edges)
      : skip_invalid_edges_(skip_invalid_edges) {}

  // Builds the index from `graph`, merging in `ephemeral_edges` (edges not
  // present in the node defs); control inputs are optionally ignored.
  Status InitializeFromGraph(const GraphDef& graph,
                             absl::Span<const GraphView::Edge> ephemeral_edges,
                             bool ignore_control_edges);
  Status InitializeFromGraph(const GraphDef& graph,
                             absl::Span<const GraphView::Edge> ephemeral_edges);
  Status InitializeFromGraph(const GraphDef& graph, bool ignore_control_edges);
  Status InitializeFromGraph(const GraphDef& graph);

  bool is_initialized() const { return graph_ != nullptr; }
  int num_nodes() const { return num_nodes_; }
  const GraphDef* graph() const { return graph_; }

  // Lookup helpers; GetNode returns nullptr for unknown names.
  bool HasNode(absl::string_view node_name) const;
  const NodeDef* GetNode(absl::string_view node_name) const;
  const NodeDef* GetNode(int node_idx) const;
  const absl::optional<int> GetNodeIndex(absl::string_view node_name) const;
  const absl::optional<int> GetNodeIndex(const NodeDef& node) const;

  // Sorted, de-duplicated predecessor/successor index lists for a node.
  const absl::InlinedVector<int, 4>& GetFanin(int node_idx) const;
  const absl::InlinedVector<int, 2>& GetFanout(int node_idx) const;

 private:
  bool skip_invalid_edges_ = false;
  const GraphDef* graph_ = nullptr;  // borrowed; not owned
  int num_nodes_ = 0;
  // string_views alias node names inside *graph_.
  std::vector<absl::string_view> index_to_node_name_;
  absl::flat_hash_map<absl::string_view, int> node_name_to_index_;
  std::vector<absl::InlinedVector<int, 4>> fanins_;
  std::vector<absl::InlinedVector<int, 2>> fanouts_;
  // Returned for out-of-range queries in release builds.
  absl::InlinedVector<int, 4> empty_fanin_;
  absl::InlinedVector<int, 2> empty_fanout_;
};
}
}
#endif
#include "tensorflow/core/grappler/graph_topology_view.h"
#include <algorithm>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
// Sorts *v in place and removes duplicates, leaving each value exactly once.
template <typename T>
inline void SortAndRemoveDuplicates(T* v) {
  std::sort(v->begin(), v->end());
  auto new_end = std::unique(v->begin(), v->end());
  v->erase(new_end, v->end());
}
}
// Builds the topology index over `graph`. `ephemeral_edges` are extra edges
// (not present in the node defs) merged into the fanin/fanout lists; when
// `ignore_control_edges` is set, control inputs ("^node") are skipped.
Status GraphTopologyView::InitializeFromGraph(
    const GraphDef& graph,
    const absl::Span<const GraphView::Edge> ephemeral_edges,
    bool ignore_control_edges) {
  if (graph_ != nullptr) {
    return errors::InvalidArgument("GraphTopologyView is already initialized.");
  }
  graph_ = &graph;
  num_nodes_ = graph.node_size();
  // BUG FIX: use reserve(), not resize(). resize() pre-filled the vector
  // with num_nodes_ empty string_views, and the emplace_back calls below
  // then appended after them — every real name ended up at index
  // num_nodes_ + i instead of i.
  index_to_node_name_.reserve(num_nodes_);
  node_name_to_index_.rehash(num_nodes_);
  fanins_.resize(num_nodes_);
  fanouts_.resize(num_nodes_);
  // Build the name <-> index mappings. The string_views alias node names
  // inside `graph`, which stays alive because graph_ retains it.
  for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
    const NodeDef& node = graph.node(node_idx);
    node_name_to_index_.emplace(node.name(), node_idx);
    index_to_node_name_.emplace_back(node.name());
  }
  // Merge in the caller-supplied ephemeral edges.
  for (const GraphView::Edge& edge : ephemeral_edges) {
    const auto src = node_name_to_index_.find(edge.src.node->name());
    const bool valid_src = src != node_name_to_index_.end();
    if (!valid_src) {
      const string error_message =
          absl::StrCat("Non-existent src node: ", edge.src.node->name());
      if (skip_invalid_edges_) {
        VLOG(0) << "Skip error: " << error_message;
      } else {
        return errors::InvalidArgument(error_message);
      }
    }
    const auto dst = node_name_to_index_.find(edge.dst.node->name());
    const bool valid_dst = dst != node_name_to_index_.end();
    if (!valid_dst) {
      const string error_message =
          absl::StrCat("Non-existent dst node: ", edge.dst.node->name());
      if (skip_invalid_edges_) {
        VLOG(0) << "Skip error: " << error_message;
      } else {
        return errors::InvalidArgument(error_message);
      }
    }
    if (valid_dst && valid_src) {
      const int src_idx = src->second;
      const int dst_idx = dst->second;
      // NOTE(review): map values are never negative, so this control-edge
      // guard looks unreachable for ephemeral edges — presumably it was
      // meant to test the edge's port ids; confirm against GraphView::Edge.
      if (ignore_control_edges && (src_idx < 0 || dst_idx < 0)) {
        continue;
      }
      fanins_[dst_idx].push_back(src_idx);
      fanouts_[src_idx].push_back(dst_idx);
    }
  }
  // Add the edges declared by each node's input list.
  for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
    const NodeDef& node = graph.node(node_idx);
    fanins_[node_idx].reserve(node.input_size());
    for (const string& input : node.input()) {
      TensorId tensor = ParseTensorName(input);
      if (ignore_control_edges && IsTensorIdControl(tensor)) {
        continue;
      }
      const auto it = node_name_to_index_.find(tensor.node());
      const bool valid_input = it != node_name_to_index_.end();
      if (!valid_input) {
        const string error_message = absl::StrCat("Non-existent input ", input,
                                                  " in node ", node.name());
        if (skip_invalid_edges_) {
          VLOG(3) << "Skip error: " << error_message;
        } else {
          return errors::InvalidArgument(error_message);
        }
      }
      if (valid_input) {
        const int input_idx = it->second;
        fanins_[node_idx].push_back(input_idx);
        fanouts_[input_idx].push_back(node_idx);
      }
    }
    // Multiple inputs from the same producer collapse to one fanin entry.
    SortAndRemoveDuplicates(&fanins_[node_idx]);
  }
  for (int node_idx = 0; node_idx < num_nodes_; ++node_idx) {
    SortAndRemoveDuplicates(&fanouts_[node_idx]);
  }
  return absl::OkStatus();
}
// Convenience overload: no control-edge filtering.
Status GraphTopologyView::InitializeFromGraph(
    const GraphDef& graph,
    const absl::Span<const GraphView::Edge> ephemeral_edges) {
  return InitializeFromGraph(graph, ephemeral_edges,
                             false);
}
// Convenience overload: no ephemeral edges.
Status GraphTopologyView::InitializeFromGraph(const GraphDef& graph,
                                              bool ignore_control_edges) {
  return InitializeFromGraph(graph, absl::Span<GraphView::Edge>(),
                             ignore_control_edges);
}
// Convenience overload: no ephemeral edges, control edges kept.
Status GraphTopologyView::InitializeFromGraph(const GraphDef& graph) {
  return InitializeFromGraph(graph, absl::Span<GraphView::Edge>(),
                             false);
}
// Returns true if a node with this name exists in the indexed graph.
bool GraphTopologyView::HasNode(const absl::string_view node_name) const {
  DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
  const auto it = node_name_to_index_.find(node_name);
  return it != node_name_to_index_.end();
}
// Returns the node with this name, or nullptr if not present.
const NodeDef* GraphTopologyView::GetNode(
    const absl::string_view node_name) const {
  DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
  const auto it = node_name_to_index_.find(node_name);
  return it == node_name_to_index_.end() ? nullptr : &graph_->node(it->second);
}
// Returns the node at a known-valid index (DCHECK'd in debug builds).
const NodeDef* GraphTopologyView::GetNode(int node_idx) const {
  DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
  DCHECK(node_idx >= 0 && node_idx < num_nodes_) << "node_idx is out of range";
  return &graph_->node(node_idx);
}
// Returns the index of the named node; fatal in debug builds if absent,
// absl::nullopt in release builds.
const absl::optional<int> GraphTopologyView::GetNodeIndex(
    const absl::string_view node_name) const {
  DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
  const auto it = node_name_to_index_.find(node_name);
  DCHECK(it != node_name_to_index_.end()) << "Node doesn't exist in a graph";
  return it == node_name_to_index_.end() ? absl::nullopt
                                         : absl::make_optional(it->second);
}
// Convenience overload keyed by the node's name field.
const absl::optional<int> GraphTopologyView::GetNodeIndex(
    const NodeDef& node) const {
  return GetNodeIndex(node.name());
}
// Returns the sorted, de-duplicated predecessor indices of node_idx;
// out-of-range indices are fatal in debug builds and yield an empty list
// in release builds.
const absl::InlinedVector<int, 4>& GraphTopologyView::GetFanin(
    int node_idx) const {
  DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
  const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
  DCHECK(is_valid_node_idx) << "node_idx is out of range";
  return is_valid_node_idx ? fanins_[node_idx] : empty_fanin_;
}
// Successor indices, with the same out-of-range behavior as GetFanin().
const absl::InlinedVector<int, 2>& GraphTopologyView::GetFanout(
    int node_idx) const {
  DCHECK(is_initialized()) << "GraphTopologyView is not initialized";
  const bool is_valid_node_idx = node_idx >= 0 && node_idx < num_nodes_;
  DCHECK(is_valid_node_idx) << "node_idx is out of range";
  return is_valid_node_idx ? fanouts_[node_idx] : empty_fanout_;
}
}
} | #include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
// Test fixture with a helper that builds a GraphDef from (name, inputs)
// pairs, in declaration order.
class GraphTopologyViewTest : public ::testing::Test {
 protected:
  using NodeConfig = std::pair<string, std::vector<string>>;

  // Builds a GraphDef with one node per entry; node index = entry position.
  static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
    GraphDef graph;
    for (const NodeConfig& node : nodes) {
      const auto& node_name = node.first;
      const auto& node_inputs = node.second;
      NodeDef node_def;
      node_def.set_name(node_name);
      for (const string& input : node_inputs) {
        node_def.add_input(input);
      }
      *graph.add_node() = std::move(node_def);
    }
    return graph;
  }
};
// DAG a,b -> c -> d: checks both lookup directions and all fanin/fanout
// lists.
TEST_F(GraphTopologyViewTest, SimpleGraph) {
  const GraphDef graph = CreateGraph({
      {"a", {}},
      {"b", {}},
      {"c", {"a", "b"}},
      {"d", {"a", "c"}},
  });
  GraphTopologyView graph_view;
  TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
  EXPECT_TRUE(graph_view.is_initialized());
  const NodeDef* a_by_name = graph_view.GetNode("a");
  const NodeDef* a_by_idx = graph_view.GetNode(0);
  ASSERT_TRUE(a_by_name);
  ASSERT_TRUE(a_by_idx);
  EXPECT_EQ(a_by_name, a_by_idx);
  const NodeDef* b_by_name = graph_view.GetNode("b");
  const NodeDef* b_by_idx = graph_view.GetNode(1);
  ASSERT_TRUE(b_by_name);
  ASSERT_TRUE(b_by_idx);
  EXPECT_EQ(b_by_name, b_by_idx);
  const absl::optional<int> b_idx = graph_view.GetNodeIndex(*b_by_name);
  ASSERT_TRUE(b_idx.has_value());
  EXPECT_EQ(b_idx.value(), 1);
  const absl::optional<int> c_idx = graph_view.GetNodeIndex("c");
  ASSERT_TRUE(c_idx.has_value());
  EXPECT_EQ(c_idx.value(), 2);
  using Fanin = absl::InlinedVector<int, 4>;
  EXPECT_EQ(graph_view.GetFanin(0), Fanin());
  EXPECT_EQ(graph_view.GetFanin(1), Fanin());
  EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1}));
  EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
  using Fanout = absl::InlinedVector<int, 2>;
  EXPECT_EQ(graph_view.GetFanout(0), Fanout({2, 3}));
  EXPECT_EQ(graph_view.GetFanout(1), Fanout({2}));
  EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
  EXPECT_EQ(graph_view.GetFanout(3), Fanout());
}

// A cyclic graph (c <-> d) must still index cleanly; cycles are allowed.
TEST_F(GraphTopologyViewTest, GraphWithALoop) {
  const GraphDef graph = CreateGraph({
      {"a", {}},
      {"b", {}},
      {"c", {"a", "b", "d"}},
      {"d", {"a", "c"}},
  });
  GraphTopologyView graph_view;
  TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
  EXPECT_TRUE(graph_view.is_initialized());
  using Fanin = absl::InlinedVector<int, 4>;
  EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1, 3}));
  EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
  using Fanout = absl::InlinedVector<int, 2>;
  EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
  EXPECT_EQ(graph_view.GetFanout(3), Fanout({2}));
}

// Control input "^d": included by default, excluded when
// ignore_control_edges is true.
TEST_F(GraphTopologyViewTest, GraphWithControls) {
  const GraphDef graph = CreateGraph({
      {"a", {}},
      {"b", {}},
      {"c", {"a", "b", "^d"}},
      {"d", {"a", "c"}},
  });
  {
    GraphTopologyView graph_view;
    TF_CHECK_OK(graph_view.InitializeFromGraph(graph));
    EXPECT_TRUE(graph_view.is_initialized());
    using Fanin = absl::InlinedVector<int, 4>;
    EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1, 3}));
    EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
    using Fanout = absl::InlinedVector<int, 2>;
    EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
    EXPECT_EQ(graph_view.GetFanout(3), Fanout({2}));
  }
  {
    GraphTopologyView graph_view;
    TF_CHECK_OK(
        graph_view.InitializeFromGraph(graph, true));
    EXPECT_TRUE(graph_view.is_initialized());
    using Fanin = absl::InlinedVector<int, 4>;
    EXPECT_EQ(graph_view.GetFanin(2), Fanin({0, 1}));
    EXPECT_EQ(graph_view.GetFanin(3), Fanin({0, 2}));
    using Fanout = absl::InlinedVector<int, 2>;
    EXPECT_EQ(graph_view.GetFanout(2), Fanout({3}));
    EXPECT_EQ(graph_view.GetFanout(3), Fanout({}));
  }
}
}
} |
244 | #ifndef TENSORSTORE_DRIVER_JSON_JSON_CHANGE_MAP_H_
#define TENSORSTORE_DRIVER_JSON_JSON_CHANGE_MAP_H_
#include <string>
#include <string_view>
#include "absl/container/btree_map.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_pointer.h"
namespace tensorstore {
namespace internal_json_driver {
// Records a set of writes to sub-values of a JSON document, each keyed by a
// JSON Pointer, and can replay them on top of an existing document. Keys in
// the map are kept disjoint: no stored pointer contains another.
class JsonChangeMap {
 private:
  // Orders keys by JSON Pointer ordering (kLessThan/kContains sort before),
  // so an ancestor pointer sorts immediately before its descendants.
  struct MapCompare {
    using is_transparent = void;
    bool operator()(std::string_view a, std::string_view b) const {
      return json_pointer::Compare(a, b) < json_pointer::kEqual;
    }
  };

 public:
  using Map = absl::btree_map<std::string, ::nlohmann::json, MapCompare>;

  // Returns `existing` (restricted to `sub_value_pointer`) with all recorded
  // changes applied; fails if a change conflicts with `existing`'s structure.
  Result<::nlohmann::json> Apply(const ::nlohmann::json& existing,
                                 std::string_view sub_value_pointer = {}) const;

  // Returns true if the value at `sub_value_pointer` is fully determined by
  // the recorded changes (no read of the existing document is needed).
  bool CanApplyUnconditionally(std::string_view sub_value_pointer) const;

  // Records an assignment of `sub_value` to `sub_value_pointer`, subsuming
  // any previously-recorded changes underneath that pointer.
  absl::Status AddChange(std::string_view sub_value_pointer,
                         ::nlohmann::json sub_value);

  // Read-only access to the raw pointer -> value map.
  const Map& underlying_map() const { return map_; }

 private:
  Map map_;
};
}
}
#endif
#include "tensorstore/driver/json/json_change_map.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_map.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_json_driver {
// Computes the value at `sub_value_pointer` after applying the recorded
// changes on top of `existing`. Three cases, resolved via the btree's
// json-pointer ordering: an exact-match change, a change on an ancestor
// pointer, or zero-or-more changes on descendant pointers.
Result<::nlohmann::json> JsonChangeMap::Apply(
    const ::nlohmann::json& existing,
    std::string_view sub_value_pointer) const {
  Map::const_iterator changes_it = map_.lower_bound(sub_value_pointer),
                      changes_end = map_.end();
  // Case 1: an exact change for this pointer — return it directly; the
  // dereference only validates that `existing` is structurally compatible.
  if (changes_it != changes_end && changes_it->first == sub_value_pointer) {
    TENSORSTORE_RETURN_IF_ERROR(
        json_pointer::Dereference(existing, sub_value_pointer,
                                  json_pointer::kSimulateCreate),
        internal::ConvertInvalidArgumentToFailedPrecondition(_));
    return {std::in_place, changes_it->second};
  }
  // Case 2: an ancestor pointer has a recorded change; extract the requested
  // sub-value from that stored change. (With the map's ordering, the only
  // candidate ancestor is the entry just before the lower bound.)
  if (changes_it != map_.begin()) {
    auto prev_it = std::prev(changes_it);
    if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
        json_pointer::kContains) {
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto* modified_value,
          json_pointer::Dereference(
              prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
              json_pointer::kMustExist));
      // Still validate `existing` against the ancestor pointer.
      TENSORSTORE_RETURN_IF_ERROR(
          json_pointer::Dereference(existing, prev_it->first,
                                    json_pointer::kSimulateCreate),
          internal::ConvertInvalidArgumentToFailedPrecondition(_));
      return {std::in_place, *modified_value};
    }
  }
  // Case 3: start from the existing value (or "discarded" if absent) and
  // splice in every recorded change underneath `sub_value_pointer`.
  ::nlohmann::json new_value;
  {
    TENSORSTORE_ASSIGN_OR_RETURN(
        const ::nlohmann::json* restricted_existing,
        json_pointer::Dereference(existing, sub_value_pointer,
                                  json_pointer::kSimulateCreate));
    if (restricted_existing) {
      new_value = *restricted_existing;
    } else {
      new_value = ::nlohmann::json(::nlohmann::json::value_t::discarded);
    }
  }
  // Descendant changes are contiguous after the lower bound, by ordering.
  for (; changes_it != changes_end &&
         json_pointer::Compare(changes_it->first, sub_value_pointer) ==
             json_pointer::kContainedIn;
       ++changes_it) {
    TENSORSTORE_RETURN_IF_ERROR(
        json_pointer::Replace(new_value,
                              std::string_view(changes_it->first)
                                  .substr(sub_value_pointer.size()),
                              changes_it->second),
        internal::ConvertInvalidArgumentToFailedPrecondition(_));
  }
  return new_value;
}
// Returns true if the value at `sub_value_pointer` is fully determined by
// the recorded changes alone (i.e. `existing` need not be consulted).
bool JsonChangeMap::CanApplyUnconditionally(
    std::string_view sub_value_pointer) const {
  // An empty pointer maps directly to begin(); otherwise locate the first
  // entry ordered at or after the requested pointer.
  const auto it = sub_value_pointer.empty()
                      ? map_.begin()
                      : map_.lower_bound(sub_value_pointer);
  // Case 1: a change was recorded for exactly this pointer.
  if (it != map_.end() && it->first == sub_value_pointer) return true;
  // Case 2: the immediately preceding entry contains the requested pointer.
  if (it == map_.begin()) return false;
  const auto pred = std::prev(it);
  return json_pointer::Compare(pred->first, sub_value_pointer) ==
         json_pointer::kContains;
}
// Records that the value at `sub_value_pointer` should become `sub_value`,
// merging with any previously recorded changes.
absl::Status JsonChangeMap::AddChange(std::string_view sub_value_pointer,
                                      ::nlohmann::json sub_value) {
  // First entry ordered at or after the new pointer.
  auto it = map_.lower_bound(sub_value_pointer);
  if (it != map_.end()) {
    auto compare_result = json_pointer::Compare(sub_value_pointer, it->first);
    assert(compare_result <= json_pointer::kEqual);
    if (compare_result == json_pointer::kEqual) {
      // Exact match: overwrite the previously recorded change.
      it->second = std::move(sub_value);
      return absl::OkStatus();
    }
    // The new change subsumes any recorded changes nested beneath it; erase
    // every consecutive entry contained in `sub_value_pointer`.
    while (compare_result == json_pointer::kContains) {
      it = map_.erase(it);
      if (it == map_.end()) break;
      compare_result = json_pointer::Compare(sub_value_pointer, it->first);
    }
  }
  if (it != map_.begin()) {
    auto prev_it = std::prev(it);
    if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
        json_pointer::kContains) {
      // An existing change contains the new pointer: apply the new value in
      // place within that entry instead of adding a separate one.  Replace
      // fails (FailedPrecondition at callers) if the existing change's value
      // is structurally incompatible with the pointer.
      return json_pointer::Replace(
          prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
          std::move(sub_value));
    }
  }
  // Otherwise insert as an independent change, hinted at position `it`.
  map_.try_emplace(it, std::string(sub_value_pointer), std::move(sub_value));
  return absl::OkStatus();
}
}
} | #include "tensorstore/driver/json/json_change_map.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json_driver::JsonChangeMap;
using ::testing::ElementsAre;
using ::testing::Optional;
using ::testing::Pair;
TEST(JsonChangeMapTest, AddChangeValid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(false)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", true));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(true)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}, {"a", false}}))));
}
TEST(JsonChangeMapTest, AddChangeValidIndependent) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/e", "xx"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/a", "yy"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/a", MatchesJson("yy")),
Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}})),
Pair("/a/e", MatchesJson("xx"))));
}
TEST(JsonChangeMapTest, AddChangeInvalid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.AddChange("/a/b", 43),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyEmptyChangeMap) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}),
Optional(MatchesJson(::nlohmann::json{{"x", "y"}, {"z", "w"}})));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}, "/x"),
Optional(MatchesJson(::nlohmann::json("y"))));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", {{"b", {{"c", 42}}}}}}));
EXPECT_THAT(changes.Apply("old", "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyInvalidContainingChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply(false, "/a/b/c"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyChangeMapPriorNonContaining) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 10));
EXPECT_THAT(changes.Apply({{"b", 42}}, "/b"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply({{"e", "f"}}, "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"e", "f"}}),
Optional(MatchesJson(::nlohmann::json{
{"a", {{"b", {{"c", 42}}}}}, {"e", 42}, {"x", "y"}})));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply(42),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/4", 42));
EXPECT_THAT(changes.Apply({1, 2, 3}),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointer) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointerNoChanges) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestNewMember) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(::nlohmann::json::object_t{}, "/a"),
Optional(MatchesJson(::nlohmann::json{{"b", 42}})));
}
TEST(JsonChangeMapTest, ApplyIncompatibleChangeExactRequest) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, AddIncompatibleChanges) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", 42));
EXPECT_THAT(changes.AddChange("/a", 50),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/a\" cannot be applied "
"to number value: 42"));
}
TEST(JsonChangeMapTest, CanApplyUnconditionally) {
JsonChangeMap changes;
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/b/c"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"c", 42}}));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/c"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/d"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/x"));
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", false}}));
EXPECT_TRUE(changes.CanApplyUnconditionally(""));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a"));
}
} |
245 | #ifndef QUICHE_SPDY_CORE_HPACK_HPACK_ENTRY_H_
#define QUICHE_SPDY_CORE_HPACK_HPACK_ENTRY_H_
#include <cstddef>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_export.h"
namespace spdy {
inline constexpr size_t kHpackEntrySizeOverhead = 32;
// A (name, value) pair used as a lookup key for HPACK table entries.
// Holds non-owning views; the referenced strings must outlive the entry.
struct QUICHE_EXPORT HpackLookupEntry {
  absl::string_view name;
  absl::string_view value;
  // Case-sensitive equality on both fields.
  bool operator==(const HpackLookupEntry& other) const {
    return name == other.name && value == other.value;
  }
  // Hash support for absl hash containers; combines both fields.
  template <typename H>
  friend H AbslHashValue(H h, const HpackLookupEntry& entry) {
    return H::combine(std::move(h), entry.name, entry.value);
  }
};
// An owning (name, value) header entry as stored in an HPACK header table.
// Move-only: copies are disallowed to avoid accidental duplication of the
// owned strings.
class QUICHE_EXPORT HpackEntry {
 public:
  HpackEntry(std::string name, std::string value);
  HpackEntry(const HpackEntry&) = delete;
  HpackEntry& operator=(const HpackEntry&) = delete;
  HpackEntry(HpackEntry&&) = default;
  HpackEntry& operator=(HpackEntry&&) = default;
  absl::string_view name() const { return name_; }
  absl::string_view value() const { return value_; }
  // Size of an entry with the given name and value:
  // name length + value length + kHpackEntrySizeOverhead.
  static size_t Size(absl::string_view name, absl::string_view value);
  // Size of this entry.
  size_t Size() const;
  std::string GetDebugString() const;

 private:
  std::string name_;
  std::string value_;
};
}
#endif
#include "quiche/spdy/core/hpack/hpack_entry.h"
#include <cstddef>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace spdy {
// Takes ownership of both strings by move.
HpackEntry::HpackEntry(std::string name, std::string value)
    : name_(std::move(name)), value_(std::move(value)) {}
// Entry size: fixed 32-byte overhead (kHpackEntrySizeOverhead) plus the
// lengths of the name and value, as specified for HPACK dynamic tables.
size_t HpackEntry::Size(absl::string_view name, absl::string_view value) {
  return kHpackEntrySizeOverhead + name.size() + value.size();
}
// Size of this entry, computed via the static overload on the stored strings.
size_t HpackEntry::Size() const { return Size(name(), value()); }
// Human-readable representation, e.g. { name: "foo", value: "bar" }.
std::string HpackEntry::GetDebugString() const {
  std::string debug = absl::StrCat("{ name: \"", name_, "\"");
  absl::StrAppend(&debug, ", value: \"", value_, "\" }");
  return debug;
}
} | #include "quiche/spdy/core/hpack/hpack_entry.h"
#include "absl/hash/hash.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
namespace {
// Entries whose names differ must compare unequal and hash differently.
TEST(HpackLookupEntryTest, EntryNamesDiffer) {
  HpackLookupEntry entry1{"header", "value"};
  HpackLookupEntry entry2{"HEADER", "value"};
  EXPECT_FALSE(entry1 == entry2);
  EXPECT_NE(absl::Hash<HpackLookupEntry>()(entry1),
            absl::Hash<HpackLookupEntry>()(entry2));
}
// Entries whose values differ must compare unequal and hash differently.
TEST(HpackLookupEntryTest, EntryValuesDiffer) {
  HpackLookupEntry entry1{"header", "value"};
  HpackLookupEntry entry2{"header", "VALUE"};
  EXPECT_FALSE(entry1 == entry2);
  EXPECT_NE(absl::Hash<HpackLookupEntry>()(entry1),
            absl::Hash<HpackLookupEntry>()(entry2));
}
// Identical entries compare equal and hash identically.
TEST(HpackLookupEntryTest, EntriesEqual) {
  HpackLookupEntry entry1{"name", "value"};
  HpackLookupEntry entry2{"name", "value"};
  EXPECT_TRUE(entry1 == entry2);
  EXPECT_EQ(absl::Hash<HpackLookupEntry>()(entry1),
            absl::Hash<HpackLookupEntry>()(entry2));
}
// Accessors round-trip the constructor arguments; Size = 11 + 12 + 32 = 55.
TEST(HpackEntryTest, BasicEntry) {
  HpackEntry entry("header-name", "header value");
  EXPECT_EQ("header-name", entry.name());
  EXPECT_EQ("header value", entry.value());
  EXPECT_EQ(55u, entry.Size());
  EXPECT_EQ(55u, HpackEntry::Size("header-name", "header value"));
}
}
} |
246 | #ifndef TENSORFLOW_TSL_LIB_RANDOM_WEIGHTED_PICKER_H_
#define TENSORFLOW_TSL_LIB_RANDOM_WEIGHTED_PICKER_H_
#include <assert.h>
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
class SimplePhilox;
// Picks a random element from [0, N) where each element carries an int32
// weight; element i is chosen with probability weight(i) / total_weight().
// Implemented as a complete binary tree of partial sums so Pick and
// set_weight are O(log N).
class WeightedPicker {
 public:
  // Creates a picker over N elements, each with initial weight 1.
  explicit WeightedPicker(int N);
  ~WeightedPicker();
  // Returns an index in [0, N) chosen with probability proportional to its
  // weight, or -1 if all weights are zero.
  int Pick(SimplePhilox* rnd) const;
  // Deterministic variant: returns the element whose cumulative-weight
  // interval contains weight_index, or -1 if weight_index is outside
  // [0, total_weight()).
  int PickAt(int32_t weight_index) const;
  // Returns the weight of element `index`.
  int32 get_weight(int index) const;
  // Sets the weight of element `index`, updating all partial sums.
  void set_weight(int index, int32_t weight);
  // Sum of all element weights.
  int32 total_weight() const;
  int num_elements() const;
  // Assigns `weight` to every element.
  void SetAllWeights(int32_t weight);
  // Resizes to N elements and copies weights from `weights`.
  void SetWeightsFromArray(int N, const int32* weights);
  // Grows or shrinks to N elements; elements added by growth have weight 0.
  void Resize(int N);
  // Adds one element with the given weight.
  void Append(int32_t weight);
 private:
  int N_;           // Number of elements.
  int num_levels_;  // Depth of the partial-sum tree.
  // level_[l] holds LevelSize(l) entries; the last level holds per-element
  // weights and each interior node is the sum of its two children.
  int32** level_;
  static int LevelSize(int level) { return 1 << level; }
  // Recomputes all interior partial sums from the leaf weights.
  void RebuildTreeWeights();
  WeightedPicker(const WeightedPicker&) = delete;
  void operator=(const WeightedPicker&) = delete;
};
// Leaf level of the tree stores the raw per-element weights.
inline int32 WeightedPicker::get_weight(int index) const {
  DCHECK_GE(index, 0);
  DCHECK_LT(index, N_);
  return level_[num_levels_ - 1][index];
}
// The root of the partial-sum tree holds the sum of all weights.
inline int32 WeightedPicker::total_weight() const { return level_[0][0]; }
inline int WeightedPicker::num_elements() const { return N_; }
}
}
#endif
#include "tsl/lib/random/weighted_picker.h"
#include <string.h>
#include <algorithm>
#include "tsl/lib/random/simple_philox.h"
namespace tsl {
namespace random {
// Builds a picker over N elements, each with initial weight 1.
WeightedPicker::WeightedPicker(int N) {
  CHECK_GE(N, 0);
  N_ = N;
  // Smallest complete binary tree whose leaf level can hold N weights.
  num_levels_ = 1;
  while (LevelSize(num_levels_ - 1) < N) {
    num_levels_++;
  }
  // One array per level; level l holds 2^l partial sums.
  level_ = new int32*[num_levels_];
  for (int l = 0; l < num_levels_; l++) {
    level_[l] = new int32[LevelSize(l)];
  }
  SetAllWeights(1);
}
WeightedPicker::~WeightedPicker() {
  // Free each level's weight array, then the array of level pointers.
  for (int l = 0; l < num_levels_; l++) {
    delete[] level_[l];
  }
  delete[] level_;
}
// Returns a uniformly distributed value in [0, n), avoiding the modulo bias
// of a plain `Rand32() % n` via rejection sampling.
static int32 UnbiasedUniform(SimplePhilox* r, int32_t n) {
  CHECK_LE(0, n);
  const uint32 range = ~static_cast<uint32>(0);  // 2^32 - 1
  if (n == 0) {
    // Degenerate case: the multiply by zero yields 0 (one random value is
    // still consumed).
    return r->Rand32() * n;
  } else if (0 == (n & (n - 1))) {
    // n is a power of two: masking the low bits is already unbiased.
    return r->Rand32() & (n - 1);
  } else {
    // Reject the lowest `rem` values so the count of accepted values,
    // 2^32 - rem, is an exact multiple of n; then `rnd % n` is uniform.
    uint32 rem = (range % n) + 1;
    uint32 rnd;
    do {
      rnd = r->Rand32();
    } while (rnd < rem);
    return rnd % n;
  }
}
// Draws a weight index uniformly in [0, total_weight()) and maps it to an
// element via PickAt; returns -1 when every weight is zero.
int WeightedPicker::Pick(SimplePhilox* rnd) const {
  const int32 total = total_weight();
  if (total == 0) return -1;
  return PickAt(UnbiasedUniform(rnd, total));
}
// Maps a cumulative weight index to the element whose weight interval
// contains it; returns -1 for out-of-range indices.
int WeightedPicker::PickAt(int32_t weight_index) const {
  if (weight_index < 0 || weight_index >= total_weight()) return -1;
  int32_t position = weight_index;
  int index = 0;
  // Descend the partial-sum tree: go left if `position` falls within the
  // left child's total weight, otherwise go right and skip past it.
  for (int l = 1; l < num_levels_; l++) {
    const int32_t left_weight = level_[l][2 * index];
    if (position < left_weight) {
      index = 2 * index;
    } else {
      index = 2 * index + 1;
      position -= left_weight;
    }
  }
  CHECK_GE(index, 0);
  CHECK_LT(index, N_);
  CHECK_LE(position, level_[num_levels_ - 1][index]);
  return index;
}
// Updates the weight of element `index`, propagating the delta to every
// ancestor so all partial sums stay consistent.
void WeightedPicker::set_weight(int index, int32_t weight) {
  assert(index >= 0);
  assert(index < N_);
  const int32_t delta = weight - get_weight(index);
  for (int l = num_levels_ - 1; l >= 0; l--) {
    level_[l][index] += delta;
    index >>= 1;  // Parent position at the level above.
  }
}
// Assigns `weight` to every element, zero-fills the unused tail of the
// leaf level, and rebuilds the interior partial sums.
void WeightedPicker::SetAllWeights(int32_t weight) {
  int32* const leaf = level_[num_levels_ - 1];
  const int leaf_count = LevelSize(num_levels_ - 1);
  for (int i = 0; i < leaf_count; i++) {
    leaf[i] = (i < N_) ? weight : 0;
  }
  RebuildTreeWeights();
}
void WeightedPicker::SetWeightsFromArray(int N, const int32* weights) {
Resize(N);
int32* leaves = level_[num_levels_ - 1];
for (int i = 0; i < N_; i++) leaves[i] = weights[i];
for (int i = N_; i < LevelSize(num_levels_ - 1); i++) leaves[i] = 0;
RebuildTreeWeights();
}
void WeightedPicker::RebuildTreeWeights() {
for (int l = num_levels_ - 2; l >= 0; l--) {
int32* level = level_[l];
int32* children = level_[l + 1];
for (int i = 0; i < LevelSize(l); i++) {
level[i] = children[2 * i] + children[2 * i + 1];
}
}
}
// Adds one element with the given weight at index num_elements().
void WeightedPicker::Append(int32_t weight) {
  Resize(num_elements() + 1);
  set_weight(num_elements() - 1, weight);
}
// Changes the number of elements to `new_size`.  Shrinking zeroes the
// removed weights in place; growing builds a larger tree and copies the
// existing weights over (new elements get weight 0).
void WeightedPicker::Resize(int new_size) {
  CHECK_GE(new_size, 0);
  if (new_size <= LevelSize(num_levels_ - 1)) {
    // The current leaf level already has room: zero the weights of removed
    // elements via set_weight (keeps partial sums consistent) and shrink.
    for (int i = new_size; i < N_; i++) {
      set_weight(i, 0);
    }
    N_ = new_size;
    return;
  }
  assert(new_size > N_);
  // Build a picker sized for `new_size`, copy the existing leaf weights
  // into it, zero the rest, and rebuild its partial sums.
  WeightedPicker new_picker(new_size);
  int32* dst = new_picker.level_[new_picker.num_levels_ - 1];
  int32* src = this->level_[this->num_levels_ - 1];
  memcpy(dst, src, sizeof(dst[0]) * N_);
  memset(dst + N_, 0, sizeof(dst[0]) * (new_size - N_));
  new_picker.RebuildTreeWeights();
  // Steal new_picker's storage; our old storage is freed when new_picker
  // is destroyed at end of scope.
  std::swap(new_picker.N_, this->N_);
  std::swap(new_picker.num_levels_, this->num_levels_);
  std::swap(new_picker.level_, this->level_);
  assert(this->N_ == new_size);
}
}
} | #include "tsl/lib/random/weighted_picker.h"
#include <string.h>
#include <vector>
#include "tsl/lib/random/simple_philox.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
static void TestPicker(SimplePhilox* rnd, int size);
static void CheckUniform(SimplePhilox* rnd, WeightedPicker* picker, int trials);
static void CheckSkewed(SimplePhilox* rnd, WeightedPicker* picker, int trials);
static void TestPickAt(int items, const int32* weights);
TEST(WeightedPicker, Simple) {
PhiloxRandom philox(testing::RandomSeed(), 17);
SimplePhilox rnd(&philox);
{
VLOG(0) << "======= Zero-length picker";
WeightedPicker picker(0);
EXPECT_EQ(picker.Pick(&rnd), -1);
}
{
VLOG(0) << "======= Singleton picker";
WeightedPicker picker(1);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
}
{
VLOG(0) << "======= Grown picker";
WeightedPicker picker(0);
for (int i = 0; i < 10; i++) {
picker.Append(1);
}
CheckUniform(&rnd, &picker, 100000);
}
{
VLOG(0) << "======= Grown picker with zero weights";
WeightedPicker picker(1);
picker.Resize(10);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
}
{
VLOG(0) << "======= Shrink picker and check weights";
WeightedPicker picker(1);
picker.Resize(10);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
EXPECT_EQ(picker.Pick(&rnd), 0);
for (int i = 0; i < 10; i++) {
picker.set_weight(i, i);
}
EXPECT_EQ(picker.total_weight(), 45);
picker.Resize(5);
EXPECT_EQ(picker.total_weight(), 10);
picker.Resize(2);
EXPECT_EQ(picker.total_weight(), 1);
picker.Resize(1);
EXPECT_EQ(picker.total_weight(), 0);
}
}
TEST(WeightedPicker, BigWeights) {
PhiloxRandom philox(testing::RandomSeed() + 1, 17);
SimplePhilox rnd(&philox);
VLOG(0) << "======= Check uniform with big weights";
WeightedPicker picker(2);
picker.SetAllWeights(2147483646L / 3);
CheckUniform(&rnd, &picker, 100000);
}
TEST(WeightedPicker, Deterministic) {
VLOG(0) << "======= Testing deterministic pick";
static const int32 weights[] = {1, 0, 200, 5, 42};
TestPickAt(TF_ARRAYSIZE(weights), weights);
}
TEST(WeightedPicker, Randomized) {
PhiloxRandom philox(testing::RandomSeed() + 10, 17);
SimplePhilox rnd(&philox);
TestPicker(&rnd, 1);
TestPicker(&rnd, 2);
TestPicker(&rnd, 3);
TestPicker(&rnd, 4);
TestPicker(&rnd, 7);
TestPicker(&rnd, 8);
TestPicker(&rnd, 9);
TestPicker(&rnd, 10);
TestPicker(&rnd, 100);
}
static void TestPicker(SimplePhilox* rnd, int size) {
VLOG(0) << "======= Testing size " << size;
{
WeightedPicker picker(size);
picker.SetAllWeights(0);
for (int i = 0; i < 100; i++) EXPECT_EQ(picker.Pick(rnd), -1);
}
std::vector<int32> weights(size);
for (int elem = 0; elem < size; elem++) {
weights[elem] = 0;
}
for (int elem = 0; elem < size; elem++) {
WeightedPicker picker(size);
picker.SetAllWeights(0);
picker.set_weight(elem, elem + 1);
for (int i = 0; i < 100; i++) EXPECT_EQ(picker.Pick(rnd), elem);
weights[elem] = 10;
picker.SetWeightsFromArray(size, &weights[0]);
for (int i = 0; i < 100; i++) EXPECT_EQ(picker.Pick(rnd), elem);
weights[elem] = 0;
}
{
WeightedPicker picker(size);
CheckUniform(rnd, &picker, 100000);
}
if (size / 3 > 0) {
WeightedPicker picker(size / 3);
while (picker.num_elements() != size) {
picker.Append(1);
}
CheckUniform(rnd, &picker, 100000);
}
if (size <= 10) {
WeightedPicker picker(size);
int32_t weight = 1;
for (int elem = 0; elem < size; elem++) {
picker.set_weight(elem, weight);
weights[elem] = weight;
weight *= 2;
}
CheckSkewed(rnd, &picker, 1000000);
WeightedPicker array_picker(0);
array_picker.SetWeightsFromArray(size, &weights[0]);
CheckSkewed(rnd, &array_picker, 1000000);
}
}
// Verifies that `picker` selects each element approximately uniformly:
// after size * trials picks, every element's count must be within +/-10%
// of `trials`.
static void CheckUniform(SimplePhilox* rnd, WeightedPicker* picker,
                         int trials) {
  const int size = picker->num_elements();
  // std::vector instead of raw new[]/delete[]: RAII, no leak if the test
  // exits early, and value-initialized to zero.
  std::vector<int> count(size, 0);
  for (int i = 0; i < size * trials; i++) {
    const int elem = picker->Pick(rnd);
    EXPECT_GE(elem, 0);
    EXPECT_LT(elem, size);
    count[elem]++;
  }
  const int expected_min = int(0.9 * trials);
  const int expected_max = int(1.1 * trials);
  for (int i = 0; i < size; i++) {
    EXPECT_GE(count[i], expected_min);
    EXPECT_LE(count[i], expected_max);
  }
}
// Verifies an exponentially skewed distribution (weights doubling per
// element): each element's count should be roughly twice the previous
// one, with the observed ratio in [1.6, 2.4].
static void CheckSkewed(SimplePhilox* rnd, WeightedPicker* picker, int trials) {
  const int size = picker->num_elements();
  // std::vector instead of raw new[]/delete[]: RAII and zero-initialized.
  std::vector<int> count(size, 0);
  for (int i = 0; i < size * trials; i++) {
    const int elem = picker->Pick(rnd);
    EXPECT_GE(elem, 0);
    EXPECT_LT(elem, size);
    count[elem]++;
  }
  for (int i = 0; i < size - 1; i++) {
    LOG(INFO) << i << ": " << count[i];
    const float ratio = float(count[i + 1]) / float(count[i]);
    EXPECT_GE(ratio, 1.6f);
    EXPECT_LE(ratio, 2.4f);
  }
}
// Walks every cumulative weight index through PickAt and verifies each
// index maps to the element that owns that slice of the total weight.
static void TestPickAt(int items, const int32* weights) {
  WeightedPicker picker(items);
  picker.SetWeightsFromArray(items, weights);
  int weight_index = 0;
  for (int i = 0; i < items; ++i) {
    // Element i owns weights[i] consecutive indices.
    for (int j = 0; j < weights[i]; ++j) {
      int pick = picker.PickAt(weight_index);
      EXPECT_EQ(pick, i);
      ++weight_index;
    }
  }
  EXPECT_EQ(weight_index, picker.total_weight());
}
static void BM_Create(::testing::benchmark::State& state) {
int arg = state.range(0);
for (auto s : state) {
WeightedPicker p(arg);
}
}
BENCHMARK(BM_Create)->Range(1, 1024);
static void BM_CreateAndSetWeights(::testing::benchmark::State& state) {
int arg = state.range(0);
std::vector<int32> weights(arg);
for (int i = 0; i < arg; i++) {
weights[i] = i * 10;
}
for (auto s : state) {
WeightedPicker p(arg);
p.SetWeightsFromArray(arg, &weights[0]);
}
}
BENCHMARK(BM_CreateAndSetWeights)->Range(1, 1024);
static void BM_Pick(::testing::benchmark::State& state) {
int arg = state.range(0);
PhiloxRandom philox(301, 17);
SimplePhilox rnd(&philox);
WeightedPicker p(arg);
int result = 0;
for (auto s : state) {
result += p.Pick(&rnd);
}
VLOG(4) << result;
}
BENCHMARK(BM_Pick)->Range(1, 1024);
}
} |
247 | #ifndef AROLLA_EXPR_OPERATORS_STRINGS_REGISTER_OPERATORS_H_
#define AROLLA_EXPR_OPERATORS_STRINGS_REGISTER_OPERATORS_H_
#include "absl/status/status.h"
#include "arolla/expr/operators/registration.h"
namespace arolla::expr_operators {
absl::Status InitStrings();
AROLLA_DECLARE_EXPR_OPERATOR(StringsCompileRegex);
AROLLA_DECLARE_EXPR_OPERATOR(StringsContainsRegex);
AROLLA_DECLARE_EXPR_OPERATOR(StringsExtractRegex);
AROLLA_DECLARE_EXPR_OPERATOR(StringsJoin);
AROLLA_DECLARE_EXPR_OPERATOR(StringsJoinWithSeparator);
}
#endif
#include "arolla/expr/operators/strings/register_operators.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/backend_wrapping_operator.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operators/dynamic_lifting.h"
#include "arolla/expr/operators/register_operators.h"
#include "arolla/expr/operators/registration.h"
#include "arolla/expr/operators/strings/string_operators.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/strings/regex.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::RegisterOperator;
namespace tm = ::arolla::expr_operators::type_meta;
using tm::Binary;
using tm::CallableStrategy;
using tm::Chain;
using tm::Is;
using tm::LiftNthType;
using tm::Nth;
using tm::NthMatch;
using tm::Returns;
using tm::ScalarOrOptional;
using tm::ScalarTypeIs;
using tm::String;
using tm::ToOptional;
using tm::ToTestResult;
using tm::Unary;
}
// strings._compile_regex: backend operator, Text -> Regex.
AROLLA_DEFINE_EXPR_OPERATOR(StringsCompileRegex,
                            RegisterBackendOperator("strings._compile_regex",
                                                    Chain(Unary, Is<Text>,
                                                          Returns<Regex>)));
// strings._join_with_separator: variadic join, lifted dynamically; the
// first argument's (scalar-or-optional string) type determines the result.
AROLLA_DEFINE_EXPR_OPERATOR(
    StringsJoinWithSeparator,
    RegisterOperator(
        "strings._join_with_separator",
        LiftDynamically(std::make_shared<expr::BackendWrappingOperator>(
            "strings._join_with_separator",
            ExprOperatorSignature::MakeVariadicArgs(),
            CallableStrategy(Chain(ScalarOrOptional, String,
                                   LiftNthType(0)))))));
// Registers the low-level strings._contains_regex backend operator, then
// the user-facing strings.contains_regex built on top of it.
AROLLA_DEFINE_EXPR_OPERATOR(
    StringsContainsRegex, []() -> absl::StatusOr<expr::ExprOperatorPtr> {
      RETURN_IF_ERROR(
          RegisterOperator(
              "strings._contains_regex",
              LiftDynamically(std::make_shared<expr::BackendWrappingOperator>(
                  "strings._contains_regex",
                  ExprOperatorSignature{{"s"}, {"regex"}},
                  CallableStrategy(Chain(Binary, NthMatch(1, Is<Regex>), Nth(0),
                                         ScalarOrOptional, ScalarTypeIs<Text>,
                                         ToTestResult)))))
              .status());
      return RegisterOperator("strings.contains_regex", MakeContainsRegexOp());
    }());
// Registers the low-level strings._extract_regex backend operator, then
// the user-facing strings.extract_regex built on top of it.
AROLLA_DEFINE_EXPR_OPERATOR(
    StringsExtractRegex, []() -> absl::StatusOr<expr::ExprOperatorPtr> {
      RETURN_IF_ERROR(
          RegisterOperator(
              "strings._extract_regex",
              LiftDynamically(std::make_shared<expr::BackendWrappingOperator>(
                  "strings._extract_regex",
                  ExprOperatorSignature{{"s"}, {"regex"}},
                  CallableStrategy(Chain(Binary, NthMatch(1, Is<Regex>), Nth(0),
                                         ScalarOrOptional, ScalarTypeIs<Text>,
                                         ToOptional)))))
              .status());
      return RegisterOperator("strings.extract_regex", MakeExtractRegexOp());
    }());
// strings.join: user-facing join operator.
AROLLA_DEFINE_EXPR_OPERATOR(StringsJoin,
                            RegisterOperator("strings.join", MakeJoinOp()));
// Registers all strings.* operators exactly once; subsequent calls return
// the status cached from the first initialization.
absl::Status InitStrings() {
  static Indestructible<absl::Status> init_status([]() -> absl::Status {
    // Core and array operator registries must be initialized first.
    RETURN_IF_ERROR(InitCore());
    RETURN_IF_ERROR(InitArray());
    RETURN_IF_ERROR(RegisterStringsCompileRegex());
    RETURN_IF_ERROR(RegisterStringsJoinWithSeparator());
    RETURN_IF_ERROR(RegisterStringsContainsRegex());
    RETURN_IF_ERROR(RegisterStringsExtractRegex());
    RETURN_IF_ERROR(RegisterStringsJoin());
    return absl::OkStatus();
  }());
  return *init_status;
}
} | #include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::ExprAttributes;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
// Infers the output QType produced by `op` when applied to inputs of the
// given QTypes (test helper).
absl::StatusOr<QTypePtr> GetOutputQType(
    const ExprOperatorPtr& op, absl::Span<const QTypePtr> input_qtypes) {
  std::vector<ExprAttributes> inputs;
  inputs.reserve(input_qtypes.size());
  for (auto* input_qtype : input_qtypes) {
    inputs.emplace_back(input_qtype);
  }
  ASSIGN_OR_RETURN(auto output, op->InferAttributes(inputs));
  return output.qtype();
}
class RegisterOperatorsTest : public ::testing::Test {
protected:
void SetUp() override { ASSERT_OK(InitArolla()); }
};
TEST_F(RegisterOperatorsTest, PresenceAndOr) {
ASSERT_OK_AND_ASSIGN(auto pand_or,
expr::LookupOperator("core._presence_and_or"));
auto f64 = GetQType<double>();
auto i64 = GetQType<int64_t>();
auto i32 = GetQType<int32_t>();
EXPECT_THAT(GetOutputQType(pand_or, {i64, GetQType<OptionalUnit>(), i64}),
IsOkAndHolds(i64));
EXPECT_THAT(GetOutputQType(pand_or, {i64, GetQType<OptionalUnit>(), i32}),
IsOkAndHolds(i64));
EXPECT_THAT(GetOutputQType(pand_or, {i32, GetQType<OptionalUnit>(), i64}),
IsOkAndHolds(i64));
EXPECT_THAT(GetOutputQType(pand_or, {i32, GetQType<OptionalUnit>(), i32}),
IsOkAndHolds(i32));
auto oi64 = GetOptionalQType<int64_t>();
auto oi32 = GetOptionalQType<int32_t>();
EXPECT_THAT(GetOutputQType(pand_or, {oi32, GetQType<OptionalUnit>(), i64}),
IsOkAndHolds(i64));
EXPECT_THAT(GetOutputQType(pand_or, {oi64, GetQType<OptionalUnit>(), i32}),
IsOkAndHolds(i64));
EXPECT_THAT(GetOutputQType(pand_or, {i32, GetQType<OptionalUnit>(), oi64}),
IsOkAndHolds(oi64));
auto daunit = GetDenseArrayQType<Unit>();
auto dai64 = GetDenseArrayQType<int64_t>();
EXPECT_THAT(
GetOutputQType(pand_or, {oi32, daunit, dai64}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("expected all arguments to be scalar or optional scalar, "
"but got DENSE_ARRAY_UNIT for 1-th argument")));
EXPECT_THAT(GetOutputQType(pand_or, {GetQType<Unit>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected 3 but got 1")));
EXPECT_THAT(GetOutputQType(pand_or, {i64, GetQType<OptionalUnit>(), f64}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common QType for (INT64,FLOAT64)")));
}
TEST_F(RegisterOperatorsTest, PresenceAnd) {
ASSERT_OK_AND_ASSIGN(auto presence_and,
expr::LookupOperator("core.presence_and"));
EXPECT_THAT(
GetOutputQType(presence_and, {GetQType<int32_t>(), GetQType<bool>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected scalar type to be UNIT")));
}
TEST_F(RegisterOperatorsTest, ShortCircuitWhere) {
ASSERT_OK_AND_ASSIGN(auto where,
expr::LookupOperator("core._short_circuit_where"));
EXPECT_THAT(GetOutputQType(where, {GetQType<OptionalUnit>(),
GetQType<int64_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(GetOutputQType(where, {GetQType<OptionalUnit>(),
GetQType<float>(), GetQType<double>()}),
IsOkAndHolds(GetQType<double>()));
EXPECT_THAT(GetOutputQType(where, {GetQType<OptionalUnit>(),
GetQType<int64_t>(), GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common QType for (INT64,FLOAT32)")));
}
}
} |
248 | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_NUMERICAL_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_NUMERICAL_UTILS_H_
#include <cstdint>
#include <optional>
#include <utility>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
using QuantizedMultiplier = std::pair<int32_t, int32_t>;
using QuantizedRange = std::pair<int32_t, int32_t>;
QuantizedMultiplier QuantizeMultiplier(double double_multiplier);
QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
std::optional<double> rmin,
std::optional<double> rmax, int32_t qmin,
int32_t qmax);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <optional>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
// Decomposes `double_multiplier` into a Q31 fixed-point multiplier and a
// power-of-two shift such that
//   double_multiplier ~= quantized_multiplier * 2^(shift - 31).
// Multipliers below 1e-6 (including 0 and negatives) and results needing
// |shift| > 31 are flushed to {0, 0}.
QuantizedMultiplier QuantizeMultiplier(double double_multiplier) {
  if (double_multiplier < 1e-6) {
    return {0, 0};
  }
  int32_t shift;
  // frexp: double_multiplier = q * 2^shift with q in [0.5, 1).
  const double q = frexp(double_multiplier, &shift);
  int64_t quantized_multiplier = round(q * (1LL << 31));
  assert(quantized_multiplier <= (1LL << 31));
  // Rounding can push q * 2^31 to exactly 2^31, which overflows int32;
  // halve and compensate in the shift.
  if (quantized_multiplier == (1LL << 31)) {
    quantized_multiplier /= 2;
    ++shift;
  }
  assert(quantized_multiplier <= std::numeric_limits<int32_t>::max());
  if (shift > 31 || shift < -31) {
    return {0, 0};
  }
  return {static_cast<int32_t>(quantized_multiplier), shift};
}
// Computes the quantized [min, max] range for the given scale and
// zero_point, clamped to [qmin, qmax].  A missing rmin (rmax) leaves the
// lower (upper) end at qmin (qmax).
QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
                                       std::optional<double> rmin,
                                       std::optional<double> rmax, int32_t qmin,
                                       int32_t qmax) {
  // Quantize a real value: q = zero_point + round(r / scale).
  // The parameter is double (the original took float, silently truncating
  // the double-valued rmin/rmax and losing precision).
  auto quantize = [scale, zero_point](double value) {
    return zero_point + static_cast<int32_t>(std::round(value / scale));
  };
  if (rmin.has_value() && rmax.has_value()) {
    return {std::max(qmin, quantize(*rmin)), std::min(qmax, quantize(*rmax))};
  }
  if (rmin.has_value()) {
    return {std::max(qmin, quantize(*rmin)), qmax};
  }
  if (rmax.has_value()) {
    return {qmin, std::min(qmax, quantize(*rmax))};
  }
  return {qmin, qmax};
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <cmath>
#include <optional>
#include <gtest/gtest.h>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
namespace {
// Reconstructs the real multiplier encoded by a (Q31 mantissa, shift) pair.
double ComposeScale(const QuantizedMultiplier& input) {
  const double mantissa = input.first;
  const double scale = exp2(input.second - 31);
  return mantissa * scale;
}
// Round-trips representative multipliers through QuantizeMultiplier and
// checks that decoding the (mantissa, shift) pair via ComposeScale recovers
// the input.  Values below the 1e-6 cutoff must quantize to exactly zero.
TEST(NumericalUtils, QuantizeMultiplier) {
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e6)), 1.0e6);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e3)), 1.0e3);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(10.)), 10.);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(5.)), 5.);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(2.)), 2.);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(0.0)), 0.0);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0)), 1.0);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-1)), 1.0e-1);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-2)), 1.0e-2);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-3)), 1.0e-3);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-4)), 1.0e-4);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-5)), 1.0e-5);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-6)), 1.0e-6);
  // Below the representable threshold the multiplier collapses to zero.
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-7)), 0.0);
  ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-8)), 0.0);
}
// Exercises CalculateQuantizedRange over all combinations of present/absent
// rmin/rmax with positive, zero, and negative zero points.  With scale 1e-6
// the quantized bounds saturate at qmin/qmax except where a real bound of 0.0
// pins an edge of the range to the zero point.
TEST(NumericalUtils, ActivationRange) {
  auto a =
      CalculateQuantizedRange(1e-6, 0, std::nullopt, std::nullopt, -128, 127);
  ASSERT_EQ(a.first, -128);
  ASSERT_EQ(a.second, 127);
  auto b = CalculateQuantizedRange(1e-6, 0, 0.0, std::nullopt, -128, 127);
  ASSERT_EQ(b.first, 0);
  ASSERT_EQ(b.second, 127);
  auto c = CalculateQuantizedRange(1e-6, 0, -1.0, 1.0, -128, 127);
  ASSERT_EQ(c.first, -128);
  ASSERT_EQ(c.second, 127);
  auto d = CalculateQuantizedRange(1e-6, 0, 0.0, 6.0, -128, 127);
  ASSERT_EQ(d.first, 0);
  ASSERT_EQ(d.second, 127);
  auto e =
      CalculateQuantizedRange(1e-6, 100, std::nullopt, std::nullopt, -128, 127);
  ASSERT_EQ(e.first, -128);
  ASSERT_EQ(e.second, 127);
  auto f = CalculateQuantizedRange(1e-6, 100, 0.0, std::nullopt, -128, 127);
  ASSERT_EQ(f.first, 100);
  ASSERT_EQ(f.second, 127);
  auto g = CalculateQuantizedRange(1e-6, 100, -1.0, 1.0, -128, 127);
  ASSERT_EQ(g.first, -128);
  ASSERT_EQ(g.second, 127);
  auto h = CalculateQuantizedRange(1e-6, 100, 0.0, 6.0, -128, 127);
  ASSERT_EQ(h.first, 100);
  ASSERT_EQ(h.second, 127);
  auto i = CalculateQuantizedRange(1e-6, -100, std::nullopt, std::nullopt, -128,
                                   127);
  ASSERT_EQ(i.first, -128);
  ASSERT_EQ(i.second, 127);
  auto j = CalculateQuantizedRange(1e-6, -100, 0.0, std::nullopt, -128, 127);
  ASSERT_EQ(j.first, -100);
  ASSERT_EQ(j.second, 127);
  auto k = CalculateQuantizedRange(1e-6, -100, -1.0, 1.0, -128, 127);
  ASSERT_EQ(k.first, -128);
  ASSERT_EQ(k.second, 127);
  auto l = CalculateQuantizedRange(1e-6, -100, 0.0, 6.0, -128, 127);
  ASSERT_EQ(l.first, -100);
  ASSERT_EQ(l.second, 127);
}
}
}
} |
249 | #ifndef XLA_SERVICE_CONSTANT_VALUE_H_
#define XLA_SERVICE_CONSTANT_VALUE_H_
#include <string>
#include "absl/status/statusor.h"
#include "xla/literal.h"
#include "xla/util.h"
namespace xla {
// A signed or unsigned integer constant of up to 64 bits, tagged with its
// logical bit width.  The value is stored canonically in `value_`:
// sign-extended to 64 bits when signed, zero-extended (low bits kept)
// otherwise.  All arithmetic wraps modulo 2^bitwidth because every result is
// re-canonicalized by the constructor.
class ConstantValue {
 public:
  // Canonicalizes `value` to `bitwidth` bits.  For signed values this shifts
  // the payload into the top of an int64 and arithmetic-right-shifts it back,
  // sign-extending bit (bitwidth - 1); unsigned values keep only the low
  // `bitwidth` bits via KeepLowerBits.
  ConstantValue(uint64_t value, int32_t bitwidth, bool is_signed)
      : value_(is_signed
                   ? absl::bit_cast<uint64_t>(
                         absl::bit_cast<int64_t>(
                             value << (8 * sizeof(uint64_t) - bitwidth)) >>
                         (8 * sizeof(uint64_t) - bitwidth))
                   : KeepLowerBits(value, bitwidth)),
        bitwidth_(bitwidth),
        is_signed_(is_signed) {}
  // Factory helpers for common constants.
  static ConstantValue GetZero(int32_t bitwidth, bool is_signed) {
    return ConstantValue(0, bitwidth, is_signed);
  }
  static ConstantValue GetOne(int32_t bitwidth, bool is_signed) {
    return ConstantValue(1, bitwidth, is_signed);
  }
  static ConstantValue GetSigned(int64_t value, int32_t bitwidth) {
    return ConstantValue(absl::bit_cast<uint64_t>(value), bitwidth,
                         true);
  }
  static ConstantValue GetUnsigned(uint64_t value, int32_t bitwidth) {
    return ConstantValue(value, bitwidth, false);
  }
  // Builds a ConstantValue from a scalar integral literal; returns an error
  // for non-integral element types (see the definition for details).
  static absl::StatusOr<ConstantValue> FromLiteral(const Literal& literal);
  // Arithmetic: results are re-canonicalized by the constructor, so they
  // wrap modulo 2^bitwidth rather than overflow.
  ConstantValue add(const ConstantValue& other) const {
    return ConstantValue(value_ + other.value_, bitwidth_, is_signed_);
  }
  ConstantValue sub(const ConstantValue& other) const {
    return ConstantValue(value_ - other.value_, bitwidth_, is_signed_);
  }
  ConstantValue div(const ConstantValue& other) const;
  ConstantValue mod(const ConstantValue& other) const;
  ConstantValue mul(const ConstantValue& other) const;
  // Comparisons interpret both operands according to is_signed_.
  bool lt(const ConstantValue& other) const;
  bool gt(const ConstantValue& other) const;
  bool eq(const ConstantValue& other) const { return *this == other; }
  // Raw 64-bit reinterpretations of the canonical stored value.
  int64_t GetSignedValue() const { return absl::bit_cast<int64_t>(value_); }
  uint64_t GetUnsignedValue() const { return value_; }
  int32_t GetBitwidth() const { return bitwidth_; }
  bool IsSigned() const { return is_signed_; }
  // Equality requires identical value, bit width, AND signedness.
  bool operator==(const ConstantValue& other) const {
    return value_ == other.value_ && bitwidth_ == other.bitwidth_ &&
           is_signed_ == other.is_signed_;
  }
  std::string ToString() const;
 private:
  uint64_t value_;
  int32_t bitwidth_;
  bool is_signed_;
};
}
#endif
#include "xla/service/constant_value.h"
#include <string>
namespace xla {
// Converts a scalar integral Literal into a ConstantValue, preserving the
// element type's bit width and signedness.  Returns InvalidArgument for
// non-integral element types and CHECK-fails on non-scalar literals.
absl::StatusOr<ConstantValue> ConstantValue::FromLiteral(
    const Literal& literal) {
  CHECK_EQ(literal.shape().dimensions_size(), 0) << "Expected scalar literal";
  return primitive_util::PrimitiveTypeSwitch<absl::StatusOr<ConstantValue>>(
      [&](auto primitive_type_constant) -> absl::StatusOr<ConstantValue> {
        if constexpr (primitive_util::IsIntegralType(primitive_type_constant)) {
          // Widen the native value to uint64_t; the ConstantValue
          // constructor re-canonicalizes it for the declared bit width.
          return ConstantValue(
              static_cast<uint64_t>(
                  literal.GetFirstElement<
                      primitive_util::NativeTypeOf<primitive_type_constant>>()),
              primitive_util::BitWidth(primitive_type_constant),
              primitive_util::IsSignedIntegralType(primitive_type_constant));
        }
        return InvalidArgument("Unsupported type");
      },
      literal.shape().element_type());
}
// Integer division; both operands are reinterpreted as int64 when signed.
ConstantValue ConstantValue::div(const ConstantValue& other) const {
  if (is_signed_) {
    const int64_t lhs = absl::bit_cast<int64_t>(value_);
    const int64_t rhs = absl::bit_cast<int64_t>(other.value_);
    return ConstantValue(absl::bit_cast<uint64_t>(lhs / rhs), bitwidth_,
                         is_signed_);
  }
  return ConstantValue(value_ / other.value_, bitwidth_, is_signed_);
}
// Remainder; both operands are reinterpreted as int64 when signed (so the
// result takes the sign of the dividend, as in C++).
ConstantValue ConstantValue::mod(const ConstantValue& other) const {
  if (is_signed_) {
    const int64_t lhs = absl::bit_cast<int64_t>(value_);
    const int64_t rhs = absl::bit_cast<int64_t>(other.value_);
    return ConstantValue(absl::bit_cast<uint64_t>(lhs % rhs), bitwidth_,
                         is_signed_);
  }
  return ConstantValue(value_ % other.value_, bitwidth_, is_signed_);
}
// Multiplication; both operands are reinterpreted as int64 when signed.  The
// result is re-canonicalized by the constructor, so it wraps at bitwidth_.
ConstantValue ConstantValue::mul(const ConstantValue& other) const {
  if (is_signed_) {
    const int64_t lhs = absl::bit_cast<int64_t>(value_);
    const int64_t rhs = absl::bit_cast<int64_t>(other.value_);
    return ConstantValue(absl::bit_cast<uint64_t>(lhs * rhs), bitwidth_,
                         is_signed_);
  }
  return ConstantValue(value_ * other.value_, bitwidth_, is_signed_);
}
// Less-than that honors signedness: signed operands compare as int64.
bool ConstantValue::lt(const ConstantValue& other) const {
  if (is_signed_) {
    return absl::bit_cast<int64_t>(value_) <
           absl::bit_cast<int64_t>(other.value_);
  }
  return value_ < other.value_;
}
// Greater-than that honors signedness: signed operands compare as int64.
bool ConstantValue::gt(const ConstantValue& other) const {
  if (is_signed_) {
    return absl::bit_cast<int64_t>(value_) >
           absl::bit_cast<int64_t>(other.value_);
  }
  return value_ > other.value_;
}
// Renders the value in decimal, using the signed or unsigned interpretation
// according to is_signed_.
std::string ConstantValue::ToString() const {
  if (is_signed_) {
    return absl::StrCat(GetSignedValue());
  }
  return absl::StrCat(GetUnsignedValue());
}
} | #include "xla/service/constant_value.h"
#include <gtest/gtest.h>
#include "xla/literal_util.h"
namespace xla {
namespace {
class ConstantValueTest : public ::testing::Test {};
// GetZero yields value 0 with the requested width and signedness, for both
// the unsigned and signed variants.
TEST_F(ConstantValueTest, ZeroTest32) {
  ConstantValue zero = ConstantValue::GetZero(32, false);
  EXPECT_EQ(zero.GetSignedValue(), 0);
  EXPECT_EQ(zero.GetUnsignedValue(), 0);
  EXPECT_EQ(zero.GetBitwidth(), 32);
  EXPECT_FALSE(zero.IsSigned());
  ConstantValue zero_s = ConstantValue::GetZero(32, true);
  EXPECT_EQ(zero_s.GetSignedValue(), 0);
  EXPECT_EQ(zero_s.GetUnsignedValue(), 0);
  EXPECT_EQ(zero_s.GetBitwidth(), 32);
  EXPECT_TRUE(zero_s.IsSigned());
}
// GetOne yields value 1 with the requested width and signedness, for both
// the unsigned and signed variants.
TEST_F(ConstantValueTest, OneTest32) {
  ConstantValue one = ConstantValue::GetOne(32, false);
  EXPECT_EQ(one.GetSignedValue(), 1);
  EXPECT_EQ(one.GetUnsignedValue(), 1);
  EXPECT_EQ(one.GetBitwidth(), 32);
  EXPECT_FALSE(one.IsSigned());
  ConstantValue one_s = ConstantValue::GetOne(32, true);
  EXPECT_EQ(one_s.GetSignedValue(), 1);
  EXPECT_EQ(one_s.GetUnsignedValue(), 1);
  EXPECT_EQ(one_s.GetBitwidth(), 32);
  EXPECT_TRUE(one_s.IsSigned());
}
// 4194303 (2^22 - 1) is the largest signed 23-bit value and is preserved;
// 4194304 (2^22) overflows and wraps to -2^22 via sign extension.
TEST_F(ConstantValueTest, Signed23) {
  ConstantValue signed_number = ConstantValue::GetSigned(4194303, 23);
  EXPECT_EQ(signed_number.GetSignedValue(), 4194303);
  EXPECT_EQ(signed_number.GetBitwidth(), 23);
  EXPECT_TRUE(signed_number.IsSigned());
  ConstantValue signed_number_of = ConstantValue::GetSigned(4194304, 23);
  EXPECT_EQ(signed_number_of.GetSignedValue(), -4194304);
  EXPECT_EQ(signed_number_of.GetBitwidth(), 23);
  EXPECT_TRUE(signed_number_of.IsSigned());
}
// 8388607 (2^23 - 1) is the largest unsigned 23-bit value and is preserved;
// 8388608 (2^23) overflows and wraps to 0 when the low bits are kept.
TEST_F(ConstantValueTest, Unsigned23) {
  ConstantValue unsigned_number = ConstantValue::GetUnsigned(8388607, 23);
  EXPECT_EQ(unsigned_number.GetUnsignedValue(), 8388607);
  EXPECT_EQ(unsigned_number.GetBitwidth(), 23);
  EXPECT_FALSE(unsigned_number.IsSigned());
  ConstantValue unsigned_number_of = ConstantValue::GetUnsigned(8388608, 23);
  EXPECT_EQ(unsigned_number_of.GetUnsignedValue(), 0);
  EXPECT_EQ(unsigned_number_of.GetBitwidth(), 23);
  EXPECT_FALSE(unsigned_number_of.IsSigned());
}
// Round-trips scalar literals of each supported integral width through
// FromLiteral and checks signedness, bit width, and value.
TEST_F(ConstantValueTest, FromLiteral) {
  auto cv_8 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<int8_t>(-32)));
  EXPECT_TRUE(cv_8.ok());
  EXPECT_TRUE(cv_8->IsSigned());
  EXPECT_EQ(cv_8->GetBitwidth(), 8);
  EXPECT_EQ(cv_8->GetSignedValue(), -32);
  // Fixed: this case previously fed an int8_t literal and asserted
  // IsSigned(), duplicating the signed case above instead of covering uint8.
  auto cv_u8 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<uint8_t>(32)));
  EXPECT_TRUE(cv_u8.ok());
  EXPECT_FALSE(cv_u8->IsSigned());
  EXPECT_EQ(cv_u8->GetBitwidth(), 8);
  EXPECT_EQ(cv_u8->GetUnsignedValue(), 32);
  auto cv_16 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<int16_t>(32000)));
  EXPECT_TRUE(cv_16.ok());
  EXPECT_TRUE(cv_16->IsSigned());
  EXPECT_EQ(cv_16->GetBitwidth(), 16);
  EXPECT_EQ(cv_16->GetSignedValue(), 32000);
  auto cv_u16 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<uint16_t>(33000)));
  EXPECT_TRUE(cv_u16.ok());
  EXPECT_FALSE(cv_u16->IsSigned());
  EXPECT_EQ(cv_u16->GetBitwidth(), 16);
  EXPECT_EQ(cv_u16->GetUnsignedValue(), 33000);
  auto cv_32 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<int32_t>(-2000000000)));
  EXPECT_TRUE(cv_32.ok());
  EXPECT_TRUE(cv_32->IsSigned());
  EXPECT_EQ(cv_32->GetBitwidth(), 32);
  EXPECT_EQ(cv_32->GetSignedValue(), -2000000000);
  auto cv_u32 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<uint32_t>(3000000000)));
  EXPECT_TRUE(cv_u32.ok());
  EXPECT_FALSE(cv_u32->IsSigned());
  EXPECT_EQ(cv_u32->GetBitwidth(), 32);
  EXPECT_EQ(cv_u32->GetUnsignedValue(), 3000000000);
  auto cv_64 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<int64_t>(3000000000)));
  EXPECT_TRUE(cv_64.ok());
  EXPECT_TRUE(cv_64->IsSigned());
  EXPECT_EQ(cv_64->GetBitwidth(), 64);
  EXPECT_EQ(cv_64->GetSignedValue(), 3000000000);
  auto cv_u64 = ConstantValue::FromLiteral(
      LiteralUtil::CreateR0(static_cast<uint64_t>(6000000000)));
  EXPECT_TRUE(cv_u64.ok());
  EXPECT_FALSE(cv_u64->IsSigned());
  EXPECT_EQ(cv_u64->GetBitwidth(), 64);
  EXPECT_EQ(cv_u64->GetUnsignedValue(), 6000000000);
}
// add() for unsigned and signed operands, including wraparound past both
// ends of the 23-bit range.
TEST_F(ConstantValueTest, Add) {
  ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23);
  ConstantValue rhs = ConstantValue::GetUnsigned(1, 23);
  ConstantValue result = lhs.add(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 0);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetUnsigned(8388600, 23);
  rhs = ConstantValue::GetUnsigned(7, 23);
  result = lhs.add(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 8388607);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(4, 23);
  result = lhs.add(rhs);
  EXPECT_EQ(result.GetSignedValue(), -6);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
  // Signed underflow: -2^22 + -1 wraps to the signed maximum.
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(-1, 23);
  result = lhs.add(rhs);
  EXPECT_EQ(result.GetSignedValue(), 4194303);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
}
// sub() for unsigned and signed operands, including unsigned underflow and
// signed wraparound at the 23-bit boundary.
TEST_F(ConstantValueTest, Sub) {
  ConstantValue lhs = ConstantValue::GetUnsigned(8388607, 23);
  ConstantValue rhs = ConstantValue::GetUnsigned(1, 23);
  ConstantValue result = lhs.sub(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 8388606);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  // Unsigned underflow: 6 - 7 wraps to the unsigned maximum.
  lhs = ConstantValue::GetUnsigned(6, 23);
  rhs = ConstantValue::GetUnsigned(7, 23);
  result = lhs.sub(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 8388607);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(4, 23);
  result = lhs.sub(rhs);
  EXPECT_EQ(result.GetSignedValue(), -14);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
  // Signed underflow: -2^22 - 1 wraps to the signed maximum.
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(1, 23);
  result = lhs.sub(rhs);
  EXPECT_EQ(result.GetSignedValue(), 4194303);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
}
// div() for unsigned and signed operands; C++ division truncates toward
// zero, so -10 / 4 == -2.
TEST_F(ConstantValueTest, Div) {
  ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
  ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
  ConstantValue result = lhs.div(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 2);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetUnsigned(6, 23);
  rhs = ConstantValue::GetUnsigned(7, 23);
  result = lhs.div(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 0);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(4, 23);
  result = lhs.div(rhs);
  EXPECT_EQ(result.GetSignedValue(), -2);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(2, 23);
  result = lhs.div(rhs);
  EXPECT_EQ(result.GetSignedValue(), -2097152);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
}
// mod() for unsigned and signed operands; the C++ remainder takes the sign
// of the dividend, so -10 % 3 == -1.
TEST_F(ConstantValueTest, Mod) {
  ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
  ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
  ConstantValue result = lhs.mod(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 0);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetUnsigned(6, 23);
  rhs = ConstantValue::GetUnsigned(7, 23);
  result = lhs.mod(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 6);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(3, 23);
  result = lhs.mod(rhs);
  EXPECT_EQ(result.GetSignedValue(), -1);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(1, 23);
  result = lhs.mod(rhs);
  EXPECT_EQ(result.GetSignedValue(), 0);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
}
// mul() for unsigned and signed operands, including wraparound at the 23-bit
// boundary.
TEST_F(ConstantValueTest, Mul) {
  ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
  ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
  ConstantValue result = lhs.mul(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 4418);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  // Unsigned overflow: (2^23 - 1) * 2 wraps to 2^23 - 2.
  lhs = ConstantValue::GetUnsigned(8388607, 23);
  rhs = ConstantValue::GetUnsigned(2, 23);
  result = lhs.mul(rhs);
  EXPECT_EQ(result.GetUnsignedValue(), 8388606);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_FALSE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(3, 23);
  result = lhs.mul(rhs);
  EXPECT_EQ(result.GetSignedValue(), -30);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(2, 23);
  // Fixed copy-paste: this called mod() inside the Mul test.  -2^22 * 2
  // wraps to 0 in 23-bit two's complement, so the expectation is unchanged.
  result = lhs.mul(rhs);
  EXPECT_EQ(result.GetSignedValue(), 0);
  EXPECT_EQ(result.GetBitwidth(), 23);
  EXPECT_TRUE(result.IsSigned());
}
// lt()/gt()/eq() across unsigned and signed operands; eq() must also be
// symmetric.
TEST_F(ConstantValueTest, LtGtEq) {
  ConstantValue lhs = ConstantValue::GetUnsigned(94, 23);
  ConstantValue rhs = ConstantValue::GetUnsigned(47, 23);
  EXPECT_FALSE(lhs.lt(rhs));
  EXPECT_TRUE(lhs.gt(rhs));
  lhs = ConstantValue::GetUnsigned(8388607, 23);
  rhs = ConstantValue::GetUnsigned(2, 23);
  EXPECT_FALSE(lhs.lt(rhs));
  EXPECT_TRUE(lhs.gt(rhs));
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(3, 23);
  // Fixed: these operands were previously set up and immediately overwritten
  // without any assertion; a negative value must compare below a positive.
  EXPECT_TRUE(lhs.lt(rhs));
  EXPECT_FALSE(lhs.gt(rhs));
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(2, 23);
  EXPECT_TRUE(lhs.lt(rhs));
  EXPECT_FALSE(lhs.gt(rhs));
  lhs = ConstantValue::GetUnsigned(43, 23);
  rhs = ConstantValue::GetUnsigned(43, 23);
  EXPECT_TRUE(lhs.eq(rhs));
  EXPECT_TRUE(rhs.eq(lhs));
  lhs = ConstantValue::GetSigned(-10, 23);
  rhs = ConstantValue::GetSigned(-10, 23);
  EXPECT_TRUE(lhs.eq(rhs));
  EXPECT_TRUE(rhs.eq(lhs));
  lhs = ConstantValue::GetUnsigned(4194304, 23);
  rhs = ConstantValue::GetUnsigned(2, 23);
  EXPECT_FALSE(lhs.eq(rhs));
  EXPECT_FALSE(rhs.eq(lhs));
  lhs = ConstantValue::GetSigned(-4194304, 23);
  rhs = ConstantValue::GetSigned(2, 23);
  EXPECT_FALSE(lhs.eq(rhs));
  EXPECT_FALSE(rhs.eq(lhs));
}
}
} |
250 | #ifndef TENSORFLOW_COMPILER_TF2XLA_TF2XLA_H_
#define TENSORFLOW_COMPILER_TF2XLA_TF2XLA_H_
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "xla/client/client.h"
#include "xla/client/xla_computation.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
Status ConvertGraphDefToXla(GraphDef graph_def, const tf2xla::Config& config,
xla::Client* client,
xla::XlaComputation* computation);
Status ConvertGraphDefToXlaViaMlir(
GraphDef graph_def, const tf2xla::Config& config,
xla::XlaComputation* computation, absl::string_view debug_info_filename,
absl::string_view debug_info_path_begin_marker);
}
#endif
#include "tensorflow/compiler/tf2xla/tf2xla.h"
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/graph_compiler_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_computation.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
// Compiles `graph` into `*computation` targeting the XLA CPU JIT device via
// `client`.  Fails if any fetch turns out to be a compile-time constant, or
// if a variable's `readonly` flag in `config` disagrees with whether the
// computation actually updates it.
Status ConvertGraphToXla(std::unique_ptr<Graph> graph,
                         const tf2xla::Config& config, xla::Client* client,
                         xla::XlaComputation* computation) {
  XlaOpRegistry::RegisterCompilationKernels();
  // Pin every node onto the XLA CPU JIT device before compilation.
  for (Node* node : graph->nodes()) {
    node->set_assigned_device_name(
        absl::StrCat("/device:", DEVICE_CPU_XLA_JIT));
  }
  std::vector<XlaCompiler::Argument> xla_args;
  TF_RETURN_IF_ERROR(CreateXlaArgs(*graph, &xla_args));
  PopulateXlaArgs(config, &xla_args);
  XlaCompiler::Options compiler_options;
  compiler_options.client = client;
  compiler_options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
  compiler_options.flib_def = &graph->flib_def();
  compiler_options.graph_def_version = graph->versions().producer();
  compiler_options.allow_cpu_custom_calls = true;
  XlaCompiler compiler(compiler_options);
  XlaCompiler::CompilationResult result;
  XlaCompiler::CompileOptions options;
  options.alias_resource_update = true;
  TF_RETURN_IF_ERROR(compiler.CompileGraph(
      options, "tfcompile", std::move(graph), xla_args, &result));
  *computation = std::move(*result.computation);
  // Count (and log) outputs the compiler folded into constants; any such
  // fetch is an error reported below.
  int num_const_results = 0;
  for (int i = 0, end = result.outputs.size(); i < end; ++i) {
    if (result.outputs[i].is_constant) {
      ++num_const_results;
      LOG(ERROR) << "ConstRetVal index:" << i
                 << " value:" << result.outputs[i].constant_value.DebugString();
    }
  }
  if (num_const_results > 0) {
    return errors::Unimplemented(
        "Conversion from TensorFlow graph to XLA resulted in ",
        num_const_results,
        " constant results. The configuration of "
        "the output args (i.e. fetch ids) is probably wrong.");
  }
  {
    // Cross-check the config's readonly annotations against the resource
    // updates the compiler recorded.  Variables occupy the trailing
    // arguments, after the feeds, hence the starting input_index.
    std::vector<bool> updated_inputs(xla_args.size());
    for (const XlaCompiler::ResourceUpdate& update : result.resource_updates) {
      updated_inputs[update.input_index] = true;
    }
    int64_t input_index = xla_args.size() - config.variable_size();
    for (const tf2xla::Variable& variable : config.variable()) {
      if (variable.readonly() == updated_inputs[input_index]) {
        return errors::InvalidArgument(
            "Variable \"", variable.node_name(), "\" is marked as ",
            variable.readonly() ? "" : "not ", "readonly, but is ",
            updated_inputs[input_index] ? "" : "not ",
            "modified by the computation.");
      }
      ++input_index;
    }
  }
  return absl::OkStatus();
}
// Rewrites every VarHandleOp in the graph (and in all function-library
// functions) into the AOT-only variant.  Fails if a VarHandleOp restricts
// its allowed devices, since that cannot be honored in AOT compilation.
Status ConvertVarHandlesToAotVarHandles(GraphDef* graph_def) {
  const auto rewrite_node = [](NodeDef& node) -> Status {
    if (node.op() != "VarHandleOp") {
      return absl::OkStatus();
    }
    node.set_op(tfcompile::kXlaAotOnlyVarHandleOp);
    // Drop the (necessarily empty) allowed_devices attr so the rewritten op
    // validates against the AOT op's attr list.
    const auto& it = node.attr().find("allowed_devices");
    if (it != node.attr().end()) {
      if (!it->second.list().s().empty()) {
        return errors::InvalidArgument(
            "VarHandleOp with non-empty allowed devices is not supported.");
      }
      node.mutable_attr()->erase("allowed_devices");
    }
    return absl::OkStatus();
  };
  for (auto& node : *graph_def->mutable_node()) {
    TF_RETURN_IF_ERROR(rewrite_node(node));
  }
  for (auto& fn : *graph_def->mutable_library()->mutable_function()) {
    for (auto& node : *fn.mutable_node_def()) {
      TF_RETURN_IF_ERROR(rewrite_node(node));
    }
  }
  return absl::OkStatus();
}
}
// Compiles `graph_def` — with feeds/fetches/variables described by `config`
// — into an XLA computation using `client`.
Status ConvertGraphDefToXla(GraphDef graph_def, const tf2xla::Config& config,
                            xla::Client* client,
                            xla::XlaComputation* computation) {
  // Rewrite VarHandleOps first so graph initialization sees AOT-compatible
  // ops, then build the Graph and hand it to the compiler.
  TF_RETURN_IF_ERROR(ConvertVarHandlesToAotVarHandles(&graph_def));
  std::unique_ptr<Graph> graph;
  TF_RETURN_IF_ERROR(InitGraph(graph_def, config, &graph));
  return ConvertGraphToXla(std::move(graph), config, client, computation);
}
} | #include "tensorflow/compiler/tf2xla/tf2xla.h"
#include <vector>
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_computation.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
// Fixture that disables TensorFloat-32 execution for the duration of each
// test and re-enables it in the destructor.
class ConvertGraphDefToXlaWithTF32Disabled : public ::testing::Test {
 public:
  ConvertGraphDefToXlaWithTF32Disabled() {
    tsl::enable_tensor_float_32_execution(false);
  }
  ~ConvertGraphDefToXlaWithTF32Disabled() override {
    tsl::enable_tensor_float_32_execution(true);
  }
};
// Helpers that wrap plain values into AttrValue protos for NodeDef attrs.
AttrValue TypeAttrValue(DataType type) {
  AttrValue attr_value;
  SetAttrValue(type, &attr_value);
  return attr_value;
}
AttrValue StringAttrValue(StringPiece str) {
  AttrValue attr_value;
  SetAttrValue(str, &attr_value);
  return attr_value;
}
AttrValue IntAttrValue(int i) {
  AttrValue attr_value;
  SetAttrValue(i, &attr_value);
  return attr_value;
}
AttrValue IntVectorAttrValue(const std::vector<int>& ints) {
  AttrValue attr_value;
  SetAttrValue(ints, &attr_value);
  return attr_value;
}
// Builds a TensorShapeProto from a list of dimension sizes.
TensorShapeProto TensorShape(const std::vector<int>& dims) {
  TensorShapeProto shape;
  for (const int size : dims) {
    shape.add_dim()->set_size(size);
  }
  return shape;
}
// Builds a graph computing sum = x + y over int32 placeholders.
GraphDef SumGraph() {
  GraphDef graph_def;
  NodeDef* x = graph_def.add_node();
  x->set_name("x");
  x->set_op("Placeholder");
  (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
  NodeDef* y = graph_def.add_node();
  y->set_name("y");
  y->set_op("Placeholder");
  (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
  NodeDef* sum = graph_def.add_node();
  sum->set_name("sum");
  sum->set_op("Add");
  sum->add_input("x");
  sum->add_input("y");
  (*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32);
  return graph_def;
}
// Config that feeds scalar x and y and fetches sum.
tf2xla::Config SumConfig() {
  tf2xla::Config config;
  config.add_feed()->mutable_id()->set_node_name("x");
  config.add_feed()->mutable_id()->set_node_name("y");
  config.add_fetch()->mutable_id()->set_node_name("sum");
  return config;
}
// Compiles the x+y graph, executes it through the local client, and checks
// the transferred result; then corrupts a feed's output index and expects
// conversion to fail with InvalidArgument.
TEST(ConvertGraphDefToXla, Sum) {
  GraphDef graph_def = SumGraph();
  tf2xla::Config config = SumConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
  auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
  auto x_global_or = client->TransferToServer(x_literal);
  auto y_global_or = client->TransferToServer(y_literal);
  TF_EXPECT_OK(x_global_or.status());
  TF_EXPECT_OK(y_global_or.status());
  std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value());
  std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value());
  auto result_or =
      client->ExecuteAndTransfer(computation, {x_global.get(), y_global.get()});
  TF_EXPECT_OK(result_or.status());
  xla::Literal result = std::move(result_or.value());
  EXPECT_EQ("(\ns32[] 42\n)", result.ToString());
  // A nonsensical feed output index must be rejected.
  config.mutable_feed(0)->mutable_id()->set_output_index(
      123);
  EXPECT_TRUE(errors::IsInvalidArgument(
      ConvertGraphDefToXla(graph_def, config, client, &computation)));
}
// Builds a graph computing einsum = Einsum(x, y) with equation "ij,jk->ik"
// (a plain matmul) over float placeholders.
GraphDef EinsumGraph() {
  GraphDef graph_def;
  NodeDef* x = graph_def.add_node();
  x->set_name("x");
  x->set_op("Placeholder");
  (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* y = graph_def.add_node();
  y->set_name("y");
  y->set_op("Placeholder");
  (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* einsum = graph_def.add_node();
  einsum->set_name("einsum");
  einsum->set_op("Einsum");
  einsum->add_input("x");
  einsum->add_input("y");
  (*einsum->mutable_attr())["equation"] = StringAttrValue("ij,jk->ik");
  (*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT);
  (*einsum->mutable_attr())["N"] = IntAttrValue(2);
  return graph_def;
}
// Config that feeds two 2x2 matrices and fetches the einsum node.
tf2xla::Config EinsumConfig() {
  tf2xla::Config config;
  tf2xla::Feed* x_feed = config.add_feed();
  x_feed->mutable_id()->set_node_name("x");
  *x_feed->mutable_shape() = TensorShape({2, 2});
  tf2xla::Feed* y_feed = config.add_feed();
  y_feed->mutable_id()->set_node_name("y");
  *y_feed->mutable_shape() = TensorShape({2, 2});
  config.add_fetch()->mutable_id()->set_node_name("einsum");
  return config;
}
// With TF32 enabled (the process default), the Einsum must lower to exactly
// one dot whose operand precisions are both DEFAULT.
TEST(ConvertGraphDefToXla, EinsumIsConvertedToDotWithDefaultPrecision) {
  GraphDef graph_def = EinsumGraph();
  tf2xla::Config config = EinsumConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_dots = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "dot") {
        num_dots++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::DEFAULT);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::DEFAULT);
      }
    }
  }
  EXPECT_EQ(num_dots, 1);
}
// With TF32 disabled (via the fixture), the Einsum must lower to exactly one
// dot whose operand precisions are both HIGHEST.
TEST_F(ConvertGraphDefToXlaWithTF32Disabled,
       EinsumIsConvertedToDotWithHighestPrecision) {
  GraphDef graph_def = EinsumGraph();
  tf2xla::Config config = EinsumConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_dots = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "dot") {
        num_dots++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::HIGHEST);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::HIGHEST);
      }
    }
  }
  EXPECT_EQ(num_dots, 1);
}
// Builds a graph computing conv2d = Conv2D(x, y) over float placeholders
// with VALID padding and unit strides.
GraphDef Conv2DGraph() {
  GraphDef graph_def;
  NodeDef* x = graph_def.add_node();
  x->set_name("x");
  x->set_op("Placeholder");
  (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* y = graph_def.add_node();
  y->set_name("y");
  y->set_op("Placeholder");
  (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  // Local renamed from `einsum` (copy-paste residue) to match the op built.
  NodeDef* conv = graph_def.add_node();
  conv->set_name("conv2d");
  conv->set_op("Conv2D");
  conv->add_input("x");
  conv->add_input("y");
  (*conv->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT);
  (*conv->mutable_attr())["padding"] = StringAttrValue("VALID");
  (*conv->mutable_attr())["strides"] = IntVectorAttrValue({1, 1, 1, 1});
  return graph_def;
}
// Config that feeds two 1x1x2x2 tensors and fetches the conv2d node.
tf2xla::Config Conv2DConfig() {
  tf2xla::Config config;
  tf2xla::Feed* x_feed = config.add_feed();
  x_feed->mutable_id()->set_node_name("x");
  *x_feed->mutable_shape() = TensorShape({1, 1, 2, 2});
  tf2xla::Feed* y_feed = config.add_feed();
  y_feed->mutable_id()->set_node_name("y");
  *y_feed->mutable_shape() = TensorShape({1, 1, 2, 2});
  config.add_fetch()->mutable_id()->set_node_name("conv2d");
  return config;
}
// With TF32 enabled (the process default), the Conv2D must lower to exactly
// one convolution whose operand precisions are both DEFAULT.
TEST(ConvertGraphDefToXla, Conv2DIsConvertedToConvolutionWithDefaultPrecision) {
  GraphDef graph_def = Conv2DGraph();
  tf2xla::Config config = Conv2DConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_convolutions = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "convolution") {
        num_convolutions++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::DEFAULT);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::DEFAULT);
      }
    }
  }
  EXPECT_EQ(num_convolutions, 1);
}
// With TF32 disabled (via the fixture), the Conv2D must lower to exactly one
// convolution whose operand precisions are both HIGHEST.
TEST_F(ConvertGraphDefToXlaWithTF32Disabled,
       Conv2DIsConvertedToConvolutionWithHighestPrecision) {
  GraphDef graph_def = Conv2DGraph();
  tf2xla::Config config = Conv2DConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_convolutions = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "convolution") {
        num_convolutions++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::HIGHEST);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::HIGHEST);
      }
    }
  }
  EXPECT_EQ(num_convolutions, 1);
}
TEST(ConvertGraphDefToXla, SumWithUnusedArgument) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
NodeDef* unused = graph_def.add_node();
unused->set_name("unused");
unused->set_op("Placeholder");
(*unused->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
config.add_feed()->mutable_id()->set_node_name("unused");
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
xla::XlaComputation computation;
TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
auto x_global_or = client->TransferToServer(x_literal);
auto y_global_or = client->TransferToServer(y_literal);
auto unused_global_or = client->TransferToServer(y_literal);
TF_EXPECT_OK(x_global_or.status());
TF_EXPECT_OK(y_global_or.status());
TF_EXPECT_OK(unused_global_or.status());
std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value());
std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value());
std::unique_ptr<xla::GlobalData> unused_global =
std::move(unused_global_or.value());
auto result_or = client->ExecuteAndTransfer(
computation, {x_global.get(), y_global.get(), unused_global.get()});
TF_EXPECT_OK(result_or.status());
xla::Literal result = std::move(result_or.value());
EXPECT_EQ("(\ns32[] 42\n)", result.ToString());
}
}
} |
251 | #ifndef TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_H_
#define TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_H_
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bad_indices_policy.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
class OpKernelContext;
class Tensor;
namespace functor {
// Device functor that performs the actual gather: for each row of `Tindices`
// it copies a `slice_size`-element slice of `Tparams` into the matching row
// of `Tout`.  The return value is consumed by DoGatherNd as the row number of
// the first out-of-range index tuple, or a negative value when all indices
// are valid.  `Tscratch` is device scratch space (its use is
// implementation-specific — see the per-device definitions).
template <typename Device, typename T, typename Index, int IXDIM>
struct GatherNdSlice {
  Index operator()(const Device& d, const Index slice_size,
                   typename TTypes<int32>::Scalar Tscratch,
                   typename TTypes<T, IXDIM + 1>::ConstTensor Tparams,
                   typename TTypes<Index>::ConstMatrix Tindices,
                   typename TTypes<T>::Matrix Tout);
};
// Gathers slices of `params` into `*out` according to the index tuples stored
// along the innermost dimension of `indices`.
//
// `*out` is allocated here (as a temporary owned by the caller's context) with
// shape indices.shape[:-1] + params.shape[indices_nd:].  Returns
// InvalidArgument for shape problems, overflow of the `Index` type, or (policy
// permitting) out-of-range indices; bad indices are checked on CPU under the
// default policy or anywhere under kError.
template <typename Device, typename T, typename Index>
Status DoGatherNd(
    OpKernelContext* c, const Tensor& params, const Tensor& indices,
    Tensor* out,
    BadIndicesPolicy bad_indices_policy = BadIndicesPolicy::kDefault) {
  if (!TensorShapeUtils::IsVectorOrHigher(params.shape())) {
    return errors::InvalidArgument("params must be at least a vector");
  }
  if (!TensorShapeUtils::IsVectorOrHigher(indices.shape())) {
    return errors::InvalidArgument("indices must be at least a vector");
  }
  if (indices.dim_size(indices.dims() - 1) > params.dims()) {
    return errors::InvalidArgument(
        "index innermost dimension length must be <= params rank; saw: ",
        indices.dim_size(indices.dims() - 1), " vs. ", params.dims());
  }
  const TensorShape& indices_shape(indices.shape());
  // Length of each index tuple (the innermost indices dimension).
  const int64_t indices_nd = indices_shape.dim_size(indices_shape.dims() - 1);
  // Number of index tuples, computed once in 64-bit arithmetic so the
  // overflow check below is meaningful.  (Previously this product was
  // recomputed a second time in `Index` arithmetic as `N_result`; the
  // already-checked value is reused instead.)
  int64_t N_big = 1;
  for (int i = 0; i < indices_shape.dims() - 1; ++i) {
    N_big *= indices_shape.dim_size(i);
  }
  if (N_big > std::numeric_limits<int>::max()) {
    return errors::InvalidArgument(
        "indices has too many elements for int indexing: ", N_big, " > ",
        std::numeric_limits<int>::max());
  }
  if (params.NumElements() > std::numeric_limits<Index>::max()) {
    return errors::InvalidArgument("params.NumElements() too large for ",
                                   DataTypeString(DataTypeToEnum<Index>::v()),
                                   " indexing: ", params.NumElements(), " > ",
                                   std::numeric_limits<Index>::max());
  }
  const Index N_result = static_cast<Index>(N_big);
  const TensorShape& params_shape(params.shape());
  const Index total_nd = params_shape.dims();
  // Result shape: indices.shape[:-1] + params.shape[indices_nd:].
  TensorShape result_shape(indices_shape);
  result_shape.RemoveLastDims(1);
  int64_t slice_size_big = 1;
  for (Index i = indices_nd; i < total_nd; ++i) {
    slice_size_big *= params_shape.dim_size(i);
    TF_RETURN_IF_ERROR(result_shape.AddDimWithStatus(params_shape.dim_size(i)));
  }
  if (slice_size_big > std::numeric_limits<Index>::max()) {
    return errors::InvalidArgument(
        "slice size is too large for indexing: ", slice_size_big, " > ",
        std::numeric_limits<Index>::max());
  }
  const Index slice_size = static_cast<Index>(slice_size_big);
  TF_RETURN_IF_ERROR(
      c->allocate_temp(DataTypeToEnum<T>::value, result_shape, out));
  if (N_result > 0) {
    if (params_shape.num_elements() == 0) {
      return errors::InvalidArgument(
          "Requested more than 0 entries, but "
          "params is empty. Params shape: ",
          params_shape.DebugString());
    }
    auto indices_mat = indices.flat_inner_dims<Index>();
    Index bad_i = -1;  // row of first out-of-range index tuple, or -1
    auto out_mat = out->shaped<T, 2>({N_result, slice_size});
    Tensor scratch;
    TF_RETURN_IF_ERROR(c->allocate_temp(DT_INT32, TensorShape(), &scratch));
    auto scratch_scalar = scratch.scalar<int32>();
    // Dispatch on the tuple length so the functor sees a statically-ranked
    // view of params.
    switch (indices_nd) {
#define PARAMS_CASE(IXDIM)                                                  \
  case IXDIM: {                                                             \
    functor::GatherNdSlice<Device, T, Index, IXDIM> func;                   \
    auto params_flat = params.flat_outer_dims<T, IXDIM + 1>();              \
    bad_i = func(c->eigen_device<Device>(), slice_size, scratch_scalar,     \
                 params_flat, indices_mat, out_mat);                        \
  } break
      PARAMS_CASE(0);
      PARAMS_CASE(1);
      PARAMS_CASE(2);
      PARAMS_CASE(3);
      PARAMS_CASE(4);
      PARAMS_CASE(5);
      PARAMS_CASE(6);
      PARAMS_CASE(7);
#undef PARAMS_CASE
      default:
        return errors::InvalidArgument(
            "Only indices.shape[-1] values between 1 and 7 "
            "are currently supported. Requested rank: ",
            indices_nd);
    }
    using CPUDevice = Eigen::ThreadPoolDevice;
    // Bad indices are reported on CPU under the default policy, or on any
    // device when the policy is explicitly kError; otherwise they are
    // silently ignored by the functor.
    const bool check_bad_indices =
        ((std::is_same<Device, CPUDevice>::value &&
          bad_indices_policy == BadIndicesPolicy::kDefault) ||
         bad_indices_policy == BadIndicesPolicy::kError);
    if (check_bad_indices && bad_i >= 0) {
      auto shape = indices.shape();
      shape.RemoveLastDims(1);
      return errors::InvalidArgument(
          "indices", SliceDebugString(shape, bad_i), " = [",
          str_util::Join(
              gtl::ArraySlice<Index>(&indices_mat(bad_i, 0), indices_nd), ", "),
          "] does not index into param shape ", params.shape().DebugString(),
          ", node name: ", c->op_kernel().name());
    }
  }
  return absl::OkStatus();
}
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/gather_nd_op.h"
#include <string>
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/bad_indices_policy.h"
namespace tensorflow {
namespace {
constexpr char kBadIndicesPolicyAtrr[] = "bad_indices_policy";
}
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
// OpKernel wrapper for GatherNd: validates the signature and the optional
// "bad_indices_policy" attribute at construction, then delegates all work to
// functor::DoGatherNd on each Compute call.
template <typename Device, typename T, typename Index>
class GatherNdOp : public OpKernel {
 public:
  explicit GatherNdOp(OpKernelConstruction* c) : OpKernel(c) {
    const DataType dt = DataTypeToEnum<T>::v();
    const DataType index_t = DataTypeToEnum<Index>::v();
    OP_REQUIRES_OK(c, c->MatchSignature({dt, index_t}, {dt}));
    // The attribute is optional; when absent, bad_indices_policy_ keeps its
    // kDefault member initializer.
    if (c->HasAttr(kBadIndicesPolicyAtrr)) {
      std::string bad_indices_policy_str;
      OP_REQUIRES_OK(
          c, c->GetAttr(kBadIndicesPolicyAtrr, &bad_indices_policy_str));
      absl::StatusOr<BadIndicesPolicy> bad_indices_policy =
          BadIndicesPolicyFromString(bad_indices_policy_str);
      OP_REQUIRES_OK(c, bad_indices_policy.status());
      bad_indices_policy_ = *bad_indices_policy;
    }
  }
  void Compute(OpKernelContext* c) override {
    const Tensor& params = c->input(0);
    const Tensor& indices = c->input(1);
    Tensor out;
    OP_REQUIRES_OK(c, functor::DoGatherNd<Device, T, Index>(
                          c, params, indices, &out, bad_indices_policy_));
    c->set_output(0, out);
  }
 private:
  BadIndicesPolicy bad_indices_policy_ = BadIndicesPolicy::kDefault;
};
// Registers GatherNd on a device for one (value type, index type) pair.
// The AttrConstraint rejects any bad_indices_policy string not listed.
#define REGISTER_GATHER_ND_FULL(dev, type, index_type)            \
  REGISTER_KERNEL_BUILDER(                                        \
      Name("GatherNd")                                            \
          .Device(DEVICE_##dev)                                   \
          .TypeConstraint<type>("Tparams")                        \
          .TypeConstraint<index_type>("Tindices")                 \
          .AttrConstraint<std::string>(                           \
              "bad_indices_policy",                               \
              {"", "DEFAULT", "ERROR", "IGNORE"}),                \
      GatherNdOp<dev##Device, type, index_type>)
// CPU supports int16/int32/int64 index types.
#define REGISTER_GATHER_ND_CPU(type)          \
  REGISTER_GATHER_ND_FULL(CPU, type, int16);  \
  REGISTER_GATHER_ND_FULL(CPU, type, int32);  \
  REGISTER_GATHER_ND_FULL(CPU, type, int64_t)
// Instantiate CPU kernels for all standard, quantized, and float8 types.
TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_QUANTIZED_TYPES(REGISTER_GATHER_ND_CPU);
TF_CALL_float8_e5m2(REGISTER_GATHER_ND_CPU);
TF_CALL_float8_e4m3fn(REGISTER_GATHER_ND_CPU);
#undef REGISTER_GATHER_ND_CPU
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
// Forward-declare the GPU specializations of GatherNdSlice (defined in the
// .cu.cc file) so this translation unit can reference them.
#define DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, NDIM)             \
  template <>                                                    \
  Index GatherNdSlice<GPUDevice, T, Index, NDIM>::operator()(    \
      const GPUDevice& d, const Index slice_size,                \
      typename TTypes<int32>::Scalar Tscratch,                   \
      typename TTypes<T, NDIM + 1>::ConstTensor Tparams,         \
      typename TTypes<Index>::ConstMatrix Tindices,              \
      typename TTypes<T>::Matrix Tout);                          \
  extern template struct GatherNdSlice<GPUDevice, T, Index, NDIM>;
// One declaration per supported index-tuple length (0 through 7).
#define DECLARE_GPU_SPECS_INDEX(T, Index)       \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 0);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 1);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 2);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 3);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 4);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 5);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 6);    \
  DECLARE_GPU_SPECS_INDEX_NDIM(T, Index, 7);
#define DECLARE_GPU_SPECS(T)         \
  DECLARE_GPU_SPECS_INDEX(T, int32); \
  DECLARE_GPU_SPECS_INDEX(T, int64_t)
TF_CALL_int32(DECLARE_GPU_SPECS);
TF_CALL_int64(DECLARE_GPU_SPECS);
TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPECS);
TF_CALL_COMPLEX_TYPES(DECLARE_GPU_SPECS);
#undef DECLARE_GPU_SPECS
#undef DECLARE_GPU_SPECS_INDEX
}
#undef REGISTER_GATHER_ND_FULL
// GPU registration: note the allowed policy list omits "ERROR" — the GPU
// kernel does not report bad indices under an explicit error policy.
#define REGISTER_GATHER_ND_FULL(dev, type, index_type)            \
  REGISTER_KERNEL_BUILDER(                                        \
      Name("GatherNd")                                            \
          .Device(DEVICE_##dev)                                   \
          .TypeConstraint<type>("Tparams")                        \
          .TypeConstraint<index_type>("Tindices")                 \
          .AttrConstraint<std::string>("bad_indices_policy",      \
                                       {"", "DEFAULT", "IGNORE"}), \
      GatherNdOp<dev##Device, type, index_type>)
#define REGISTER_GATHER_ND_GPU(type)         \
  REGISTER_GATHER_ND_FULL(GPU, type, int32); \
  REGISTER_GATHER_ND_FULL(GPU, type, int64_t)
TF_CALL_int32(REGISTER_GATHER_ND_GPU);
TF_CALL_int64(REGISTER_GATHER_ND_GPU);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GATHER_ND_GPU);
TF_CALL_COMPLEX_TYPES(REGISTER_GATHER_ND_GPU);
#undef REGISTER_GATHER_ND_GPU
#endif
#undef REGISTER_GATHER_ND_FULL
} | #include <functional>
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace test {
namespace graph {
// Adds a GatherNd node to `g`, wired to the params node `in0` and the
// indices node `in1`, and returns the new node.
class Node* GatherNd(Graph* g, class Node* in0, class Node* in1) {
  class Node* node = nullptr;
  TF_CHECK_OK(NodeBuilder(g->NewName("n"), "GatherNd")
                  .Input(in0)
                  .Input(in1)
                  .Finalize(g, &node));
  return node;
}
}
}
namespace {
// Fixture building a GatherNd kernel with the given param/index dtypes and
// no bad_indices_policy attribute (i.e. default error-on-bad-index on CPU).
class GatherNdOpTest : public OpsTestBase {
 protected:
  void MakeOp(DataType param_type, DataType index_type) {
    TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
                     .Input(FakeInput(param_type))
                     .Input(FakeInput(index_type))
                     .Finalize(node_def()));
    TF_ASSERT_OK(InitOp());
  }
};
// Basic float gather: rows {3, 4} of a length-5 vector.
TEST_F(GatherNdOpTest, Simple) {
  MakeOp(DT_FLOAT, DT_INT32);
  AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4});
  AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected, {8, 4});
  test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
// Index 5 is out of range for a length-5 params vector; under the default
// policy the kernel must fail with a descriptive message.
TEST_F(GatherNdOpTest, Error_OutOfRange) {
  MakeOp(DT_FLOAT, DT_INT32);
  AddInputFromArray<float>(TensorShape({5}), {0, 1, 2, 8, 4});
  AddInputFromArray<int32>(TensorShape({2, 1}), {3, 5});
  Status s = RunOpKernel();
  EXPECT_TRUE(absl::StrContains(
      s.message(), "indices[1] = [5] does not index into param shape [5]"))
      << s.message();
}
// Gather works for the quantized unsigned 8-bit type.
TEST_F(GatherNdOpTest, Quantized_UINT8) {
  MakeOp(DT_QUINT8, DT_INT32);
  AddInputFromArray<quint8>(TensorShape({5}), {0, 1, 2, 8, 4});
  AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_QUINT8, TensorShape({2}));
  test::FillValues<quint8>(&expected, {8, 4});
  test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
}
// Gather works for the quantized signed 8-bit type.
TEST_F(GatherNdOpTest, Quantized_INT8) {
  MakeOp(DT_QINT8, DT_INT32);
  AddInputFromArray<qint8>(TensorShape({5}), {0, 1, 2, 8, 4});
  AddInputFromArray<int32>(TensorShape({2, 1}), {3, 4});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_QINT8, TensorShape({2}));
  test::FillValues<qint8>(&expected, {8, 4});
  test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
}
// Fixture variant that sets bad_indices_policy="IGNORE".
class GatherNdOpIgnoreBadIndicesTest : public OpsTestBase {
 protected:
  void MakeOp(DataType param_type, DataType index_type) {
    TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
                     .Input(FakeInput(param_type))
                     .Input(FakeInput(index_type))
                     .Attr("bad_indices_policy", "IGNORE")
                     .Finalize(node_def()));
    TF_ASSERT_OK(InitOp());
  }
};
// With IGNORE, the out-of-range row (index 5) yields 0 in the output instead
// of an error; valid rows are still gathered.
TEST_F(GatherNdOpIgnoreBadIndicesTest, IgnoreOutOfRange) {
  MakeOp(DT_FLOAT, DT_INT32);
  AddInputFromArray<float>(TensorShape({5}), {9, 1, 2, 8, 4});
  AddInputFromArray<int32>(TensorShape({3, 1}), {3, 5, 1});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
  test::FillValues<float>(&expected, {8, 0, 1});
  test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class GatherNdOpConstructionTest : public OpsTestBase {};
// An unrecognized policy string must be rejected at kernel-construction time.
TEST_F(GatherNdOpConstructionTest, Error_BadIndicesPolicyInvalid) {
  TF_ASSERT_OK(NodeDefBuilder("myop", "GatherNd")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_INT32))
                   .Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY")
                   .Finalize(node_def()));
  EXPECT_NE(InitOp(), absl::OkStatus());
}
constexpr int kLookups = 2000;
// Builds a benchmark graph gathering kLookups random 4-D entries from a
// [dim, 8, 16, 32] float tensor, with indices of type `Index`.
template <typename Index>
static Graph* GatherNd(int dim) {
  Graph* graph = new Graph(OpRegistry::Global());
  Tensor params(DT_FLOAT, TensorShape({dim, 8, 16, 32}));
  params.flat<float>().setRandom();
  random::PhiloxRandom philox(301, 17);
  random::SimplePhilox rnd(&philox);
  Tensor indices(DataTypeToEnum<Index>::value, TensorShape({kLookups, 4}));
  auto idx = indices.matrix<Index>();
  // Draw each coordinate uniformly within its dimension bound; the draw
  // order (per row: dim, 8, 16, 32) matches the original RNG sequence.
  const int bounds[4] = {dim, 8, 16, 32};
  for (int row = 0; row < kLookups; ++row) {
    for (int d = 0; d < 4; ++d) {
      idx(row, d) = rnd.Uniform(bounds[d]);
    }
  }
  test::graph::GatherNd(graph, test::graph::Constant(graph, params),
                        test::graph::Constant(graph, indices));
  return graph;
}
// Benchmark macro: runs the GatherNd graph on the given device/index type for
// several first-dimension sizes, reporting items and bytes processed.
#define BM_GATHER_ND(DEVICE, INDEX)                                \
  static void BM_##DEVICE##_gather_nd_##INDEX(                     \
      ::testing::benchmark::State& state) {                        \
    const int dim = state.range(0);                                \
    test::Benchmark(#DEVICE, GatherNd<INDEX>(dim),                 \
                    false)                                         \
        .Run(state);                                               \
    const int64_t tot =                                            \
        static_cast<int64_t>(state.iterations()) * kLookups * 4;   \
    state.SetItemsProcessed(tot);                                  \
    state.SetBytesProcessed(tot * sizeof(float));                  \
  }                                                                \
  BENCHMARK(BM_##DEVICE##_gather_nd_##INDEX)                       \
      ->UseRealTime()                                              \
      ->Arg(10)                                                    \
      ->Arg(100)                                                   \
      ->Arg(1000)                                                  \
      ->Arg(10000)
BM_GATHER_ND(cpu, int32);
BM_GATHER_ND(gpu, int32);
BM_GATHER_ND(cpu, int64_t);
BM_GATHER_ND(gpu, int64_t);
}
} |
252 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESIZE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESIZE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewResizeNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/resize.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Generates GLSL source for the RESIZE op.  Supports BILINEAR and NEAREST
// sampling; only upsampling (output >= input in H and W) is implemented.
class Resize : public NodeShader {
 public:
  absl::Status GenerateCode(const GenerationContext& ctx,
                            GeneratedCode* generated_code) const final {
    const auto& attr = std::any_cast<const Resize2DAttributes&>(ctx.op_attr);
    // Shapes are BHWC: [1]=height, [2]=width, [3]=channels.
    if (ctx.input_shapes[0][2] > ctx.output_shapes[0][2] ||
        ctx.input_shapes[0][1] > ctx.output_shapes[0][1]) {
      return absl::UnimplementedError(
          "Downsampling is currently not supported by the resize op on GPU.");
    }
    if (ctx.output_shapes[0][2] != attr.new_shape.w ||
        ctx.output_shapes[0][1] != attr.new_shape.h) {
      return absl::InvalidArgumentError(
          "Output size does not match new_size in attributes.");
    }
    if (ctx.input_shapes[0][3] != ctx.output_shapes[0][3]) {
      return absl::InvalidArgumentError("Input/output channels mismatch.");
    }
    // Special case: a 1x1 input is a pure broadcast of its single pixel,
    // regardless of sampling type or alignment flags.
    if (ctx.input_shapes[0][1] == 1 && ctx.input_shapes[0][2] == 1) {
      *generated_code = {
          {},
          {},
          {},
          uint3(),
          uint3(),
          "value_0 = $input_data_0[0, 0, gid.z]$;",
          IOStructure::ONLY_DEFINITIONS,
          IOStructure::AUTO,
      };
      return absl::OkStatus();
    }
    // Shader parameters: input extents and the per-axis (x, y) scale from
    // output coordinates back to input coordinates.
    std::vector<Variable> parameters = {
        {"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
        {"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
        {"scale_factor",
         float2(CalculateResizeScale(ctx.input_shapes[0][2],
                                     ctx.output_shapes[0][2], attr),
                CalculateResizeScale(ctx.input_shapes[0][1],
                                     ctx.output_shapes[0][1], attr))},
    };
    std::string source;
    if (attr.type == SamplingType::BILINEAR) {
      // Map the output pixel to a (possibly fractional) input coordinate.
      if (attr.half_pixel_centers) {
        source = "vec2 coord = (vec2(gid.xy) + 0.5) * $scale_factor$ - 0.5;";
      } else {
        source = "vec2 coord = vec2(gid.xy) * $scale_factor$;";
      }
      // Sample the four neighboring texels (clamped to the borders) and
      // interpolate with the fractional part of the coordinate.
      source += R"(
      vec2 coord_floor = floor(coord);
      ivec2 icoord_floor = ivec2(coord_floor);
      ivec2 borders = ivec2($input_data_0_w$, $input_data_0_h$) - ivec2(1, 1);
      ivec4 st;
      st.xy = max(icoord_floor, ivec2(0, 0));
      st.zw = min(icoord_floor + ivec2(1, 1), borders);
      vec2 t = coord - coord_floor;
      vec4 tex11 = $input_data_0[st.x, st.y, gid.z]$;
      vec4 tex21 = $input_data_0[st.z, st.y, gid.z]$;
      vec4 tex12 = $input_data_0[st.x, st.w, gid.z]$;
      vec4 tex22 = $input_data_0[st.z, st.w, gid.z]$;
      value_0 = mix(mix(tex11, tex21, t.x), mix(tex12, tex22, t.x), t.y);)";
    } else if (attr.type == SamplingType::NEAREST) {
      // Build the source-coordinate expressions, then clamp and sample the
      // single nearest texel.
      std::string fxc;
      std::string fyc;
      if (attr.half_pixel_centers) {
        fxc = "(float(gid.x) + 0.5) * $scale_factor.x$";
        fyc = "(float(gid.y) + 0.5) * $scale_factor.y$";
      } else {
        fxc = "float(gid.x) * $scale_factor.x$";
        fyc = "float(gid.y) * $scale_factor.y$";
      }
      // align_corners rounds to nearest rather than truncating.
      if (attr.align_corners) {
        fxc += " + 0.5";
        fyc += " + 0.5";
      }
      source += "  ivec2 coord;\n";
      source += "  coord.x = int(" + fxc + ");\n";
      source += "  coord.y = int(" + fyc + ");\n";
      source += "  coord.x = max(0, coord.x);\n";
      source += "  coord.y = max(0, coord.y);\n";
      source += "  coord.x = min(coord.x, $input_data_0_w$ - 1);\n";
      source += "  coord.y = min(coord.y, $input_data_0_h$ - 1);\n";
      source += R"(
      value_0 = $input_data_0[coord.x, coord.y, gid.z]$;
      )";
    } else {
      return absl::InvalidArgumentError("Unknown sampling type");
    }
    *generated_code = {
        std::move(parameters),
        {},
        {},
        uint3(),
        uint3(),
        std::move(source),
        IOStructure::ONLY_DEFINITIONS,
        IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
};
}
// Factory: creates the GL shader implementation for the resize op.
std::unique_ptr<NodeShader> NewResizeNodeShader() {
  std::unique_ptr<NodeShader> shader = std::make_unique<Resize>();
  return shader;
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/resize.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// 1x1 input: the single pixel is broadcast to every output location.
TEST(ResizeTest, Bilinear1x1x2To2x2x2) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 1, 1, 2);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 2, 2, 2);
  Resize2DAttributes attr;
  attr.align_corners = true;
  attr.new_shape = HW(2, 2);
  attr.type = SamplingType::BILINEAR;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(
      model.GetOutput(0),
      Pointwise(FloatNear(1e-6), {1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0}));
}
// 1-D bilinear upsampling along width; the last output clamps to the border.
TEST(ResizeTest, Bilinear1x2x1To1x4x1) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 1, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 1, 4, 1);
  Resize2DAttributes attr;
  attr.align_corners = false;
  attr.new_shape = HW(1, 4);
  attr.type = SamplingType::BILINEAR;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {1.0, 4.0}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {1.0, 2.5, 4.0, 4.0}));
}
// 2-D bilinear 2x upsampling of a 2x2 block.
TEST(ResizeTest, Bilinear2x2x1To4x4x1) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 2, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 4, 4, 1);
  Resize2DAttributes attr;
  attr.align_corners = false;
  attr.new_shape = HW(4, 4);
  attr.type = SamplingType::BILINEAR;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {1.0, 4.0, 6.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(
      model.GetOutput(0),
      Pointwise(FloatNear(1e-6), {1.0, 2.5, 4.0, 4.0, 3.5, 4.75, 6.0, 6.0, 6.0,
                                  7.0, 8.0, 8.0, 6.0, 7.0, 8.0, 8.0}));
}
// half_pixel_centers=false: coordinates map from the top-left corner.
TEST(ResizeTest, Bilinear2x2x1To3x3x1WithoutHalfPixel) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 2, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 3, 3, 1);
  Resize2DAttributes attr;
  attr.align_corners = false;
  attr.half_pixel_centers = false;
  attr.new_shape = HW(3, 3);
  attr.type = SamplingType::BILINEAR;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {1.0, 1.666666, 2.0, 2.333333, 3.0,
                                          3.333333, 3.0, 3.666666, 4.0}));
}
// half_pixel_centers=true: coordinates map from pixel centers, shifting the
// interpolation lattice by half a pixel.
TEST(ResizeTest, Bilinear2x2x1To3x3x1WithHalfPixel) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 2, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 1;
  output.shape = BHWC(1, 3, 3, 1);
  Resize2DAttributes attr;
  attr.align_corners = false;
  attr.half_pixel_centers = true;
  attr.new_shape = HW(3, 3);
  attr.type = SamplingType::BILINEAR;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6),
                        {1.0, 1.5, 2.0, 2.0, 2.5, 3.0, 3.0, 3.5, 4.0}));
}
// Nearest-neighbor 2x upsampling simply repeats each source pixel.
TEST(ResizeTest, Nearest1x2x1To2x4x1) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 1, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 2;
  output.shape = BHWC(1, 2, 4, 1);
  Resize2DAttributes attr;
  attr.align_corners = false;
  attr.new_shape = HW(2, 4);
  attr.type = SamplingType::NEAREST;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(
      model.GetOutput(0),
      Pointwise(FloatNear(1e-6), {1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0}));
}
// align_corners adds 0.5 before truncation, i.e. rounds to nearest source.
TEST(ResizeTest, NearestAlignCorners) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 2, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 2;
  output.shape = BHWC(1, 3, 3, 1);
  Resize2DAttributes attr;
  attr.align_corners = true;
  attr.half_pixel_centers = false;
  attr.new_shape = HW(3, 3);
  attr.type = SamplingType::NEAREST;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {3.0f, 6.0f, 9.0f, 12.0f}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {3.0f, 6.0f, 6.0f, 9.0f, 12.0f, 12.0f,
                                          9.0f, 12.0f, 12.0f}));
}
// half_pixel_centers with NEAREST yields the same result as align_corners
// for this 2x2 -> 3x3 case.
TEST(ResizeTest, NearestHalfPixelCenters) {
  TensorRef<BHWC> input;
  input.type = DataType::FLOAT32;
  input.ref = 0;
  input.shape = BHWC(1, 2, 2, 1);
  TensorRef<BHWC> output;
  output.type = DataType::FLOAT32;
  output.ref = 2;
  output.shape = BHWC(1, 3, 3, 1);
  Resize2DAttributes attr;
  attr.align_corners = false;
  attr.half_pixel_centers = true;
  attr.new_shape = HW(3, 3);
  attr.type = SamplingType::NEAREST;
  SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input},
                      {output});
  ASSERT_TRUE(model.PopulateTensor(0, {3.0f, 6.0f, 9.0f, 12.0f}));
  ASSERT_OK(model.Invoke(*NewResizeNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {3.0f, 6.0f, 6.0f, 9.0f, 12.0f, 12.0f,
                                          9.0f, 12.0f, 12.0f}));
}
}
}
}
} |
253 | #ifndef ABSL_LOG_INTERNAL_FLAGS_H_
#define ABSL_LOG_INTERNAL_FLAGS_H_
#include <string>
#include "absl/flags/declare.h"
ABSL_DECLARE_FLAG(int, stderrthreshold);
ABSL_DECLARE_FLAG(int, minloglevel);
ABSL_DECLARE_FLAG(std::string, log_backtrace_at);
ABSL_DECLARE_FLAG(bool, log_prefix);
ABSL_DECLARE_FLAG(int, v);
ABSL_DECLARE_FLAG(std::string, vmodule);
#endif
#include "absl/log/internal/flags.h"
#include <stddef.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/flags/marshalling.h"
#include "absl/log/globals.h"
#include "absl/log/internal/config.h"
#include "absl/log/internal/vlog_config.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
namespace {
// Mirrors the current logging globals back into the flags, so that flag reads
// observe programmatic changes made via the absl::log globals API.
void SyncLoggingFlags() {
  absl::SetFlag(&FLAGS_minloglevel, static_cast<int>(absl::MinLogLevel()));
  absl::SetFlag(&FLAGS_log_prefix, absl::ShouldPrependLogPrefix());
}
// Installs SyncLoggingFlags as the globals-change listener.  Invoked once at
// static-initialization time via `unused` below; always returns true.
bool RegisterSyncLoggingFlags() {
  log_internal::SetLoggingGlobalsListener(&SyncLoggingFlags);
  return true;
}
// Forces the registration above to run during static initialization.
ABSL_ATTRIBUTE_UNUSED const bool unused = RegisterSyncLoggingFlags();
// Returns `dflt` overridden by the environment variable `varname` when that
// variable is set; aborts (via ABSL_INTERNAL_CHECK) if the value fails to
// parse as a T.
template <typename T>
T GetFromEnv(const char* varname, T dflt) {
  if (const char* val = ::getenv(varname)) {
    std::string err;
    ABSL_INTERNAL_CHECK(absl::ParseFlag(val, &dflt, &err), err.c_str());
  }
  return dflt;
}
// Default for --stderrthreshold: copy ERROR-and-above messages to stderr.
constexpr absl::LogSeverityAtLeast StderrThresholdDefault() {
  return absl::LogSeverityAtLeast::kError;
}
}
}
ABSL_NAMESPACE_END
}
// --stderrthreshold: severity at/above which messages also go to stderr.
// The OnUpdate hook pushes the flag value into the logging globals via the
// Raw* setters (which do not re-trigger the globals listener).
ABSL_FLAG(int, stderrthreshold,
          static_cast<int>(absl::log_internal::StderrThresholdDefault()),
          "Log messages at or above this threshold level are copied to stderr.")
    .OnUpdate([] {
      absl::log_internal::RawSetStderrThreshold(
          static_cast<absl::LogSeverityAtLeast>(
              absl::GetFlag(FLAGS_stderrthreshold)));
    });
// --minloglevel: messages below this severity are dropped entirely.
ABSL_FLAG(int, minloglevel, static_cast<int>(absl::LogSeverityAtLeast::kInfo),
          "Messages logged at a lower level than this don't actually "
          "get logged anywhere")
    .OnUpdate([] {
      absl::log_internal::RawSetMinLogLevel(
          static_cast<absl::LogSeverityAtLeast>(
              absl::GetFlag(FLAGS_minloglevel)));
    });
// --log_backtrace_at: "file:line" at which to attach a backtrace.  Any value
// that is empty, lacks a colon, or has a non-numeric line clears the location.
ABSL_FLAG(std::string, log_backtrace_at, "",
          "Emit a backtrace when logging at file:linenum.")
    .OnUpdate([] {
      const std::string log_backtrace_at =
          absl::GetFlag(FLAGS_log_backtrace_at);
      if (log_backtrace_at.empty()) {
        absl::ClearLogBacktraceLocation();
        return;
      }
      const size_t last_colon = log_backtrace_at.rfind(':');
      if (last_colon == log_backtrace_at.npos) {
        absl::ClearLogBacktraceLocation();
        return;
      }
      const absl::string_view file =
          absl::string_view(log_backtrace_at).substr(0, last_colon);
      int line;
      if (!absl::SimpleAtoi(
              absl::string_view(log_backtrace_at).substr(last_colon + 1),
              &line)) {
        absl::ClearLogBacktraceLocation();
        return;
      }
      absl::SetLogBacktraceLocation(file, line);
    });
// --log_prefix: whether each line carries the standard log-line prefix.
ABSL_FLAG(bool, log_prefix, true,
          "Prepend the log prefix to the start of each log line")
    .OnUpdate([] {
      absl::log_internal::RawEnableLogPrefix(absl::GetFlag(FLAGS_log_prefix));
    });
// --v: global VLOG verbosity ceiling; per-module --vmodule wins over it.
ABSL_FLAG(int, v, 0,
          "Show all VLOG(m) messages for m <= this. Overridable by --vmodule.")
    .OnUpdate([] {
      absl::log_internal::UpdateGlobalVLogLevel(absl::GetFlag(FLAGS_v));
    });
// --vmodule: comma-separated glob=level overrides for VLOG by module name.
ABSL_FLAG(
    std::string, vmodule, "",
    "per-module log verbosity level."
    " Argument is a comma-separated list of <module name>=<log level>."
    " <module name> is a glob pattern, matched against the filename base"
    " (that is, name ignoring .cc/.h./-inl.h)."
    " A pattern without slashes matches just the file name portion, otherwise"
    " the whole file path below the workspace root"
    " (still without .cc/.h./-inl.h) is matched."
    " ? and * in the glob pattern match any single or sequence of characters"
    " respectively including slashes."
    " <log level> overrides any value given by --v.")
    .OnUpdate([] {
      absl::log_internal::UpdateVModule(absl::GetFlag(FLAGS_vmodule));
    });
}); | #include "absl/log/internal/flags.h"
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"
#include "absl/log/globals.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/internal/test_matchers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/strings/str_cat.h"
namespace {
using ::absl::log_internal::TextMessage;
using ::testing::HasSubstr;
using ::testing::Not;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {
return absl::LogSeverityAtLeast::kError;
}
// Fixture that snapshots and restores all absl flags around each test so a
// flag mutated in one test cannot leak into the next.
class LogFlagsTest : public ::testing::Test {
 protected:
  absl::FlagSaver flag_saver_;
};
// Disabled: default-threshold check is environment-sensitive.
TEST_F(LogFlagsTest, DISABLED_StderrKnobsDefault) {
  EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
}
// Setting the flag must be reflected by the programmatic getter.
TEST_F(LogFlagsTest, SetStderrThreshold) {
  absl::SetFlag(&FLAGS_stderrthreshold,
                static_cast<int>(absl::LogSeverityAtLeast::kInfo));
  EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kInfo);
  absl::SetFlag(&FLAGS_stderrthreshold,
                static_cast<int>(absl::LogSeverityAtLeast::kError));
  EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
}
// Flag -> getter and (scoped) setter -> flag must stay in sync both ways.
TEST_F(LogFlagsTest, SetMinLogLevel) {
  absl::SetFlag(&FLAGS_minloglevel,
                static_cast<int>(absl::LogSeverityAtLeast::kError));
  EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
  absl::log_internal::ScopedMinLogLevel scoped_min_log_level(
      absl::LogSeverityAtLeast::kWarning);
  EXPECT_EQ(absl::GetFlag(FLAGS_minloglevel),
            static_cast<int>(absl::LogSeverityAtLeast::kWarning));
}
// --log_prefix and absl::EnableLogPrefix are two views of the same knob.
TEST_F(LogFlagsTest, PrependLogPrefix) {
  absl::SetFlag(&FLAGS_log_prefix, false);
  EXPECT_EQ(absl::ShouldPrependLogPrefix(), false);
  absl::EnableLogPrefix(true);
  EXPECT_EQ(absl::GetFlag(FLAGS_log_prefix), true);
}
// An empty --log_backtrace_at must never attach a stack trace.
TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at, "");
  LOG(INFO) << "hello world";
}
// A value that is not of the form file:line is ignored (no trace attached).
TEST_F(LogFlagsTest, BacktraceAtNonsense) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish");
  LOG(INFO) << "hello world";
}
// NOTE: each test computes log_line as __LINE__ + 1, i.e. the line of the
// LOG(INFO) statement inside the following lambda; the comment placement
// below deliberately keeps those two lines adjacent.
// A matching line but mismatching file must not trigger a backtrace.
TEST_F(LogFlagsTest, BacktraceAtWrongFile) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  const int log_line = __LINE__ + 1;
  auto do_log = [] { LOG(INFO) << "hello world"; };
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at,
                absl::StrCat("some_other_file.cc:", log_line));
  do_log();
}
// A matching file but mismatching line must not trigger a backtrace.
TEST_F(LogFlagsTest, BacktraceAtWrongLine) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  const int log_line = __LINE__ + 1;
  auto do_log = [] { LOG(INFO) << "hello world"; };
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at,
                absl::StrCat("flags_test.cc:", log_line + 1));
  do_log();
}
// A full path (as produced by __FILE__) does not match; only the basename
// form is accepted by the flag.
TEST_F(LogFlagsTest, BacktraceAtWholeFilename) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  const int log_line = __LINE__ + 1;
  auto do_log = [] { LOG(INFO) << "hello world"; };
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at, absl::StrCat(__FILE__, ":", log_line));
  do_log();
}
// Trailing garbage after the line number invalidates the whole spec.
TEST_F(LogFlagsTest, BacktraceAtNonmatchingSuffix) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  const int log_line = __LINE__ + 1;
  auto do_log = [] { LOG(INFO) << "hello world"; };
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at,
                absl::StrCat("flags_test.cc:", log_line, "gibberish"));
  do_log();
}
// An exact file:line match attaches a stack trace; clearing the flag
// afterwards stops attaching it. Order of expectations enforced by
// InSequence: first log has a trace, second does not.
TEST_F(LogFlagsTest, LogsBacktrace) {
  absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
  const int log_line = __LINE__ + 1;
  auto do_log = [] { LOG(INFO) << "hello world"; };
  absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
  testing::InSequence seq;
  EXPECT_CALL(test_sink, Send(TextMessage(HasSubstr("(stacktrace:"))));
  EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
  test_sink.StartCapturingLogs();
  absl::SetFlag(&FLAGS_log_backtrace_at,
                absl::StrCat("flags_test.cc:", log_line));
  do_log();
  absl::SetFlag(&FLAGS_log_backtrace_at, "");
  do_log();
}
} |
254 | #ifndef ABSL_DEBUGGING_LEAK_CHECK_H_
#define ABSL_DEBUGGING_LEAK_CHECK_H_
#include <cstddef>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// True iff the binary was built with LeakSanitizer support compiled in.
bool HaveLeakSanitizer();
// True iff LSan support is compiled in AND enabled at runtime.
bool LeakCheckerIsActive();
// Marks the allocation pointed to by ptr as an intentional leak.
void DoIgnoreLeak(const void* ptr);
// Convenience wrapper: ignore the leak and hand the pointer back, so it can
// wrap an allocation expression inline.
template <typename T>
T* IgnoreLeak(T* ptr) {
  DoIgnoreLeak(ptr);
  return ptr;
}
// Runs a leak check now; see the implementation for the no-LSan behavior.
bool FindAndReportLeaks();
// RAII guard: leak checking is suspended for the lifetime of the object.
// Non-copyable by design.
class LeakCheckDisabler {
 public:
  LeakCheckDisabler();
  LeakCheckDisabler(const LeakCheckDisabler&) = delete;
  LeakCheckDisabler& operator=(const LeakCheckDisabler&) = delete;
  ~LeakCheckDisabler();
};
// Declare [ptr, ptr+size) as reachable roots for the leak scanner.
void RegisterLivePointers(const void* ptr, size_t size);
// Undo a prior RegisterLivePointers for the same region.
void UnRegisterLivePointers(const void* ptr, size_t size);
ABSL_NAMESPACE_END
}
#endif
#include "absl/debugging/leak_check.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if defined(ABSL_HAVE_LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#if ABSL_HAVE_ATTRIBUTE_WEAK
extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off();
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
bool HaveLeakSanitizer() { return true; }
#if ABSL_HAVE_ATTRIBUTE_WEAK
// __lsan_is_turned_off is a weak symbol the application may define to opt
// out at runtime; if it is absent (&fn == nullptr) or returns 0, the leak
// checker is considered active.
bool LeakCheckerIsActive() {
  return !(&__lsan_is_turned_off && __lsan_is_turned_off());
}
#else
bool LeakCheckerIsActive() { return true; }
#endif
// Delegates to LSan's recoverable check: returns nonzero if leaks were found.
bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check(); }
void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
void RegisterLivePointers(const void* ptr, size_t size) {
  __lsan_register_root_region(ptr, size);
}
void UnRegisterLivePointers(const void* ptr, size_t size) {
  __lsan_unregister_root_region(ptr, size);
}
// Disable/enable calls nest inside LSan, so stacked disablers are safe.
LeakCheckDisabler::LeakCheckDisabler() { __lsan_disable(); }
LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
ABSL_NAMESPACE_END
}
#else
namespace absl {
ABSL_NAMESPACE_BEGIN
// Stub implementations used when LeakSanitizer is not compiled in: leak
// checking reports as unavailable/inactive and every operation is a no-op.
bool HaveLeakSanitizer() { return false; }
bool LeakCheckerIsActive() { return false; }
// Bug fix: FindAndReportLeaks() is declared in leak_check.h but had no
// definition in this (no-LSan) branch, causing an undefined-symbol link
// error for any caller built without ABSL_HAVE_LEAK_SANITIZER. With no
// sanitizer there is nothing to check, so report "no leaks found".
bool FindAndReportLeaks() { return false; }
void DoIgnoreLeak(const void*) { }
void RegisterLivePointers(const void*, size_t) { }
void UnRegisterLivePointers(const void*, size_t) { }
LeakCheckDisabler::LeakCheckDisabler() = default;
LeakCheckDisabler::~LeakCheckDisabler() = default;
ABSL_NAMESPACE_END
}
#endif | #include <string>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/log.h"
namespace {
// Intentionally leaks a string marked via IgnoreLeak; the binary must still
// pass under LSan. Skipped when no leak checker is active.
TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
  if (!absl::LeakCheckerIsActive()) {
    GTEST_SKIP() << "LeakChecker is not active";
  }
  auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
  LOG(INFO) << "Ignoring leaked string " << foo;
}
// Same idea, but the allocation happens while a LeakCheckDisabler is live.
TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
  if (!absl::LeakCheckerIsActive()) {
    GTEST_SKIP() << "LeakChecker is not active";
  }
  absl::LeakCheckDisabler disabler;
  auto foo = new std::string("some string leaked while checks are disabled");
  LOG(INFO) << "Ignoring leaked string " << foo;
}
} |
255 | #ifndef QUICHE_QUIC_QBONE_QBONE_STREAM_H_
#define QUICHE_QUIC_QBONE_QBONE_STREAM_H_
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
class QboneSessionBase;
// Outgoing unidirectional stream that carries exactly one QBONE packet;
// incoming data is impossible, so OnDataAvailable is a no-op.
class QUIC_EXPORT_PRIVATE QboneWriteOnlyStream : public QuicStream {
 public:
  QboneWriteOnlyStream(QuicStreamId id, QuicSession* session);
  void OnDataAvailable() override {}
  // Writes the packet and a FIN; the stream is single-use.
  void WritePacketToQuicStream(absl::string_view packet);
};
// Incoming unidirectional stream that accumulates one QBONE packet and hands
// it to the session when the peer's FIN arrives.
class QUIC_EXPORT_PRIVATE QboneReadOnlyStream : public QuicStream {
 public:
  QboneReadOnlyStream(QuicStreamId id, QboneSessionBase* session);
  ~QboneReadOnlyStream() override = default;
  void OnDataAvailable() override;
 private:
  std::string buffer_;  // Bytes received so far for the in-flight packet.
  QboneSessionBase* session_;  // Not owned; outlives the stream.
};
}
#endif
#include "quiche/quic/qbone/qbone_stream.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/qbone/qbone_constants.h"
#include "quiche/quic/qbone/qbone_session_base.h"
#include "quiche/common/platform/api/quiche_command_line_flags.h"
// TTL applied to every QBONE stream: data not delivered within this window
// is abandoned rather than retransmitted indefinitely.
DEFINE_QUICHE_COMMAND_LINE_FLAG(int, qbone_stream_ttl_secs, 3,
                                "The QBONE Stream TTL in seconds.");
namespace quic {
QboneWriteOnlyStream::QboneWriteOnlyStream(QuicStreamId id,
                                           QuicSession* session)
    : QuicStream(id, session, false, WRITE_UNIDIRECTIONAL) {
  MaybeSetTtl(QuicTime::Delta::FromSeconds(
      quiche::GetQuicheCommandLineFlag(FLAGS_qbone_stream_ttl_secs)));
}
// Single-shot write: sends the packet with FIN so the peer sees a complete
// message once all bytes arrive.
void QboneWriteOnlyStream::WritePacketToQuicStream(absl::string_view packet) {
  WriteOrBufferData(packet, true, nullptr);
}
QboneReadOnlyStream::QboneReadOnlyStream(QuicStreamId id,
                                         QboneSessionBase* session)
    : QuicStream(id, session,
                 false, READ_UNIDIRECTIONAL),
      session_(session) {
  MaybeSetTtl(QuicTime::Delta::FromSeconds(
      quiche::GetQuicheCommandLineFlag(FLAGS_qbone_stream_ttl_secs)));
}
// Appends newly sequenced bytes to buffer_. When the FIN has been consumed
// the whole buffer is one QBONE packet and is delivered to the session.
// If the buffer grows past the maximum packet size before the FIN arrives,
// the stream is reset (once) and further reading stops.
void QboneReadOnlyStream::OnDataAvailable() {
  sequencer()->Read(&buffer_);
  if (sequencer()->IsClosed()) {
    session_->ProcessPacketFromPeer(buffer_);
    OnFinRead();
    return;
  }
  if (buffer_.size() > QboneConstants::kMaxQbonePacketBytes) {
    if (!rst_sent()) {
      Reset(QUIC_BAD_APPLICATION_PAYLOAD);
    }
    StopReading();
  }
}
} | #include "quiche/quic/qbone/qbone_stream.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_loopback.h"
#include "quiche/quic/qbone/qbone_constants.h"
#include "quiche/quic/qbone/qbone_session_base.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/mock_connection_id_generator.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quic {
namespace {
using ::testing::_;
using ::testing::StrictMock;
// Minimal QboneSessionBase test double: write success is controlled via
// set_writable(), packet/frame callbacks are gmock-mocked.
class MockQuicSession : public QboneSessionBase {
 public:
  MockQuicSession(QuicConnection* connection, const QuicConfig& config)
      : QboneSessionBase(connection, nullptr , config,
                         CurrentSupportedVersions(), nullptr ) {}
  ~MockQuicSession() override {}
  // Pretends to consume all offered bytes when writable; otherwise nothing.
  QuicConsumedData WritevData(QuicStreamId id, size_t write_length,
                              QuicStreamOffset offset, StreamSendingState state,
                              TransmissionType type,
                              EncryptionLevel level) override {
    if (!writable_) {
      return QuicConsumedData(0, false);
    }
    return QuicConsumedData(write_length, state != StreamSendingState::NO_FIN);
  }
  QboneReadOnlyStream* CreateIncomingStream(QuicStreamId id) override {
    return nullptr;
  }
  MOCK_METHOD(void, MaybeSendRstStreamFrame,
              (QuicStreamId stream_id, QuicResetStreamError error,
               QuicStreamOffset bytes_written),
              (override));
  MOCK_METHOD(void, MaybeSendStopSendingFrame,
              (QuicStreamId stream_id, QuicResetStreamError error), (override));
  void set_writable(bool writable) { writable_ = writable; }
  // Registers the stream with the write-blocked list (static priority).
  void RegisterReliableStream(QuicStreamId stream_id) {
    write_blocked_streams()->RegisterStream(stream_id,
                                            false,
                                            QuicStreamPriority());
  }
  // Exposes the protected ActivateStream() to the test body.
  void ActivateReliableStream(std::unique_ptr<QuicStream> stream) {
    ActivateStream(std::move(stream));
  }
  std::unique_ptr<QuicCryptoStream> CreateCryptoStream() override {
    return std::make_unique<test::MockQuicCryptoStream>(this);
  }
  MOCK_METHOD(void, ProcessPacketFromPeer, (absl::string_view), (override));
  MOCK_METHOD(void, ProcessPacketFromNetwork, (absl::string_view), (override));
 private:
  bool writable_ = true;  // Toggled by tests to simulate write blockage.
};
// Packet writer that always fails to write; the tests never exercise the
// wire, they only need a writer for QuicConnection construction.
class DummyPacketWriter : public QuicPacketWriter {
 public:
  DummyPacketWriter() {}
  WriteResult WritePacket(const char* buffer, size_t buf_len,
                          const QuicIpAddress& self_address,
                          const QuicSocketAddress& peer_address,
                          PerPacketOptions* options,
                          const QuicPacketWriterParams& params) override {
    return WriteResult(WRITE_STATUS_ERROR, 0);
  }
  bool IsWriteBlocked() const override { return false; };
  void SetWritable() override {}
  std::optional<int> MessageTooBigErrorCode() const override {
    return std::nullopt;
  }
  QuicByteCount GetMaxPacketSize(
      const QuicSocketAddress& peer_address) const override {
    return 0;
  }
  bool SupportsReleaseTime() const override { return false; }
  bool IsBatchMode() const override { return false; }
  bool SupportsEcn() const override { return false; }
  QuicPacketBuffer GetNextWriteLocation(
      const QuicIpAddress& self_address,
      const QuicSocketAddress& peer_address) override {
    return {nullptr, nullptr};
  }
  WriteResult Flush() override { return WriteResult(WRITE_STATUS_OK, 0); }
};
// Fixture that also serves as the connection helper (clock, RNG, buffer
// allocator) for the QuicConnection it builds.
class QboneReadOnlyStreamTest : public ::testing::Test,
                                public QuicConnectionHelperInterface {
 public:
  // Builds a server-perspective connection + mock session and activates a
  // fresh QboneReadOnlyStream; ownership of stream_ moves to the session.
  void CreateReliableQuicStream() {
    Perspective perspective = Perspective::IS_SERVER;
    bool owns_writer = true;
    alarm_factory_ = std::make_unique<test::MockAlarmFactory>();
    connection_.reset(new QuicConnection(
        test::TestConnectionId(0), QuicSocketAddress(TestLoopback(), 0),
        QuicSocketAddress(TestLoopback(), 0),
        this , alarm_factory_.get(),
        new DummyPacketWriter(), owns_writer, perspective,
        ParsedVersionOfIndex(CurrentSupportedVersions(), 0),
        connection_id_generator_));
    clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
    session_ = std::make_unique<StrictMock<MockQuicSession>>(connection_.get(),
                                                             QuicConfig());
    session_->Initialize();
    stream_ = new QboneReadOnlyStream(kStreamId, session_.get());
    session_->ActivateReliableStream(
        std::unique_ptr<QboneReadOnlyStream>(stream_));
  }
  ~QboneReadOnlyStreamTest() override {}
  const QuicClock* GetClock() const override { return &clock_; }
  QuicRandom* GetRandomGenerator() override {
    return QuicRandom::GetInstance();
  }
  quiche::QuicheBufferAllocator* GetStreamSendBufferAllocator() override {
    return &buffer_allocator_;
  }
 protected:
  QboneReadOnlyStream* stream_;  // Owned by session_ after activation.
  std::unique_ptr<StrictMock<MockQuicSession>> session_;
  std::unique_ptr<QuicAlarmFactory> alarm_factory_;
  std::unique_ptr<QuicConnection> connection_;
  quiche::SimpleBufferAllocator buffer_allocator_;
  MockClock clock_;
  // First client-initiated unidirectional stream id for the tested version.
  const QuicStreamId kStreamId = QuicUtils::GetFirstUnidirectionalStreamId(
      CurrentSupportedVersions()[0].transport_version, Perspective::IS_CLIENT);
  quic::test::MockConnectionIdGenerator connection_id_generator_;
};
// One frame carrying the whole packet plus FIN delivers it immediately.
TEST_F(QboneReadOnlyStreamTest, ReadDataWhole) {
  std::string packet = "Stuff";
  CreateReliableQuicStream();
  QuicStreamFrame frame(kStreamId, true, 0, packet);
  EXPECT_CALL(*session_, ProcessPacketFromPeer("Stuff"));
  stream_->OnStreamFrame(frame);
}
// Packet split across two in-order frames is reassembled before delivery.
TEST_F(QboneReadOnlyStreamTest, ReadBuffered) {
  CreateReliableQuicStream();
  std::string packet = "Stuf";
  {
    QuicStreamFrame frame(kStreamId, false, 0, packet);
    stream_->OnStreamFrame(frame);
  }
  packet = "f";
  EXPECT_CALL(*session_, ProcessPacketFromPeer("Stuff"));
  {
    QuicStreamFrame frame(kStreamId, true, 4, packet);
    stream_->OnStreamFrame(frame);
  }
}
// Out-of-order frames (tail first) are sequenced before delivery.
TEST_F(QboneReadOnlyStreamTest, ReadOutOfOrder) {
  CreateReliableQuicStream();
  std::string packet = "f";
  {
    QuicStreamFrame frame(kStreamId, true, 4, packet);
    stream_->OnStreamFrame(frame);
  }
  packet = "S";
  {
    QuicStreamFrame frame(kStreamId, false, 0, packet);
    stream_->OnStreamFrame(frame);
  }
  packet = "tuf";
  EXPECT_CALL(*session_, ProcessPacketFromPeer("Stuff"));
  {
    QuicStreamFrame frame(kStreamId, false, 1, packet);
    stream_->OnStreamFrame(frame);
  }
}
// Streaming more than kMaxQbonePacketBytes without a FIN must reset the
// stream (RST + STOP_SENDING) and stop reading.
TEST_F(QboneReadOnlyStreamTest, ReadBufferedTooLarge) {
  CreateReliableQuicStream();
  std::string packet = "0123456789";
  int iterations = (QboneConstants::kMaxQbonePacketBytes / packet.size()) + 2;
  EXPECT_CALL(*session_, MaybeSendStopSendingFrame(
                             kStreamId, QuicResetStreamError::FromInternal(
                                            QUIC_BAD_APPLICATION_PAYLOAD)));
  EXPECT_CALL(
      *session_,
      MaybeSendRstStreamFrame(
          kStreamId,
          QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD), _));
  for (int i = 0; i < iterations; ++i) {
    QuicStreamFrame frame(kStreamId, i == (iterations - 1), i * packet.size(),
                          packet);
    if (!stream_->reading_stopped()) {
      stream_->OnStreamFrame(frame);
    }
  }
  EXPECT_TRUE(stream_->reading_stopped());
}
}
} |
256 | #ifndef XLA_MLIR_TOOLS_MLIR_INTERPRETER_DIALECTS_COMPARATORS_H_
#define XLA_MLIR_TOOLS_MLIR_INTERPRETER_DIALECTS_COMPARATORS_H_
#include <complex>
#include <cstdint>
#include <type_traits>
#include "llvm/Support/ErrorHandling.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value_util.h"
namespace mlir {
namespace interpreter {
// Generic float comparison predicate.
//   v: expected three-way result (-1, 0, 1); 99 is a deliberate sentinel that
//      never matches, used to express ordered/unordered checks.
//   r: polarity — true keeps the match, false negates it.
//   nan_result: value returned whenever either operand is NaN, which
//      distinguishes the "ordered" (o*) from the "unordered" (u*) variants.
// NOTE(review): uses std::isnan but this header does not include <cmath>;
// presumably it arrives transitively via interpreter_value_util.h — confirm.
template <int64_t v, bool r, bool nan_result>
struct FloatCompare : CwiseAll {
  template <typename T>
  static bool Apply(T a, T b) {
    if (isnan(a) || isnan(b)) return nan_result;
    if constexpr (v == 0) {
      // Equality works for complex as well as real types.
      return (a == b) == r;
    } else if constexpr (std::is_floating_point_v<T> || std::is_integral_v<T>) {
      auto cmp = a > b ? 1 : (a < b ? -1 : 0);
      return (cmp == v) == r;
    } else {
      // Ordering comparisons are undefined for complex operands.
      llvm_unreachable("operation not supported for this type");
    }
  }
  template <typename T>
  static bool isnan(T a) {
    return std::isnan(a);
  }
  // A complex value is NaN if either component is NaN.
  template <typename T>
  static bool isnan(std::complex<T> a) {
    return std::isnan(std::real(a)) || std::isnan(std::imag(a));
  }
};
// Ordered predicates: NaN operands yield false.
using Foeq = FloatCompare<0, true, false>;
using Foge = FloatCompare<-1, false, false>;
using Fogt = FloatCompare<1, true, false>;
using Fole = FloatCompare<1, false, false>;
using Folt = FloatCompare<-1, true, false>;
using Fone = FloatCompare<0, false, false>;
// "Ordered": v=99 never matches, so this is true iff neither operand is NaN.
using Ford = FloatCompare<99, false, false>;
// Unordered predicates: NaN operands yield true.
using Fueq = FloatCompare<0, true, true>;
using Fuge = FloatCompare<-1, false, true>;
using Fugt = FloatCompare<1, true, true>;
using Fule = FloatCompare<1, false, true>;
using Fult = FloatCompare<-1, true, true>;
using Fune = FloatCompare<0, false, true>;
// "Unordered": true iff at least one operand is NaN.
using Funo = FloatCompare<99, true, true>;
// Unsigned integer comparison predicate: three-way compares the operands'
// unsigned reinterpretations and reports whether the ordering equals v,
// with polarity flag r optionally negating the match.
template <int64_t v, bool r>
struct UnsignedCompare : CwiseInt {
  template <typename T>
  static bool Apply(T a, T b) {
    using U = std::make_unsigned_t<T>;
    const U lhs = static_cast<U>(a);
    const U rhs = static_cast<U>(b);
    int ordering = 0;
    if (lhs < rhs) {
      ordering = -1;
    } else if (rhs < lhs) {
      ordering = 1;
    }
    return (ordering == v) == r;
  }
};
// Unsigned ordering predicates built from UnsignedCompare.
using Iuge = UnsignedCompare<-1, false>;
using Iule = UnsignedCompare<1, false>;
using Iugt = UnsignedCompare<1, true>;
using Iult = UnsignedCompare<-1, true>;
// Unsigned max: returns a when a >= b (unsigned).
// NOTE(review): lowercase `apply` here vs `Apply` on the comparators above —
// presumably required by the interpreter framework's trait lookup; confirm
// before renaming for consistency.
struct Iumax {
  template <typename T>
  static T apply(T a, T b) {
    return Iuge::Apply(a, b) ? a : b;
  }
};
// Unsigned min: returns a when a <= b (unsigned).
struct Iumin {
  template <typename T>
  static T apply(T a, T b) {
    return Iule::Apply(a, b) ? a : b;
  }
};
}
}
#endif
#include "xla/client/lib/comparators.h"
#include <limits>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using XlaCompareOp = XlaOp (*)(XlaOp, XlaOp, absl::Span<const int64_t>);
// Convenience overload: applies a single comparison generator to the first
// operand pair only, delegating to the variadic overload with the remaining
// generators left empty.
XlaComputation CreateScalarComparisonComputation(
    const std::string& name, const std::vector<PrimitiveType>& operand_types,
    XlaBuilder* builder, XlaCompareOp generator) {
  CHECK_NE(operand_types.size(), 0);
  std::vector<std::optional<XlaCompareOp>> generators(operand_types.size());
  generators[0] = generator;
  return CreateScalarComparisonComputation(name, operand_types, generators,
                                           builder);
}
}
// Builds a lexicographic scalar comparator over N operand pairs.
// Parameters come in (lhs, rhs) pairs: p.i.lhs is parameter 2*i, p.i.rhs is
// parameter 2*i+1. Each operand with a generator contributes a comparison;
// later comparisons only apply when all earlier compared operands were equal
// (total-order equality), producing a tie-breaking chain of Selects.
XlaComputation CreateScalarComparisonComputation(
    const std::string& name, const std::vector<PrimitiveType>& operand_types,
    const std::vector<std::optional<XlaCompareOp>>& generators,
    XlaBuilder* builder) {
  auto b = builder->CreateSubBuilder(name);
  if (operand_types.empty()) {
    b->ReportError(InvalidArgument("operand_types should not be empty"));
    return b->BuildAndNoteError();
  }
  CHECK_EQ(operand_types.size(), generators.size());
  int parameter_count = 0;
  // Index of the last operand with a generator: no equality chain is needed
  // after it, since nothing further can break ties.
  int last_generator_index = 0;
  std::vector<XlaOp> lhs_params;
  std::vector<XlaOp> rhs_params;
  for (auto operand_type : operand_types) {
    auto scalar_shape = ShapeUtil::MakeShape(operand_type, {});
    auto lhs_param = Parameter(b.get(), parameter_count * 2, scalar_shape,
                               absl::StrCat("p.", parameter_count, ".lhs"));
    auto rhs_param = Parameter(b.get(), parameter_count * 2 + 1, scalar_shape,
                               absl::StrCat("p.", parameter_count, ".rhs"));
    lhs_params.emplace_back(lhs_param);
    rhs_params.emplace_back(rhs_param);
    if (generators[parameter_count].has_value()) {
      last_generator_index = parameter_count;
    }
    parameter_count++;
  }
  CHECK_NE(parameter_count, 0);
  XlaOp result;
  // Conjunction of "all earlier compared operands are equal (total order)".
  XlaOp prev_equal;
  for (int i = 0; i < parameter_count; i++) {
    if (generators[i].has_value()) {
      XlaOp cmp_op = generators[i].value()(lhs_params[i], rhs_params[i], {});
      // First comparison stands alone; later ones only win on earlier ties.
      result = prev_equal.valid() ? Select(prev_equal, cmp_op, result) : cmp_op;
      if (i != last_generator_index) {
        XlaOp eq_op = EqTotalOrder(lhs_params[i], rhs_params[i]);
        prev_equal = prev_equal.valid() ? And(prev_equal, eq_op) : eq_op;
      }
    }
  }
  CHECK(result.valid());
  return b->BuildAndNoteError();
}
// Total-order less-than comparator over the given operand types (first pair
// decides; NaNs are ordered per the total-order semantics).
XlaComputation CreateScalarLtComputation(
    const std::vector<PrimitiveType>& operand_types, XlaBuilder* builder) {
  return CreateScalarComparisonComputation("compare-less-than", operand_types,
                                           builder, LtTotalOrder);
}
// Total-order greater-than comparator, mirroring CreateScalarLtComputation.
XlaComputation CreateScalarGtComputation(
    const std::vector<PrimitiveType>& operand_types, XlaBuilder* builder) {
  return CreateScalarComparisonComputation(
      "compare-greater-than", operand_types, builder, GtTotalOrder);
}
} | #include "xla/client/lib/comparators.h"
#include <cmath>
#include <limits>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace {
// Client-library test fixture owning a builder named after the current test.
class ComparatorsTest : public ClientLibraryTestBase {
 public:
  ComparatorsTest() : builder_(TestName()) {}
  XlaBuilder* builder() { return &builder_; }
 private:
  XlaBuilder builder_;
};
// Builds a Lt or Gt total-order comparator for `type`, compares every pair
// drawn from {-NaN, -Inf, -0, +0, +Inf, +NaN} (listed in ascending total
// order), concatenates the results into one R1<bool>, and fills `expected`
// with the index-order truth table the computation should produce.
template <
    PrimitiveType type,
    typename T = typename primitive_util::PrimitiveTypeToNative<type>::type>
void BuildComparatorAndComparisons(ComparatorsTest* test,
                                   bool compare_less_than,
                                   absl::InlinedVector<bool, 10>* expected) {
  auto compare = compare_less_than
                     ? CreateScalarLtComputation({type}, test->builder())
                     : CreateScalarGtComputation({type}, test->builder())
                     ;
  auto negative_nan = ConstantR0<T>(
      test->builder(), -T(std::numeric_limits<float>::quiet_NaN()));
  auto positive_nan = ConstantR0<T>(test->builder(),
                                    T(std::numeric_limits<float>::quiet_NaN()));
  auto negative_zero = ConstantR0<T>(test->builder(), T(-0.));
  auto positive_zero = ConstantR0<T>(test->builder(), T(0.));
  auto negative_infinity = MinValue(test->builder(), type);
  auto positive_infinity = MaxValue(test->builder(), type);
  std::vector<XlaOp> all_constants{negative_nan, negative_infinity,
                                   negative_zero, positive_zero,
                                   positive_infinity, positive_nan};
  const size_t num_constants = all_constants.size();
  std::vector<XlaOp> all_comparisons;
  // Fixed: size the reservation with integer arithmetic instead of the
  // floating-point std::pow, which was slower and risked rounding.
  all_comparisons.reserve(num_constants * num_constants);
  for (const XlaOp& lhs_constant : all_constants) {
    for (const XlaOp& rhs_constant : all_constants) {
      all_comparisons.push_back(Broadcast(
          Call(test->builder(), compare, {lhs_constant, rhs_constant}), {1}));
    }
  }
  ConcatInDim(test->builder(), all_comparisons, 0);
  // The constants are listed in ascending total order, so comparing indices
  // predicts each comparison's result.
  expected->clear();
  // Fixed: size_t indices avoid the signed/unsigned comparison that the
  // original `for (int i = 0; i < all_constants.size(); ...)` loops had.
  for (size_t i = 0; i < num_constants; ++i) {
    for (size_t j = 0; j < num_constants; ++j) {
      expected->push_back(compare_less_than ? i < j : i > j);
    }
  }
}
// End-to-end checks: for each float type, the Lt/Gt comparator over the
// special-value constants must match the index-order expectation table.
XLA_TEST_F(ComparatorsTest, CompareLtBF16) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<BF16>(this, true,
                                      &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtBF16) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<BF16>(this, false,
                                      &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF16) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F16>(this, true,
                                     &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF16) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F16>(this, false,
                                     &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF32) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F32>(this, true,
                                     &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF32) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F32>(this, false,
                                     &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareLtF64) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F64>(this, true,
                                     &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
XLA_TEST_F(ComparatorsTest, CompareGtF64) {
  absl::InlinedVector<bool, 10> expected;
  BuildComparatorAndComparisons<F64>(this, false,
                                     &expected);
  ComputeAndCompareR1<bool>(builder(), expected, {});
}
const auto kCompareStr = HloOpcodeString(xla::HloOpcode::kCompare);
const auto kParameterStr = HloOpcodeString(xla::HloOpcode::kParameter);
const auto kSelectStr = HloOpcodeString(xla::HloOpcode::kSelect);
void ExpectCompareOp(
const xla::HloInstructionProto op, xla::PrimitiveType type,
absl::string_view direction, int parameter0_number, int parameter1_number,
const tsl::protobuf::RepeatedPtrField<xla::HloInstructionProto>& all_ops) {
EXPECT_EQ(op.opcode(), kCompareStr);
const auto& operand0 = all_ops.at(op.operand_ids(0) - 1);
EXPECT_EQ(operand0.opcode(), kParameterStr);
EXPECT_EQ(operand0.parameter_number(), parameter0_number);
EXPECT_EQ(operand0.shape().element_type(), type);
const auto& operand1 = all_ops.at(op.operand_ids(1) - 1);
EXPECT_EQ(operand1.opcode(), kParameterStr);
EXPECT_EQ(operand1.parameter_number(), parameter1_number);
EXPECT_EQ(operand1.shape().element_type(), type);
}
// One operand, one generator: the root is the single compare instruction.
TEST(VariadicComparatorTest, OneOperandOneComparison) {
  XlaBuilder builder("test");
  XlaComputation comp = CreateScalarComparisonComputation(
      "computation", {U16}, {LtTotalOrder}, &builder);
  EXPECT_EQ(comp.proto().computations_size(), 1);
  EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 2);
  const auto& instr = comp.proto().computations(0).instructions();
  const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
  ExpectCompareOp(root, U16, "LT", 0, 1, instr);
}
// Second operand has no generator: it contributes parameters but the root is
// still just the first compare.
TEST(VariadicComparatorTest, TwoOperandsOneComparison) {
  XlaBuilder builder("test");
  XlaComputation comp = CreateScalarComparisonComputation(
      "computation", {U16, U32}, {LtTotalOrder, {}}, &builder);
  EXPECT_EQ(comp.proto().computations_size(), 1);
  EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 4);
  const auto& instr = comp.proto().computations(0).instructions();
  const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
  ExpectCompareOp(root, U16, "LT", 0, 1, instr);
}
// Two generators: root is Select(eq(p0), lt(p1), lt(p0)) — the second
// comparison only decides when the first pair is equal.
TEST(VariadicComparatorTest, TwoOperandsTwoComparisons) {
  XlaBuilder builder("test");
  XlaComputation comp = CreateScalarComparisonComputation(
      "computation", {U16, U32}, {LtTotalOrder, LtTotalOrder}, &builder);
  EXPECT_EQ(comp.proto().computations_size(), 1);
  EXPECT_EQ(comp.proto().computations(0).program_shape().parameters_size(), 4);
  const auto& instr = comp.proto().computations(0).instructions();
  const auto& root = instr.at(comp.proto().computations(0).root_id() - 1);
  EXPECT_EQ(root.opcode(), HloOpcodeString(xla::HloOpcode::kSelect));
  ExpectCompareOp(instr.at(root.operand_ids(0) - 1), U16, "EQ", 0, 1, instr);
  ExpectCompareOp(instr.at(root.operand_ids(1) - 1), U32, "LT", 2, 3, instr);
  ExpectCompareOp(instr.at(root.operand_ids(2) - 1), U16, "LT", 0, 1, instr);
}
}
} |
257 | #ifndef TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_XLA_SHARDING_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_TENSORFLOW_UTILS_XLA_SHARDING_UTIL_H_
#include <map>
#include <string>
#include "absl/status/statusor.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace tensorflow {
// Attribute names used on cluster_func ops to carry per-operand / per-result
// XLA sharding configuration.
inline constexpr llvm::StringRef kInputShardingAttr =
    "input_sharding_configuration";
inline constexpr llvm::StringRef kOutputShardingAttr =
    "output_sharding_configuration";
// Parses a serialized/textual sharding string into an xla::OpSharding;
// report_error controls whether failures emit MLIR diagnostics.
mlir::LogicalResult DecodeShardingAttribute(const std::string& shard_str,
                                            xla::OpSharding& sharding,
                                            bool report_error = true);
// Same, but starting from an MLIR attribute.
mlir::LogicalResult DecodeShardingAttribute(mlir::Attribute shard_attr,
                                            xla::OpSharding& sharding,
                                            bool report_error = true);
// Attaches the given sharding string to op.
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str);
// Computes, per logical core, the operand list for the partitioned
// cluster_func (splitting inputs as dictated by their sharding).
mlir::LogicalResult ExtractInputsForLogicalDevices(
    int num_cores_per_replica, mlir::tf_device::ClusterFuncOp cluster_func,
    mlir::OpBuilder* builder,
    llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list);
// Parses and validates the output sharding attribute of cluster_func.
mlir::LogicalResult ParseAndValidateOutputSharding(
    int num_cores_per_replica, mlir::tf_device::ClusterFuncOp cluster_func,
    mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list);
// Determines the result types one logical core produces, plus the mapping
// from cluster result index to per-core result index.
mlir::LogicalResult GetOutputTypesForLogicalDeviceComputation(
    int core_id, llvm::ArrayRef<xla::OpSharding> output_sharding_config,
    mlir::tf_device::ClusterFuncOp cluster_func,
    llvm::SmallVectorImpl<mlir::Type>* output_types,
    llvm::SmallVectorImpl<int>* cluster_to_core_index);
// Rewrites uses of the old parallel_execute results to the new per-core
// results, concatenating split outputs where sharding requires it.
mlir::LogicalResult RemapOutputsFromLogicalDevices(
    const mlir::Location& location,
    llvm::ArrayRef<xla::OpSharding> output_sharding_config,
    llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
    int num_results_pre_cluster,
    mlir::tf_device::ParallelExecuteOp old_parallel_execute, int cluster_idx,
    mlir::tf_device::ParallelExecuteOp new_parallel_execute,
    mlir::OpBuilder* builder);
// Per-core argument index mapping derived from the compile metadata.
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> GetMetadataArgumentMapping(
    const tpu::TPUCompileMetadataProto& metadata);
// Helpers for classifying xla::OpSharding values.
int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding);
bool IsOtherReplicatedSharding(const xla::OpSharding& xla_sharding);
bool IsSplitSharding(const xla::OpSharding& sharding);
bool IsReplicatedSharding(const xla::OpSharding& sharding);
// Maps tensor dimension index -> number of splits for a tiled sharding.
absl::StatusOr<std::map<int, int>> GetDimensionIndicesAndNumSplitsFromSharding(
    const xla::OpSharding& sharding);
}
#endif
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <cstdint>
#include <map>
#include <numeric>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/client/sharding_builder.h"
#include "xla/service/hlo_parser.h"
#include "xla/xla_data.pb.h"
namespace tensorflow {
namespace {
constexpr char kNumSplitAttr[] = "num_split";
// Emits a tf.Split of src_input into num_split equal slices along
// split_dimension, storing the created op in *split_op.
// Fails (with a diagnostic at `location`) when the static dimension size is
// not evenly divisible by num_split; dynamic/unranked inputs keep their
// original type on each output.
mlir::LogicalResult CreateSplitOp(const int num_split,
                                  const int split_dimension,
                                  const mlir::Location& location,
                                  mlir::Value src_input,
                                  mlir::OpBuilder* builder,
                                  mlir::TF::SplitOp* split_op) {
  // tf.Split takes the split axis as a scalar i32 constant operand.
  auto split_dim_type =
      mlir::RankedTensorType::get({}, builder->getIntegerType(32));
  auto split_dimension_attr =
      mlir::DenseElementsAttr::get(split_dim_type, split_dimension);
  auto split_dimension_op = builder->create<mlir::TF::ConstOp>(
      location, split_dim_type, split_dimension_attr);
  mlir::Type output_type;
  auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
  if (input_type.hasRank()) {
    if (input_type.getShape()[split_dimension] == mlir::ShapedType::kDynamic) {
      // Dynamic split axis: the static result shape is unchanged.
      output_type = input_type;
    } else {
      auto shape = llvm::to_vector<4>(input_type.getShape());
      if (shape[split_dimension] % num_split != 0) {
        return mlir::emitError(
            location,
            llvm::formatv(
                "incorrect input sharding configuration received. "
                "{0}-th dimension of the input must be evenly divisible by {1}",
                split_dimension, num_split));
      }
      // Each output carries 1/num_split of the split axis.
      shape[split_dimension] = shape[split_dimension] / num_split;
      output_type =
          mlir::RankedTensorType::get(shape, input_type.getElementType());
    }
  } else {
    output_type = input_type;
  }
  // All num_split results share the same type.
  llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
  *split_op = builder->create<mlir::TF::SplitOp>(
      location, output_types, split_dimension_op.getOutput(), src_input);
  (*split_op)->setAttr(
      kNumSplitAttr,
      builder->getIntegerAttr(builder->getIntegerType(32), num_split));
  return mlir::success();
}
// Builds a `tf.Concat` op that joins `inputs` along `concat_dimension`.
// The result shape is derived from inputs[0]: the concat dimension is
// multiplied by the number of inputs when it is static; dynamic/unranked
// input types are propagated unchanged.
mlir::TF::ConcatOp CreateConcatOp(const int concat_dimension,
                                  const mlir::Location& location,
                                  mlir::ArrayRef<mlir::Value> inputs,
                                  mlir::OpBuilder* builder) {
  // tf.Concat takes the concat dimension as a scalar i32 constant operand.
  auto concat_dim_type =
      mlir::RankedTensorType::get({}, builder->getIntegerType(32));
  auto concat_dimension_attr =
      mlir::DenseElementsAttr::get(concat_dim_type, concat_dimension);
  auto concat_dimension_op = builder->create<mlir::TF::ConstOp>(
      location, concat_dim_type, concat_dimension_attr);
  // NOTE: all inputs are assumed to share inputs[0]'s type — TODO confirm
  // callers guarantee this (they pass outputs of the same sharded program).
  mlir::Type output_type;
  auto input_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
  if (input_type.hasRank()) {
    if (input_type.getShape()[concat_dimension] == mlir::ShapedType::kDynamic) {
      output_type = input_type;
    } else {
      auto shape = llvm::to_vector<4>(input_type.getShape());
      shape[concat_dimension] = shape[concat_dimension] * inputs.size();
      output_type =
          mlir::RankedTensorType::get(shape, input_type.getElementType());
    }
  } else {
    output_type = input_type;
  }
  return builder->create<mlir::TF::ConcatOp>(
      location, output_type, concat_dimension_op.getOutput(), inputs);
}
// Splits `original_source` according to a tiled `input_sharding`, producing
// one value per logical device in `*tiled_inputs`.
//
// Splits are applied dimension by dimension (in increasing dimension order,
// as produced by GetDimensionIndicesAndNumSplitsFromSharding): each round
// splits every output of the previous round, forming a tree of tf.Split ops.
// When the sharding replicates on the last tile dimension, each final slice
// is repeated once per replica so that `tiled_inputs` lines up with
// `tile_assignment_devices`.
mlir::LogicalResult HandleTileShardedInputs(
    const mlir::Location& location, const xla::OpSharding& input_sharding,
    const mlir::Value& original_source, mlir::OpBuilder* builder,
    llvm::SmallVectorImpl<mlir::Value>* tiled_inputs) {
  llvm::SmallVector<mlir::TF::SplitOp, 4> split_ops_for_tiled_input;
  split_ops_for_tiled_input.reserve(
      input_sharding.tile_assignment_devices_size());
  auto dimension_to_splits_map =
      GetDimensionIndicesAndNumSplitsFromSharding(input_sharding);
  if (!dimension_to_splits_map.ok()) {
    LOG(ERROR) << dimension_to_splits_map.status();
    return mlir::failure();
  }
  for (const auto& dimension_and_num_splits : *dimension_to_splits_map) {
    const int dimension = dimension_and_num_splits.first;
    const int num_splits = dimension_and_num_splits.second;
    // First split dimension: split the original value directly.
    if (split_ops_for_tiled_input.empty()) {
      mlir::TF::SplitOp root_split_op;
      auto result = CreateSplitOp(num_splits, dimension, location,
                                  original_source, builder, &root_split_op);
      if (mlir::failed(result)) return mlir::failure();
      split_ops_for_tiled_input.emplace_back(root_split_op);
      continue;
    }
    // Subsequent dimensions: split every result of the previous round.
    llvm::SmallVector<mlir::TF::SplitOp, 4> new_split_ops;
    new_split_ops.reserve(split_ops_for_tiled_input.size() * num_splits);
    for (auto split_op : split_ops_for_tiled_input) {
      for (auto parent_split_output_value : split_op.getResults()) {
        mlir::TF::SplitOp child_split_op;
        auto result =
            CreateSplitOp(num_splits, dimension, location,
                          parent_split_output_value, builder, &child_split_op);
        if (mlir::failed(result)) return mlir::failure();
        new_split_ops.emplace_back(child_split_op);
      }
    }
    std::swap(new_split_ops, split_ops_for_tiled_input);
  }
  // Flatten the leaves of the split tree into per-device inputs.
  tiled_inputs->clear();
  tiled_inputs->reserve(input_sharding.tile_assignment_devices_size());
  for (auto split_op : split_ops_for_tiled_input) {
    for (auto split_op_output : split_op.getResults()) {
      // With replicate_on_last_tile_dim, each slice feeds that many devices.
      int64_t repeat_count =
          input_sharding.replicate_on_last_tile_dim()
              ? *input_sharding.tile_assignment_dimensions().rbegin()
              : 1;
      for (int64_t i = 0; i < repeat_count; ++i) {
        tiled_inputs->push_back(split_op_output);
      }
    }
  }
  return mlir::success();
}
// Returns true when `sharding` cannot be used on a TPU partitioned input:
// only REPLICATED and OTHER are supported.
bool UnsupportedPartitionedShardingType(xla::OpSharding::Type sharding) {
  const bool supported = sharding == xla::OpSharding::REPLICATED ||
                         sharding == xla::OpSharding::OTHER;
  return !supported;
}
}
// Extracts, from a tiled sharding, a map of {tensor dimension -> number of
// splits} for every dimension that is actually split (> 1 tile).
// Returns InvalidArgument when no dimension is split, i.e. the tiled
// sharding is unnecessary.
absl::StatusOr<std::map<int, int>> GetDimensionIndicesAndNumSplitsFromSharding(
    const xla::OpSharding& sharding) {
  // A trailing replication dimension does not tile the tensor itself.
  int64_t tensor_tile_rank = sharding.tile_assignment_dimensions_size();
  if (sharding.replicate_on_last_tile_dim()) --tensor_tile_rank;
  std::map<int, int> splits_by_dimension;
  for (int dim = 0; dim < tensor_tile_rank; ++dim) {
    const int num_splits = sharding.tile_assignment_dimensions(dim);
    if (num_splits > 1) splits_by_dimension.emplace(dim, num_splits);
  }
  if (splits_by_dimension.empty()) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Arg has unnecessary tiled sharding: ", sharding.DebugString()));
  }
  return splits_by_dimension;
}
// Number of tensor dimensions covered by the tile assignment, excluding the
// trailing replication dimension (if any) and any last_tile_dims entries.
int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding) {
  int dims = xla_sharding.tile_assignment_dimensions_size();
  if (xla_sharding.replicate_on_last_tile_dim()) --dims;
  dims -= xla_sharding.last_tile_dims_size();
  return dims;
}
// Returns true when an OTHER-typed sharding is effectively fully replicated:
// every tensor tile dimension is 1 and replication happens either via
// replicate_on_last_tile_dim or via last_tile_dims.
//
// Improvement over the previous version: the cheap type/replication checks
// run before the tile-dimension scan, so non-OTHER shardings return without
// iterating. The result is identical for all inputs.
bool IsOtherReplicatedSharding(const xla::OpSharding& xla_sharding) {
  if (xla_sharding.type() != xla::OpSharding::OTHER) return false;
  if (!xla_sharding.replicate_on_last_tile_dim() &&
      xla_sharding.last_tile_dims().empty()) {
    return false;
  }
  // Any dimension with more than one tile means the tensor is truly split.
  const int max_dim = GetDimsFromXLAShardingTiled(xla_sharding);
  for (int i = 0; i < max_dim; ++i) {
    if (xla_sharding.tile_assignment_dimensions(i) != 1) {
      return false;
    }
  }
  return true;
}
// True when the sharding genuinely tiles the tensor: OTHER-typed and not a
// replication-in-disguise (see IsOtherReplicatedSharding).
bool IsSplitSharding(const xla::OpSharding& sharding) {
  if (sharding.type() != xla::OpSharding::OTHER) return false;
  return !IsOtherReplicatedSharding(sharding);
}
// True when every logical device receives the full tensor, either via an
// explicit REPLICATED sharding or an OTHER sharding that only replicates.
bool IsReplicatedSharding(const xla::OpSharding& sharding) {
  if (sharding.type() == xla::OpSharding::REPLICATED) return true;
  return IsOtherReplicatedSharding(sharding);
}
// Parses `shard_str` into `sharding`, accepting either a serialized
// xla::OpSharding proto or the human-readable HLO sharding syntax.
// On failure, optionally prints the parse error to stderr.
mlir::LogicalResult DecodeShardingAttribute(const std::string& shard_str,
                                            xla::OpSharding& sharding,
                                            bool report_error) {
  // Fast path: binary proto form.
  if (sharding.ParseFromString(shard_str)) return mlir::success();
  // Fallback: human-readable HLO sharding text (e.g. "{replicated}").
  absl::StatusOr<xla::HloSharding> parsed = xla::ParseSharding(shard_str);
  if (!parsed.ok()) {
    if (report_error)
      llvm::errs() << std::string(parsed.status().message()) << "\n";
    return mlir::failure();
  }
  sharding = parsed->ToProto();
  return mlir::success();
}
// Attribute overload: only string attributes can carry a sharding; delegates
// to the string overload for the actual parsing.
mlir::LogicalResult DecodeShardingAttribute(mlir::Attribute shard_attr,
                                            xla::OpSharding& sharding,
                                            bool report_error) {
  if (!mlir::isa<mlir::StringAttr>(shard_attr)) return mlir::failure();
  const std::string shard_str =
      mlir::cast<mlir::StringAttr>(shard_attr).getValue().str();
  return DecodeShardingAttribute(shard_str, sharding, report_error);
}
// Rewrites the attribute named `shard_str` on `op` from a serialized
// xla::OpSharding proto to the human-readable HloSharding text form.
// Silently returns when the attribute is absent or not a valid proto;
// emits an error on the op when proto -> HloSharding conversion fails.
// NOTE(review): despite the name, this converts proto -> readable text,
// not the reverse — confirm intent with callers.
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str) {
  if (!op->hasAttrOfType<mlir::StringAttr>(shard_str)) return;
  ::xla::OpSharding sharding;
  auto sharding_proto_str =
      op->getAttrOfType<mlir::StringAttr>(shard_str).getValue().str();
  // Not a serialized proto (possibly already human-readable): leave as-is.
  if (!sharding.ParseFromString(sharding_proto_str)) return;
  auto hlosharding = xla::HloSharding::FromProto(sharding);
  if (!hlosharding.ok()) {
    op->emitError("Unable to encode sharding to human readable ")
        << hlosharding.status().message();
    return;
  }
  op->setAttr(shard_str,
              mlir::StringAttr::get(op->getContext(), hlosharding->ToString()));
}
// Computes, for each of the `num_cores_per_replica` logical devices, the list
// of input values its program receives, based on the cluster func's per-input
// sharding attribute:
//   - no sharding attr: all inputs go to logical device 0;
//   - TPUPartitionedInputV2 producer: its operands are distributed directly;
//   - split (tiled) sharding: the input is split via tf.Split ops;
//   - replicated sharding: the input goes to every device;
//   - maximal sharding: the input goes to the single assigned device.
mlir::LogicalResult ExtractInputsForLogicalDevices(
    const int num_cores_per_replica,
    mlir::tf_device::ClusterFuncOp cluster_func, mlir::OpBuilder* builder,
    llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list) {
  // One (initially empty) input vector per logical device.
  input_list->reserve(num_cores_per_replica);
  for (int i = 0; i < num_cores_per_replica; ++i)
    input_list->emplace_back(llvm::SmallVector<mlir::Value, 4>());
  llvm::SmallVector<mlir::Value, 4> cluster_func_inputs(
      cluster_func.getOperands());
  auto sharding_attrs =
      cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
          kInputShardingAttr);
  // Without sharding info, everything runs on logical device 0.
  if (!sharding_attrs) {
    (*input_list)[0] = cluster_func_inputs;
    return mlir::success();
  }
  for (const auto& sharding_attr_and_index : llvm::enumerate(sharding_attrs)) {
    const auto& sharding_attr = sharding_attr_and_index.value();
    const auto input_index = sharding_attr_and_index.index();
    const auto& input_value = cluster_func_inputs[input_index];
    xla::OpSharding sharding;
    if (DecodeShardingAttribute(
            mlir::cast<mlir::StringAttr>(sharding_attr).getValue().str(),
            sharding)
            .failed()) {
      return cluster_func.emitError("incorrect sharding format for inputs");
    }
    const auto input_sharding_type = sharding.type();
    // Shared error message for tiled-input count mismatches.
    auto tiled_sharding_mismatched = [&](int tiled_input_size) {
      return cluster_func.emitError(
          llvm::formatv("incorrect {0}-th tiled input sharding received. "
                        "Product of tile sharding splits({1}) must be equal to "
                        "number of logical devices : {2}",
                        input_index, tiled_input_size, num_cores_per_replica));
    };
    // Inputs already partitioned by a TPUPartitionedInputV2: distribute its
    // operands instead of introducing new splits.
    if (auto partitioned_input =
            llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
                input_value.getDefiningOp())) {
      if (UnsupportedPartitionedShardingType(input_sharding_type))
        return cluster_func->emitOpError()
               << "unsupported input sharding type "
               << OpSharding_Type_Name(input_sharding_type) << " for "
               << input_index << "-th input";
      if (input_sharding_type == xla::OpSharding::REPLICATED) {
        // One operand per logical device, in order.
        for (const auto& index_and_inputs : llvm::enumerate(*input_list)) {
          index_and_inputs.value().emplace_back(
              partitioned_input.getOperand(index_and_inputs.index()));
        }
      } else {
        assert(input_sharding_type == xla::OpSharding::OTHER);
        if (partitioned_input.getInputs().size() != num_cores_per_replica)
          return tiled_sharding_mismatched(
              partitioned_input.getInputs().size());
        // Route operand i to the device assigned by the tile assignment.
        for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
          const int assigned_logical_device =
              sharding.tile_assignment_devices(i);
          (*input_list)[assigned_logical_device].emplace_back(
              partitioned_input.getInputs()[i]);
        }
      }
      continue;
    }
    if (IsSplitSharding(sharding)) {
      // Materialize tf.Split ops and route each slice to its device.
      llvm::SmallVector<mlir::Value, 4> tiled_inputs;
      auto result = HandleTileShardedInputs(
          cluster_func.getLoc(), sharding, input_value, builder, &tiled_inputs);
      if (mlir::failed(result)) return mlir::failure();
      const int64_t tiled_inputs_size = tiled_inputs.size();
      if (tiled_inputs_size != num_cores_per_replica)
        return tiled_sharding_mismatched(tiled_inputs.size());
      for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
        const int assigned_logical_device = sharding.tile_assignment_devices(i);
        (*input_list)[assigned_logical_device].emplace_back(tiled_inputs[i]);
      }
    } else if (IsReplicatedSharding(sharding)) {
      // Full tensor on every logical device.
      for (auto& inputs : *input_list) inputs.emplace_back(input_value);
    } else {
      // Maximal sharding: full tensor on exactly one device.
      assert(input_sharding_type == xla::OpSharding::MAXIMAL);
      const int logical_device_id = sharding.tile_assignment_devices(0);
      (*input_list)[logical_device_id].emplace_back(input_value);
    }
  }
  return mlir::success();
}
// Decodes the per-output sharding attribute of `cluster_func` into
// `output_sharding_list`, validating that:
//   - the attribute exists and has one entry (a string) per result;
//   - OTHER (tiled) shardings use exactly `num_cores_per_replica` devices;
//   - MAXIMAL shardings reference a device id in [0, num_cores_per_replica).
mlir::LogicalResult ParseAndValidateOutputSharding(
    const int num_cores_per_replica,
    mlir::tf_device::ClusterFuncOp cluster_func,
    mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list) {
  output_sharding_list->reserve(cluster_func.getNumResults());
  const auto output_sharding_attrs =
      cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
          kOutputShardingAttr);
  if (!output_sharding_attrs)
    return cluster_func.emitError(
        "output_sharding_configuration missing from cluster func");
  if (output_sharding_attrs.size() != cluster_func.getNumResults())
    return cluster_func.emitError("incorrect number of output sharding");
  for (const auto& output_sharding_and_index :
       llvm::enumerate(output_sharding_attrs)) {
    const auto& output_sharding = output_sharding_and_index.value();
    const int sharding_index = output_sharding_and_index.index();
    if (!mlir::isa<mlir::StringAttr>(output_sharding))
      return cluster_func.emitError(llvm::formatv(
          "non-string output sharding at index {0}", sharding_index));
    xla::OpSharding sharding;
    if (DecodeShardingAttribute(
            mlir::cast<mlir::StringAttr>(output_sharding).getValue().str(),
            sharding)
            .failed()) {
      return cluster_func.emitError("incorrect sharding format for outputs");
    }
    // Tiled outputs must cover exactly all logical devices.
    if (sharding.type() == xla::OpSharding::OTHER &&
        sharding.tile_assignment_devices_size() != num_cores_per_replica)
      return cluster_func.emitError(llvm::formatv(
          "incorrect sharding format for outputs. Number of "
          "tiled outputs({0}) must match the number of logical "
          "devices({1})",
          sharding.tile_assignment_devices_size(), num_cores_per_replica));
    // Maximal outputs must point at a valid logical device id.
    if (sharding.type() == xla::OpSharding::MAXIMAL &&
        ((sharding.tile_assignment_devices(0) >= num_cores_per_replica) ||
         (sharding.tile_assignment_devices(0) < 0)))
      return cluster_func.emitError(llvm::formatv(
          "incorrect sharding format for outputs. Maximal "
          "sharding should be assigned to device id in range "
          "[0, {0}). Currently assigned to {1}",
          num_cores_per_replica, sharding.tile_assignment_devices(0)));
    output_sharding_list->emplace_back(std::move(sharding));
  }
  return mlir::success();
}
namespace {
// True when `sharding` is a MAXIMAL sharding that pins the value to exactly
// the logical device `core_id`.
bool IsAssignedToLogicalDevice(const int core_id,
                               const xla::OpSharding& sharding) {
  if (sharding.type() != xla::OpSharding::MAXIMAL) return false;
  return sharding.tile_assignment_devices(0) == core_id;
}
// Maps `cluster_func_output_index` to the corresponding output index of the
// program running on logical device `core_id`, using the precomputed
// `cluster_to_core_index` table. A -1 entry means the output was never
// assigned/sharded to that core, which is reported as an error.
//
// Fix: the nested SmallVector table was previously taken by value, deep
// copying every inner vector on each call; it is read-only here, so take it
// by const reference. (Internal-linkage helper — all callers are in this
// file and are unaffected.)
mlir::LogicalResult LookupClusterToCoreIndex(
    const mlir::Location& location,
    const llvm::SmallVector<llvm::SmallVector<int, 4>, 4>&
        cluster_to_core_index,
    const int core_id, const int cluster_func_output_index,
    int* core_output_index) {
  *core_output_index =
      cluster_to_core_index[core_id][cluster_func_output_index];
  if (*core_output_index == -1) {
    mlir::emitError(
        location,
        llvm::formatv("Attempted to map cluster_func output index {0} to "
                      "program assigned to core {1}. The tensor at this output "
                      "index was not assigned or sharded to this core.",
                      cluster_func_output_index, core_id));
    return mlir::failure();
  }
  return mlir::success();
}
// Collects, in tile-assignment order, the per-core outputs that must be
// concatenated to reconstruct tile-sharded output
// `cluster_func_output_index`. When the sharding replicates on the last tile
// dimension, only the first replica of each slice is collected.
// NOTE(review): `cluster_to_core_index` is passed by value, copying the
// nested SmallVectors on every call — consider a const reference.
mlir::LogicalResult GetTileShardedOutputsToMerge(
    const mlir::Location& location, const int cluster_func_output_index,
    llvm::ArrayRef<xla::OpSharding> output_sharding_config,
    llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
    int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
    llvm::SmallVector<mlir::Value, 4>* outputs_to_merge) {
  const xla::OpSharding& sharding =
      output_sharding_config[cluster_func_output_index];
  outputs_to_merge->reserve(sharding.tile_assignment_devices_size());
  for (const auto& core_id_and_index :
       llvm::enumerate(sharding.tile_assignment_devices())) {
    auto core_id = core_id_and_index.value();
    auto tile_index = core_id_and_index.index();
    // Skip replicas: only one copy of each slice is needed for the merge.
    int last_tile_dim_size = *sharding.tile_assignment_dimensions().rbegin();
    if (sharding.replicate_on_last_tile_dim() &&
        tile_index % last_tile_dim_size != 0) {
      continue;
    }
    int region_output_index;
    auto status = LookupClusterToCoreIndex(location, cluster_to_core_index,
                                           core_id, cluster_func_output_index,
                                           &region_output_index);
    if (failed(status)) return mlir::failure();
    // Regions of this cluster start at `cluster_idx` in the parallel_execute.
    const auto output_from_logical_device =
        new_parallel_execute.GetRegionOutputs(cluster_idx +
                                              core_id)[region_output_index];
    outputs_to_merge->emplace_back(output_from_logical_device);
  }
  return mlir::success();
}
// Reconstructs a tile-sharded cluster_func output by concatenating the
// per-core outputs with tf.Concat ops (one round per split dimension, in
// reverse dimension order to undo the split tree built for the inputs), then
// replaces all uses of `cluster_func_output` with the merged value.
mlir::LogicalResult HandleTileShardedOutputs(
    const int cluster_func_output_index,
    llvm::ArrayRef<xla::OpSharding> output_sharding_config,
    llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
    const mlir::Location& location, mlir::Value cluster_func_output,
    int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
    mlir::OpBuilder* builder) {
  // Concats must be emitted after the parallel_execute producing the slices.
  builder->setInsertionPointAfter(new_parallel_execute);
  llvm::SmallVector<mlir::Value, 4> outputs_to_merge;
  auto status = GetTileShardedOutputsToMerge(
      location, cluster_func_output_index, output_sharding_config,
      cluster_to_core_index, cluster_idx, new_parallel_execute,
      &outputs_to_merge);
  if (failed(status)) return mlir::failure();
  const xla::OpSharding& sharding =
      output_sharding_config[cluster_func_output_index];
  auto dimension_to_splits_map =
      GetDimensionIndicesAndNumSplitsFromSharding(sharding);
  if (!dimension_to_splits_map.ok()) {
    LOG(ERROR) << dimension_to_splits_map.status();
    return mlir::failure();
  }
  // Merge innermost split dimension first (reverse of split order): each
  // round concatenates groups of `num_splits` adjacent values.
  for (auto it = dimension_to_splits_map->rbegin();
       it != dimension_to_splits_map->rend(); ++it) {
    int concat_dimension = it->first;
    int num_splits = it->second;
    llvm::SmallVector<mlir::Value, 4> new_outputs;
    new_outputs.reserve(num_splits);
    for (int i = 0, end = outputs_to_merge.size(); i < end;
         i = i + num_splits) {
      mlir::TF::ConcatOp concat_op =
          CreateConcatOp(concat_dimension, location,
                         llvm::ArrayRef<mlir::Value>{
                             outputs_to_merge.begin() + i,
                             outputs_to_merge.begin() + i + num_splits},
                         builder);
      new_outputs.emplace_back(concat_op.getResult());
    }
    std::swap(new_outputs, outputs_to_merge);
  }
  // After all rounds, exactly one fully merged value remains.
  assert(outputs_to_merge.size() == 1);
  cluster_func_output.replaceAllUsesWith(outputs_to_merge[0]);
  return mlir::success();
}
mlir::LogicalResult ValidateAndGetTiledExecuteOutputShape(
const mlir::Location& location,
const mlir::TensorType cluster_func_output_type,
const xla::OpSharding& output_sharding,
mlir::Type* tiled_logical_computation_type) {
const auto output_shape = cluster_func_output_type.getShape();
auto new_output_shape = llvm::to_vector<4>(output_shape);
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(output_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
for (const auto& dimension_and_output_splits : *dimension_to_splits_map) {
const auto dimension = dimension_and_output_splits.first;
const auto output_splits = dimension_and_output_splits.second;
if (output_shape[dimension] == mlir::ShapedType::kDynamic) {
*tiled_logical_computation_type = cluster_func_output_type;
break;
}
if (output_shape[dimension] % output_splits != 0) {
mlir::emitError(
location,
llvm::formatv("incorrect output sharding received. "
"{0}-th dimension of the output must be "
"evenly divisible by {1}, got dimension "
"shape {2}",
dimension, output_splits, output_shape[dimension]));
}
new_output_shape[dimension] = output_shape[dimension] / output_splits;
}
*tiled_l | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <string>
#include <gtest/gtest.h>
#include "mlir/Support/LogicalResult.h"
#include "xla/xla_data.pb.h"
inline constexpr llvm::StringRef kXlaShardingAttrName = "_XlaSharding";
namespace tensorflow {
namespace {
TEST(DecodeShardingAttributeTest, CheckInvalidString) {
  xla::OpSharding sharding;
  // The empty string parses as an empty (default) OpSharding proto, so it
  // succeeds; "manual" without braces is neither a valid proto nor valid HLO
  // sharding text, so it must fail.
  EXPECT_TRUE(DecodeShardingAttribute("", sharding).succeeded());
  EXPECT_TRUE(DecodeShardingAttribute("manual", sharding).failed());
}
TEST(DecodeShardingAttributeTest, CheckManualShardString) {
  xla::OpSharding sharding;
  // Human-readable HLO sharding syntax is accepted via the ParseSharding
  // fallback.
  EXPECT_TRUE(DecodeShardingAttribute("{manual}", sharding).succeeded());
  EXPECT_TRUE(sharding.type() == sharding.MANUAL);
  EXPECT_EQ(0, sharding.tile_assignment_devices_size());
}
TEST(DecodeShardingAttributeTest, CheckMaximalShardString) {
  xla::OpSharding sharding;
  // Maximal sharding records the single assigned device id.
  EXPECT_TRUE(
      DecodeShardingAttribute("{maximal device=0}", sharding).succeeded());
  EXPECT_TRUE(sharding.type() == sharding.MAXIMAL);
  EXPECT_EQ(1, sharding.tile_assignment_devices_size());
}
}
} |
258 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MAKE_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MAKE_FULLY_CONNECTED_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
namespace tflite {
namespace gpu {
std::unique_ptr<NodeTransformation> NewMakeFullyConnectedFromConvolution();
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
// A 2D convolution degenerates to a fully connected layer when its kernel is
// 1x1 and it applies no striding, dilation, or padding.
bool IsConvEquivalentToFullyConnected(const Convolution2DAttributes& attr) {
  const bool kernel_is_1x1 =
      attr.weights.shape.w == 1 && attr.weights.shape.h == 1;
  const bool unit_steps =
      attr.strides == HW(1, 1) && attr.dilations == HW(1, 1);
  const bool no_padding = attr.padding.prepended == HW(0, 0) &&
                          attr.padding.appended == HW(0, 0);
  return kernel_is_1x1 && unit_steps && no_padding;
}
// Rewrites a 1x1 CONVOLUTION_2D over a 1x1 spatial input into
// FULLY_CONNECTED, reusing the convolution's weights and bias unchanged.
class MakeFullyConnectedFromConvolution : public NodeTransformation {
 public:
  TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final {
    if (node->operation.type != ToString(OperationType::CONVOLUTION_2D)) {
      return {TransformStatus::SKIPPED, ""};
    }
    auto inputs = graph->FindInputs(node->id);
    if (inputs.size() != 1) {
      return {TransformStatus::SKIPPED, ""};
    }
    // Only a 1x1 spatial input behaves like a plain feature vector.
    const auto& input_shape = inputs[0]->tensor.shape;
    if (input_shape.w != 1 || input_shape.h != 1) {
      return {TransformStatus::SKIPPED, ""};
    }
    const auto& conv_attr = absl::any_cast<const Convolution2DAttributes&>(
        node->operation.attributes);
    if (!IsConvEquivalentToFullyConnected(conv_attr)) {
      return {TransformStatus::SKIPPED, ""};
    }
    // Weights/bias carry over directly; only the op type changes.
    FullyConnectedAttributes fc_attr;
    fc_attr.weights = conv_attr.weights;
    fc_attr.bias = conv_attr.bias;
    node->operation.attributes = fc_attr;
    node->operation.type = ToString(OperationType::FULLY_CONNECTED);
    return {TransformStatus::APPLIED,
            "Replaced convolution with fully connected."};
  }
};
}
std::unique_ptr<NodeTransformation> NewMakeFullyConnectedFromConvolution() {
return absl::make_unique<MakeFullyConnectedFromConvolution>();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/types/any.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
namespace {
// Three chained convolutions: only the last (1x1 kernel over a 1x1 spatial
// tensor) should be rewritten to FULLY_CONNECTED; the 1x1 conv over a 4x4
// input and the strided 4x4 conv must be left untouched.
TEST(MakeFullyConnected, Smoke) {
  GraphFloat32 graph;
  auto input = graph.NewValue();
  input->tensor.shape = BHWC(1, 4, 4, 8);
  // 1x1 kernel but 4x4 spatial input -> not rewritable.
  Convolution2DAttributes attr0;
  attr0.padding.prepended = HW(0, 0);
  attr0.padding.appended = HW(0, 0);
  attr0.strides = HW(1, 1);
  attr0.dilations = HW(1, 1);
  attr0.weights.shape = OHWI(16, 1, 1, 8);
  attr0.bias.shape = Linear(16);
  // 4x4 kernel with 4x4 strides -> not rewritable.
  Convolution2DAttributes attr1;
  attr1.padding.prepended = HW(0, 0);
  attr1.padding.appended = HW(0, 0);
  attr1.strides = HW(4, 4);
  attr1.dilations = HW(1, 1);
  attr1.weights.shape = OHWI(16, 4, 4, 16);
  attr1.bias.shape = Linear(16);
  // 1x1 kernel over 1x1 spatial input -> rewritable to fully connected.
  Convolution2DAttributes attr2;
  attr2.padding.prepended = HW(0, 0);
  attr2.padding.appended = HW(0, 0);
  attr2.strides = HW(1, 1);
  attr2.dilations = HW(1, 1);
  attr2.weights.shape = OHWI(32, 1, 1, 16);
  attr2.bias.shape = Linear(32);
  auto conv1x1_node0 = graph.NewNode();
  conv1x1_node0->operation.type = ToString(OperationType::CONVOLUTION_2D);
  conv1x1_node0->operation.attributes = attr0;
  auto conv4x4_node1 = graph.NewNode();
  conv4x4_node1->operation.type = ToString(OperationType::CONVOLUTION_2D);
  conv4x4_node1->operation.attributes = attr1;
  auto conv1x1_node2 = graph.NewNode();
  conv1x1_node2->operation.type = ToString(OperationType::CONVOLUTION_2D);
  conv1x1_node2->operation.attributes = attr2;
  ASSERT_TRUE(graph.AddConsumer(conv1x1_node0->id, input->id).ok());
  Value* output = nullptr;
  ASSERT_TRUE(AddOutput(&graph, conv1x1_node2, &output).ok());
  output->tensor.shape = BHWC(1, 1, 1, 32);
  Value* link1 = nullptr;
  ASSERT_TRUE(
      ConnectTwoNodes(&graph, conv1x1_node0, conv4x4_node1, &link1).ok());
  link1->tensor.shape = BHWC(1, 4, 4, 16);
  Value* link2 = nullptr;
  ASSERT_TRUE(
      ConnectTwoNodes(&graph, conv4x4_node1, conv1x1_node2, &link2).ok());
  link2->tensor.shape = BHWC(1, 1, 1, 16);
  ASSERT_EQ(3, graph.nodes().size());
  ASSERT_EQ(4, graph.values().size());
  auto transformation = NewMakeFullyConnectedFromConvolution();
  ModelTransformer transformer(&graph);
  transformer.Apply("make_fully_connected", transformation.get());
  // Node/value counts are unchanged; only the last node's type flips.
  ASSERT_EQ(3, graph.nodes().size());
  ASSERT_EQ(4, graph.values().size());
  ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
            graph.nodes()[0]->operation.type);
  ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D),
            graph.nodes()[1]->operation.type);
  ASSERT_EQ(ToString(OperationType::FULLY_CONNECTED),
            graph.nodes()[2]->operation.type);
  auto fc_attr = absl::any_cast<FullyConnectedAttributes>(
      graph.nodes()[2]->operation.attributes);
  EXPECT_EQ(OHWI(32, 1, 1, 16), fc_attr.weights.shape);
  EXPECT_EQ(Linear(32), fc_attr.bias.shape);
}
}
}
} |
259 | #ifndef TENSORFLOW_CORE_KERNELS_IMAGE_SAMPLING_KERNELS_H_
#define TENSORFLOW_CORE_KERNELS_IMAGE_SAMPLING_KERNELS_H_
#include <cmath>
#include "tensorflow/core/lib/core/stringpiece.h"
namespace tensorflow {
namespace functor {
// Resampling kernel families selectable by name through
// SamplingKernelTypeFromString().
enum SamplingKernelType {
  Lanczos1Kernel,
  Lanczos3Kernel,
  Lanczos5Kernel,
  GaussianKernel,
  BoxKernel,
  TriangleKernel,
  KeysCubicKernel,
  MitchellCubicKernel,
  // Sentinel: also returned for unrecognized kernel names.
  SamplingKernelTypeEnd
};
SamplingKernelType SamplingKernelTypeFromString(const StringPiece str);
// Lanczos windowed-sinc kernel: sinc(x) * sinc(x / radius), clamped to zero
// outside [-radius, radius].
struct LanczosKernelFunc {
  explicit LanczosKernelFunc(float _radius) : radius(_radius) {}
  float operator()(float x) const {
    constexpr float kPI = 3.14159265359;
    x = std::abs(x);
    if (x > radius) return 0.0;
    // Both sinc factors tend to 1 near zero; avoid the 0/0 form.
    if (x <= 1e-3) {
      return 1.0;
    }
    const float numerator =
        radius * std::sin(kPI * x) * std::sin(kPI * x / radius);
    const float denominator = kPI * kPI * x * x;
    return numerator / denominator;
  }
  float Radius() const { return radius; }
  const float radius;
};
// Truncated Gaussian kernel: exp(-x^2 / (2 sigma^2)) inside (-radius, radius),
// zero outside, with sigma fixed at radius / 3.
struct GaussianKernelFunc {
  static constexpr float kRadiusMultiplier = 3.0f;
  explicit GaussianKernelFunc(float _radius = 1.5f)
      : radius(_radius), sigma(_radius / kRadiusMultiplier) {}
  float operator()(float x) const {
    const float ax = std::abs(x);
    if (ax >= radius) return 0.0;
    return std::exp(-ax * ax / (2.0 * sigma * sigma));
  }
  float Radius() const { return radius; }
  const float radius;
  const float sigma;
};
// Box (nearest-neighbor) kernel: 1 inside (-0.5, 0.5), 0.5 exactly at the
// edges, 0 outside.
struct BoxKernelFunc {
  float operator()(float x) const {
    x = std::abs(x);
    if (x < 0.5f) return 1.;
    return x == 0.5f ? 0.5f : 0.0f;
  }
  float Radius() const { return 1.f; }
};
// Triangle (linear/tent) kernel: 1 - |x| inside (-1, 1), 0 outside.
struct TriangleKernelFunc {
  float operator()(float x) const {
    x = std::abs(x);
    if (x >= 1.0f) return 0.0f;
    return 1.0f - x;
  }
  float Radius() const { return 1.f; }
};
// Keys cubic interpolation kernel (a = -0.5), piecewise cubic with support
// [-2, 2].
struct KeysCubicKernelFunc {
  float operator()(float x) const {
    x = std::abs(x);
    if (x < 1.0f) {
      // Inner lobe.
      return ((1.5f * x - 2.5f) * x) * x + 1.0f;
    }
    if (x < 2.0f) {
      // Outer (negative) lobe.
      return ((-0.5f * x + 2.5f) * x - 4.0f) * x + 2.0f;
    }
    return 0.0f;
  }
  float Radius() const { return 2.f; }
};
// Mitchell-Netravali cubic kernel (B = C = 1/3), piecewise cubic with
// support [-2, 2].
struct MitchellCubicKernelFunc {
  float operator()(float x) const {
    x = std::abs(x);
    if (x < 1.0f) {
      // Inner lobe.
      return (((7.0f / 6.0f) * x - 2.0f) * x) * x + 8.0f / 9.0f;
    }
    if (x < 2.0f) {
      // Outer lobe.
      return (((-7.0f / 18.0f) * x + 2.0f) * x - 10.0f / 3.0f) * x +
             16.0f / 9.0f;
    }
    return 0.0f;
  }
  float Radius() const { return 2.f; }
};
// Factory helpers returning ready-to-use kernel functors with the standard
// radius for each kernel family.
inline LanczosKernelFunc CreateLanczos1Kernel() {
  return LanczosKernelFunc(1.0);
}
inline LanczosKernelFunc CreateLanczos3Kernel() {
  return LanczosKernelFunc(3.0);
}
inline LanczosKernelFunc CreateLanczos5Kernel() {
  return LanczosKernelFunc(5.0);
}
inline GaussianKernelFunc CreateGaussianKernel() {
  return GaussianKernelFunc(1.5);
}
inline BoxKernelFunc CreateBoxKernel() { return BoxKernelFunc(); }
inline TriangleKernelFunc CreateTriangleKernel() {
  return TriangleKernelFunc();
}
inline KeysCubicKernelFunc CreateKeysCubicKernel() {
  return KeysCubicKernelFunc();
}
inline MitchellCubicKernelFunc CreateMitchellCubicKernel() {
  return MitchellCubicKernelFunc();
}
}
}
#endif
#include "tensorflow/core/kernels/image/sampling_kernels.h"
#include <string>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace functor {
// Maps a case-insensitive kernel name ("lanczos3", "box", ...) to its enum
// value; unknown names map to the SamplingKernelTypeEnd sentinel.
SamplingKernelType SamplingKernelTypeFromString(const StringPiece str) {
  const string lower_case = absl::AsciiStrToLower(str);
  if (lower_case == "lanczos1") return Lanczos1Kernel;
  if (lower_case == "lanczos3") return Lanczos3Kernel;
  if (lower_case == "lanczos5") return Lanczos5Kernel;
  if (lower_case == "gaussian") return GaussianKernel;
  if (lower_case == "box") return BoxKernel;
  if (lower_case == "triangle") return TriangleKernel;
  if (lower_case == "keyscubic") return KeysCubicKernel;
  if (lower_case == "mitchellcubic") return MitchellCubicKernel;
  return SamplingKernelTypeEnd;
}
}
} | #include "tensorflow/core/kernels/image/sampling_kernels.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace functor {
namespace {
// Fixture providing a helper that checks a kernel at several sample points,
// including symmetry (kernel(-x) == kernel(x)).
class KernelsTest : public ::testing::Test {
 protected:
  template <typename KernelType>
  void TestKernelValues(const KernelType& kernel, const std::vector<float>& x,
                        const std::vector<float>& expected) const {
    ASSERT_EQ(x.size(), expected.size());
    for (int i = 0; i < x.size(); ++i) {
      constexpr float kTolerance = 1e-3;
      EXPECT_NEAR(kernel(x[i]), expected[i], kTolerance);
      // All kernels are even functions.
      EXPECT_NEAR(kernel(-x[i]), expected[i], kTolerance);
    }
  }
};
// Spot-checks each kernel against precomputed reference values, including
// points at and beyond each kernel's support radius (expected 0 there).
TEST_F(KernelsTest, TestKernelValues) {
  TestKernelValues(CreateLanczos1Kernel(), {0.0f, 0.5f, 1.0f, 1.5},
                   {1.0f, 0.4052f, 0.0f, 0.0f});
  TestKernelValues(CreateLanczos3Kernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5f, 3.5},
                   {1.0f, 0.6079f, 0.0f, -0.1351f, 0.0243f, 0.0f});
  TestKernelValues(
      CreateLanczos5Kernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5f, 3.5f, 4.5f, 5.5},
      {1.0f, 0.6262f, 0.0f, -0.1822f, 0.0810569f, -0.0334f, 0.0077f, 0.0f});
  TestKernelValues(CreateGaussianKernel(), {0.0f, 0.5f, 1.0f, 1.5},
                   {1.0f, 0.6065f, 0.1353f, 0.0f});
  TestKernelValues(CreateBoxKernel(), {0.0f, 0.25f, 0.5f, 1.0f},
                   {1.0f, 1.0f, 0.5f, 0.0f});
  TestKernelValues(CreateTriangleKernel(), {0.0f, 0.5f, 1.0f},
                   {1.0f, 0.5f, 0.0f});
  TestKernelValues(CreateKeysCubicKernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5},
                   {1.0f, 0.5625f, 0.0f, -0.0625f, 0.0f});
  TestKernelValues(CreateMitchellCubicKernel(), {0.0f, 0.5f, 1.0f, 1.5f, 2.5},
                   {0.8889f, 0.5347f, 0.0556f, -0.0347f, 0.0f});
}
// Verifies the name -> enum mapping, including the unknown-name sentinel.
TEST(SamplingKernelTypeFromStringTest, Works) {
  EXPECT_EQ(SamplingKernelTypeFromString("lanczos1"), Lanczos1Kernel);
  EXPECT_EQ(SamplingKernelTypeFromString("lanczos3"), Lanczos3Kernel);
  EXPECT_EQ(SamplingKernelTypeFromString("lanczos5"), Lanczos5Kernel);
  EXPECT_EQ(SamplingKernelTypeFromString("gaussian"), GaussianKernel);
  EXPECT_EQ(SamplingKernelTypeFromString("box"), BoxKernel);
  EXPECT_EQ(SamplingKernelTypeFromString("triangle"), TriangleKernel);
  EXPECT_EQ(SamplingKernelTypeFromString("mitchellcubic"), MitchellCubicKernel);
  EXPECT_EQ(SamplingKernelTypeFromString("keyscubic"), KeysCubicKernel);
  EXPECT_EQ(SamplingKernelTypeFromString("not a kernel"),
            SamplingKernelTypeEnd);
}
}
}
} |
260 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_COLOCATE_PREDECESSOR_TREES_PASS_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_COLOCATE_PREDECESSOR_TREES_PASS_H_
#include "tensorflow/core/common_runtime/optimization_registry.h"
namespace tensorflow {
// Graph optimization pass; based on the implementation below it identifies
// Fill nodes (and, presumably, their constant predecessors — implementation
// is truncated in this view) whose consumers are CPU-placed Identity nodes,
// in order to assign colocation constraints.
class ColocatePredecessorTreesPass : public GraphOptimizationPass {
 public:
  Status Run(const GraphOptimizationPassOptions& options) override;
};
}
#endif
#include "tensorflow/core/common_runtime/colocate_predecessor_trees_pass.h"
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kClassAttr = "_class";
constexpr absl::string_view kFill = "Fill";
// Returns true iff `node` is a Fill op that is still free to be colocated:
// it is not an _Arg, has neither an assigned nor a requested device, carries
// no existing "_class" attribute, and has a CPU kernel available.
bool IsValidFillOp(const Node& node) {
  return node.type_string() == kFill &&
         !node.IsArg() &&
         !node.has_assigned_device_name() &&
         node.requested_device().empty() &&
         !HasNodeAttr(node.def(), kClassAttr) &&
         KernelDefAvailable(DeviceType(DEVICE_CPU), node.def());
}
bool IsValidIdentityNode(const Node& node) {
if (!node.IsIdentity()) {
return false;
}
if (node.requested_device().empty()) {
return false;
}
auto device_name = node.requested_device();
DeviceNameUtils::ParsedName parsed_device_name;
DeviceNameUtils::ParseFullName(device_name, &parsed_device_name);
if (parsed_device_name.type != DEVICE_CPU) {
return false;
}
if (node.IsArg()) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), node.def())) {
return false;
}
return true;
}
std::optional<std::string> GetColocateStringName(const Node& fill_node) {
std::string device = "";
std::string colocation_prefix = "loc:@";
std::string colocation_name = "";
for (auto output_node : fill_node.out_nodes()) {
if (!IsValidIdentityNode(*output_node)) return std::nullopt;
if (device.empty()) {
device = output_node->requested_device();
colocation_name = absl::StrCat(colocation_prefix, output_node->name());
} else if (device != output_node->requested_device()) {
return std::nullopt;
}
}
if (colocation_name.empty()) return std::nullopt;
return colocation_name;
}
bool AreAllInNodesQualifiedConst(const Node& node) {
for (auto in_node : node.in_nodes()) {
if (!in_node->IsConstant()) {
return false;
}
if (in_node->IsArg()) {
return false;
}
if (in_node->has_assigned_device_name()) {
return false;
}
if (!in_node->requested_device().empty()) {
return false;
}
if (HasNodeAttr(in_node->def(), kClassAttr)) {
return false;
}
if (!KernelDefAvailable(DeviceType(DEVICE_CPU), in_node->def())) {
return false;
}
}
return true;
}
}
// Walks the graph looking for qualified Fill ops (see IsValidFillOp) whose
// consumers are all valid CPU Identity nodes on one device and whose inputs
// are all qualified Consts; stamps the fill, its inputs and its outputs with
// the same "_class" colocation attribute. No-op unless the
// enable_tf2min_ici_weight flag is set or when no graph is provided.
Status ColocatePredecessorTreesPass::Run(
    const GraphOptimizationPassOptions& options) {
  if (!flags::Global().enable_tf2min_ici_weight.value()) {
    return absl::OkStatus();
  }
  if (options.graph == nullptr) {
    VLOG(1) << "No graph in colocate_predecessor_trees_pass.\n";
    return absl::OkStatus();
  }
  Graph* graph = options.graph->get();
  if (VLOG_IS_ON(1)) {
    VLOG(1) << DumpGraphToFile("before_colocate_predecessor_trees", *graph,
                               options.flib_def);
  }
  for (Node* fill : graph->nodes()) {
    if (!IsValidFillOp(*fill)) continue;
    auto colocation = GetColocateStringName(*fill);
    if (!colocation.has_value() || !AreAllInNodesQualifiedConst(*fill)) {
      continue;
    }
    // Stamp the whole tree (const inputs, the fill, identity outputs) with
    // the shared colocation group.
    fill->AddAttr(std::string(kClassAttr), {*colocation});
    for (auto input : fill->in_nodes()) {
      input->AddAttr(std::string(kClassAttr), {*colocation});
    }
    for (auto output : fill->out_nodes()) {
      output->AddAttr(std::string(kClassAttr), {*colocation});
    }
  }
  if (VLOG_IS_ON(1)) {
    VLOG(1) << DumpGraphToFile("after_colocate_predecessor_trees", *graph,
                               options.flib_def);
  }
  return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 50,
ColocatePredecessorTreesPass);
} | #include "tensorflow/core/common_runtime/colocate_predecessor_trees_pass.h"
#include <memory>
#include <string>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/config/flags.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace tensorflow {
const char kCpu0[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:0";
const char kCpu1[] = "/job:tpu_host_worker/replica:0/task:0/device:CPU:1";
const char kClassAttr[] = "_class";
// Linear scan over the graph for the node with the given name; returns
// nullptr when no such node exists.
Node* GetNode(const Graph& graph, const std::string& name) {
  for (Node* candidate : graph.nodes()) {
    if (candidate->name() == name) {
      return candidate;
    }
  }
  return nullptr;
}
// With the tf2min ICI-weight flag disabled the pass must be a no-op: no
// "_class" colocation attribute is added to any node.
TEST(ColocatePredecessorTreesPassTest, ICIFlagFalse) {
  // Explicitly disable the flag so this test does not depend on the process
  // default or on ordering relative to sibling tests that call reset(true).
  flags::Global().enable_tf2min_ici_weight.reset(false);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* const_0 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_0")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(1.0)));
  Node* const_1 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_1")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_1"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  // The pass was gated off, so no node may have gained a "_class" attribute.
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
// Happy path: two qualified Consts feed a Fill consumed by two Identity nodes
// that both request the same CPU device. The whole tree must be stamped with
// "loc:@identity" (the first Identity's name).
TEST(ColocatePredecessorTreesPassTest, SimpleExample) {
  flags::Global().enable_tf2min_ici_weight.reset(true);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* const_0 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_0")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(1.0)));
  Node* const_1 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_1")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_1"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  // Both identities request the same CPU device, making the tree eligible.
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  // Every node in the tree must have gained the colocation attribute.
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
  std::string expected_colocation_info = "loc:@identity";
  const AttrValue* input_value;
  TF_EXPECT_OK(
      GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "identity_1")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
}
// Two independent Const->Fill->Identity trees in the same graph: each tree
// must be colocated with its own Identity ("loc:@identity" for the first,
// "loc:@identity_1" for the second), with no cross-contamination.
TEST(ColocatePredecessorTreesPassTest, PropagateTwoTrees) {
  flags::Global().enable_tf2min_ici_weight.reset(true);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* const_0 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_0")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(1.0)));
  Node* const_1 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_1")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  // Second, disjoint tree.
  Node* const_2 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_2")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(1.0)));
  Node* const_3 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_3")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill_1 = ops::BinaryOp("Fill", const_2, const_3,
                               builder.opts().WithName("fill_1"));
  ops::UnaryOp("Identity", fill_1, builder.opts().WithName("identity_1"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  GetNode(*graph, "identity_1")->set_requested_device(kCpu0);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  // First tree: stamped with loc:@identity.
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
  std::string expected_colocation_info = "loc:@identity";
  const AttrValue* input_value;
  TF_EXPECT_OK(
      GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  // Second tree: stamped with loc:@identity_1.
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_2")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_3")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill_1")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
  std::string expected_colocation_info_1 = "loc:@identity_1";
  TF_EXPECT_OK(
      GetNode(*graph, "const_2")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
  TF_EXPECT_OK(
      GetNode(*graph, "const_3")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
  TF_EXPECT_OK(
      GetNode(*graph, "fill_1")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
  TF_EXPECT_OK(
      GetNode(*graph, "identity_1")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info_1);
}
// The Fill has two direct Identity consumers (which get colocated) plus
// second-level identities hanging off the first Identity; the pass only
// stamps the fill's direct inputs/outputs, so identity_1/identity_2 must
// remain untouched.
TEST(ColocatePredecessorTreesPassTest, RootHasMultipleOutputs) {
  flags::Global().enable_tf2min_ici_weight.reset(true);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* const_0 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_0")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(1.0)));
  Node* const_1 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_1")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
  Node* identity =
      ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity_0"));
  // Second-level identities: consumers of `identity`, not of `fill`.
  ops::UnaryOp("Identity", identity, builder.opts().WithName("identity_1"));
  ops::UnaryOp("Identity", identity, builder.opts().WithName("identity_2"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  GetNode(*graph, "identity_0")->set_requested_device(kCpu0);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
  EXPECT_TRUE(HasNodeAttr(GetNode(*graph, "identity_0")->def(), kClassAttr));
  // Second-level identities are outside the tree and must not be stamped.
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity_1")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity_2")->def(), kClassAttr));
  std::string expected_colocation_info = "loc:@identity";
  const AttrValue* input_value;
  TF_EXPECT_OK(
      GetNode(*graph, "const_0")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "const_1")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(GetNode(*graph, "fill")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "identity")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
  TF_EXPECT_OK(
      GetNode(*graph, "identity_0")->attrs().Find(kClassAttr, &input_value));
  EXPECT_EQ(input_value->list().s().at(0), expected_colocation_info);
}
// A Const input that already requests a device disqualifies the whole tree
// (AreAllInNodesQualifiedConst fails), so no node gets a "_class" attribute.
TEST(ColocatePredecessorTreesPassTest, ConstHasDeviceAttr) {
  flags::Global().enable_tf2min_ici_weight.reset(true);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* const_0 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_0")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(1.0)));
  Node* const_1 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_1")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  // The disqualifying condition: const_0 requests its own device.
  GetNode(*graph, "const_0")->set_requested_device(kCpu1);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_1")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
// A Const input that already carries a "_class" attribute disqualifies the
// tree: the pre-existing attribute stays, but no other node is stamped.
TEST(ColocatePredecessorTreesPassTest, ConstHasColocationInfo) {
  flags::Global().enable_tf2min_ici_weight.reset(true);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  // const_0 already has colocation info, which disqualifies the tree.
  Node* const_0 =
      ops::SourceOp("Const", builder.opts()
                                 .WithName("const_0")
                                 .WithAttr("dtype", DT_INT32)
                                 .WithAttr("value", Tensor(1.0))
                                 .WithAttr("_class", {"loc:@fill"}));
  Node* const_1 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_1")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", const_0, const_1, builder.opts().WithName("fill"));
  Node* identity =
      ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  // Only const_0's pre-existing attribute remains; nothing else was stamped.
  EXPECT_TRUE(HasNodeAttr(const_0->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(const_1->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(fill->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(identity->def(), kClassAttr));
}
// One of the Fill's inputs is an _Arg rather than a Const, which disqualifies
// the tree (AreAllInNodesQualifiedConst fails): nothing gets stamped.
TEST(ColocatePredecessorTreesPassTest, InputArg) {
  flags::Global().enable_tf2min_ici_weight.reset(true);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* arg_0 = ops::SourceOp("_Arg", builder.opts()
                                          .WithName("arg_0")
                                          .WithAttr("T", DT_INT32)
                                          .WithAttr("index", 0));
  Node* const_0 = ops::SourceOp("Const", builder.opts()
                                             .WithName("const_0")
                                             .WithAttr("dtype", DT_INT32)
                                             .WithAttr("value", Tensor(2.0)));
  Node* fill =
      ops::BinaryOp("Fill", arg_0, const_0, builder.opts().WithName("fill"));
  ops::UnaryOp("Identity", fill, builder.opts().WithName("identity"));
  TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
  GetNode(*graph, "identity")->set_requested_device(kCpu0);
  GraphDef before;
  graph->ToGraphDef(&before);
  GraphOptimizationPassOptions options;
  options.graph = &graph;
  ColocatePredecessorTreesPass pass;
  TF_ASSERT_OK(pass.Run(options));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "arg_0")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "const_0")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "fill")->def(), kClassAttr));
  EXPECT_FALSE(HasNodeAttr(GetNode(*graph, "identity")->def(), kClassAttr));
}
} |
261 | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_MERGE_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_MERGE_H_
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow::tools::proto_splitter {
// Re-assembles protos that were split into chunks by the proto splitter,
// either from in-memory chunk messages (Merge) or from a .pb/.cpb file on
// disk (Read / ReadPartial).
class Merger {
 private:
  // Where chunk bytes come from: in-memory protos (MERGE) or a riegeli
  // record file (READ).
  enum MergerOp { MERGE, READ };
 public:
  // Merges in-memory `chunks` into `merged_message` following the layout
  // described by `chunked_message`.
  static absl::Status Merge(
      const std::vector<std::unique_ptr<tsl::protobuf::Message>>& chunks,
      const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
      tsl::protobuf::Message* merged_message);
  // Reads `prefix`.pb (regular proto) or `prefix`.cpb (chunked proto),
  // whichever exists, into `merged_message`.
  static absl::Status Read(std::string prefix,
                           tsl::protobuf::Message* merged_message);
  // Like Read, but merges only the portion of `prefix`.cpb described by
  // `chunk_metadata`; fails if only a .pb file is present.
  static absl::Status ReadPartial(
      absl::string_view prefix,
      const ::tensorflow::proto_splitter::ChunkMetadata& chunk_metadata,
      tsl::protobuf::Message* merged_message);
 private:
  // Reads a regular (non-chunked) binary proto file into `merged_message`.
  static absl::Status ReadPb(const std::string& pb_file,
                             tsl::protobuf::Message* merged_message);
  // Recursively reads chunks referenced by `chunked_message` from `reader`
  // and merges them into `merged_message`.
  static absl::Status ReadFields(
      const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
      riegeli::RecordReader<riegeli::FdReader<>>& reader,
      const std::vector<::tensorflow::proto_splitter::ChunkInfo>&
          chunks_info,
      tsl::protobuf::Message* merged_message);
  // Merges a single ChunkedField into `merged_message`, walking its field
  // tag path. Uses `chunks` when op == MERGE, `reader`/`chunks_info` when
  // op == READ.
  static absl::Status ProcessField(
      const ::tensorflow::proto_splitter::ChunkedField& chunked_field,
      tsl::protobuf::Message* merged_message,
      const std::vector<::tensorflow::proto_splitter::ChunkInfo>& chunks_info,
      const std::vector<std::unique_ptr<tsl::protobuf::Message>>& chunks,
      riegeli::RecordReader<riegeli::FdReader<>>& reader, MergerOp op);
};
}
#endif
#include "tensorflow/tools/proto_splitter/merge.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "riegeli/base/object.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetRiegeliReader;
using tools::proto_splitter::OnlyContainsPb;
using tsl::protobuf::FieldDescriptor;
using tsl::protobuf::Message;
using tsl::protobuf::Reflection;
// Merges the in-memory `chunks` into `merged_message` according to the
// layout in `chunked_message`. Purely in-memory: no records are read, so a
// closed placeholder reader is handed to ProcessField.
absl::Status Merger::Merge(const std::vector<std::unique_ptr<Message>>& chunks,
                           const ChunkedMessage& chunked_message,
                           Message* merged_message) {
  riegeli::RecordReader<riegeli::FdReader<>> unused_reader{riegeli::kClosed};
  if (chunked_message.has_chunk_index()) {
    // The message's own (top-level) chunk is merged in first.
    merged_message->MergeFrom(*chunks[chunked_message.chunk_index()]);
  }
  for (const auto& chunked_field : chunked_message.chunked_fields()) {
    TF_RETURN_IF_ERROR(ProcessField(chunked_field, merged_message, {}, chunks,
                                    unused_reader, MergerOp::MERGE));
  }
  return absl::OkStatus();
}
// Reads a possibly-chunked proto from disk. When only `prefix`.pb exists the
// file is read as a plain binary proto; otherwise `prefix`.cpb is opened as a
// riegeli record file, its ChunkMetadata is parsed, and all chunks are merged
// into `merged_message`.
absl::Status Merger::Read(std::string prefix, Message* merged_message) {
  uint64_t start_time = Env::Default()->NowMicros();
  TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix));
  if (only_contains_pb) {
    return ReadPb(absl::StrCat(prefix, ".pb"), merged_message);
  }
  TF_ASSIGN_OR_RETURN(auto reader,
                      GetRiegeliReader(absl::StrCat(prefix, ".cpb")));
  auto read_metadata = GetChunkMetadata(reader);
  if (!read_metadata.ok()) {
    // Close the reader before surfacing the metadata error.
    reader.Close();
    return absl::FailedPreconditionError(
        absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n",
                     read_metadata.status().ToString()));
  }
  ChunkMetadata chunk_metadata = read_metadata.value();
  std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
      chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
  absl::Status s =
      ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message);
  // Close unconditionally; the merge status is returned either way.
  reader.Close();
  uint64_t end_time = Env::Default()->NowMicros();
  LOG(INFO) << "Finished reading and merging chunked proto, took "
            << HumanReadableDuration(end_time - start_time) << ".";
  return s;
}
// Like Read, but merges only the subset of `prefix`.cpb described by the
// caller-supplied `chunk_metadata`. Fails when only a regular .pb file is
// present, since partial reads require the chunked format.
absl::Status Merger::ReadPartial(absl::string_view prefix,
                                 const ChunkMetadata& chunk_metadata,
                                 Message* merged_message) {
  uint64_t start_time = Env::Default()->NowMicros();
  TF_ASSIGN_OR_RETURN(bool only_contains_pb, OnlyContainsPb(prefix));
  if (only_contains_pb) {
    return absl::FailedPreconditionError(
        absl::StrCat("Attempting to read part of a chunked proto .cpb file, "
                     "but only found a regular proto: ",
                     prefix, ".pb"));
  }
  TF_ASSIGN_OR_RETURN(auto reader,
                      GetRiegeliReader(absl::StrCat(prefix, ".cpb")));
  std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
      chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
  absl::Status s =
      ReadFields(chunk_metadata.message(), reader, chunks_info, merged_message);
  // Close unconditionally; the merge status is returned either way.
  reader.Close();
  uint64_t end_time = Env::Default()->NowMicros();
  LOG(INFO) << "Finished reading and merging chunked proto, took "
            << HumanReadableDuration(end_time - start_time) << ".";
  return s;
}
// Reads a regular (non-chunked) binary proto file into `merged_message`,
// logging how long the read took. NotFound if `pb_file` does not exist.
absl::Status Merger::ReadPb(const std::string& pb_file,
                            Message* merged_message) {
  const uint64_t start = Env::Default()->NowMicros();
  TF_ASSIGN_OR_RETURN(bool file_exists,
                      internal::FileExists(Env::Default(), pb_file));
  if (!file_exists) {
    return absl::NotFoundError(absl::StrCat("File not found: ", pb_file));
  }
  LOG(INFO) << "Reading binary proto from " << pb_file;
  auto status = ReadBinaryProto(Env::Default(), pb_file, merged_message);
  LOG(INFO) << "Finished reading binary proto, took "
            << HumanReadableDuration(Env::Default()->NowMicros() - start)
            << ".";
  return status;
}
// Recursively merges the chunks referenced by `chunked_message` into
// `merged_message`. The message's own chunk (if any) is merged first, then
// the chunked fields are processed in sorted field-tag order so that parents
// are populated before children and repeated elements arrive in index order.
absl::Status Merger::ReadFields(
    const ChunkedMessage& chunked_message,
    riegeli::RecordReader<riegeli::FdReader<>>& reader,
    const std::vector<ChunkInfo>& chunks_info,
    tsl::protobuf::Message* merged_message) {
  if (chunked_message.has_chunk_index()) {
    TF_ASSIGN_OR_RETURN(
        std::string chunk,
        ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
    if (!merged_message->MergeFromString(chunk)) {
      return absl::FailedPreconditionError(
          "Couldn't merge chunk into message.");
    }
  }
  std::vector<ChunkedField> chunked_fields(
      chunked_message.chunked_fields().begin(),
      chunked_message.chunked_fields().end());
  absl::Status sort_status = absl::OkStatus();
  std::sort(
      chunked_fields.begin(), chunked_fields.end(),
      // Take the operands by const reference: ChunkedField is a proto, and
      // copying two of them on every comparison is needlessly expensive.
      [&sort_status](const ChunkedField& cf1, const ChunkedField& cf2) {
        int tag_depth =
            std::min(cf1.field_tag().size(), cf2.field_tag().size());
        for (int depth = 0; depth < tag_depth; ++depth) {
          FieldIndex tag1 = cf1.field_tag()[depth];
          FieldIndex tag2 = cf2.field_tag()[depth];
          if (tag1.has_field() && tag2.has_field()) {
            uint32_t field1 = tag1.field();
            uint32_t field2 = tag2.field();
            if (field1 != field2) return field1 < field2;
          } else if (tag1.has_index() && tag2.has_index()) {
            uint64_t index1 = tag1.index();
            uint64_t index2 = tag2.index();
            if (index1 != index2) return index1 < index2;
          } else if (tag1.has_map_key() && tag2.has_map_key()) {
            // Map keys have no meaningful ordering here; treat as equal.
            return false;
          } else {
            // Mismatched tag kinds at the same depth: flag and bail out of
            // the comparison (std::sort cannot propagate errors directly).
            sort_status = absl::FailedPreconditionError("Field tag mismatch");
            return false;
          }
        }
        // Equal prefixes: order by chunk index, then by tag-path length so
        // shallower fields come first.
        if (cf1.field_tag().size() == cf2.field_tag().size()) {
          return cf1.message().chunk_index() < cf2.message().chunk_index();
        }
        return cf1.field_tag().size() < cf2.field_tag().size();
      });
  if (!sort_status.ok()) return sort_status;
  for (const auto& chunked_field : chunked_fields) {
    absl::Status s = ProcessField(chunked_field, merged_message, chunks_info,
                                  {}, reader, MergerOp::READ);
    if (!s.ok()) return s;
  }
  return absl::OkStatus();
}
// Merges one ChunkedField into `merged_message`. The chunk bytes come either
// from in-memory `chunks` (MergerOp::MERGE, serialized here) or from the
// riegeli `reader` via `chunks_info` (MergerOp::READ). The field-tag path is
// walked to locate (creating map entries / repeated elements on demand) the
// sub-message the chunk belongs to, then the chunk is merged in.
absl::Status Merger::ProcessField(
    const ChunkedField& chunked_field, Message* merged_message,
    const std::vector<ChunkInfo>& chunks_info,
    const std::vector<std::unique_ptr<Message>>& chunks,
    riegeli::RecordReader<riegeli::FdReader<>>& reader, MergerOp op) {
  std::string chunk;
  switch (op) {
    case MergerOp::READ: {
      TF_ASSIGN_OR_RETURN(
          chunk, ReadChunk(reader,
                           chunks_info[chunked_field.message().chunk_index()]));
      break;
    }
    case MergerOp::MERGE: {
      chunk =
          chunks[chunked_field.message().chunk_index()]->SerializeAsString();
      break;
    }
  }
  // An empty field tag means the chunk belongs to `merged_message` itself.
  if (chunked_field.field_tag().empty()) {
    merged_message->MergeFromString(chunk);
    return absl::OkStatus();
  }
  // NOTE(review): `field_index` is only assigned inside the repeated-field
  // branch of the loop below; presumably a repeated terminal field always
  // takes that branch before the is_repeated() use further down — confirm,
  // otherwise it is read uninitialized.
  uint64_t field_index;
  Message* curr_message = merged_message;
  TF_ASSIGN_OR_RETURN(const std::vector<Field> fields,
                      GetFieldTypes(chunked_field.field_tag()));
  const FieldDescriptor* field_desc = nullptr;
  // Walk the tag path. At each step `merged_message` is the parent holding
  // the field and `curr_message` the message being descended into.
  for (const auto& field : fields) {
    merged_message = curr_message;
    field_desc = merged_message->GetDescriptor()->FindFieldByNumber(
        std::get<int>(field.first));
    auto res = GetMutableField(merged_message, field);
    if (!res.ok()) {
      if (!absl::IsNotFound(res.status())) return res.status();
      // Field not present yet: create the map entry or repeated element,
      // then retry the lookup.
      if (field_desc->is_map()) {
        TF_RETURN_IF_ERROR(
            AddMapEntry(curr_message, field_desc, field.second.value()));
        res = GetMutableField(curr_message, field);
      } else {
        curr_message->GetReflection()->AddMessage(curr_message, field_desc);
        res = GetMutableField(curr_message, field);
      }
    }
    auto [parent, mutable_field, mutable_field_index] = res.value();
    if (mutable_field->is_repeated() && mutable_field_index != -1) {
      field_index = mutable_field_index;
      curr_message = parent->GetReflection()->MutableRepeatedMessage(
          parent, mutable_field, std::max(0, mutable_field_index));
      if (mutable_field->is_map()) {
        // Maps are repeated map-entry messages; descend into the value
        // field (field number 2) of the entry.
        field_desc = mutable_field->message_type()->FindFieldByNumber(2);
        merged_message = curr_message;
        curr_message = curr_message->GetReflection()->MutableMessage(
            curr_message, field_desc);
      }
    } else if (mutable_field->type() == FieldDescriptor::Type::TYPE_MESSAGE) {
      curr_message =
          parent->GetReflection()->MutableMessage(parent, mutable_field);
    }
  }
  const Reflection* reflection = merged_message->GetReflection();
  if (field_desc->is_repeated()) {
    // Callback invoked when the element is itself a (possibly chunked)
    // message: recurse via Merge/ReadFields into the repeated element.
    auto message_callback = [&reflection, &merged_message, &field_index, &op,
                             &chunks, &chunked_field, &reader, &chunks_info,
                             &field_desc]() -> absl::Status {
      // Grow the repeated field until `field_index` is a valid element.
      for (int _ = reflection->FieldSize(*merged_message, field_desc);
           _ <= field_index; _++) {
        reflection->AddMessage(merged_message, field_desc);
      }
      switch (op) {
        case MergerOp::MERGE:
          TF_RETURN_IF_ERROR(
              Merge(chunks, chunked_field.message(),
                    reflection->MutableRepeatedMessage(
                        merged_message, field_desc, field_index)));
          break;
        case MergerOp::READ:
          TF_RETURN_IF_ERROR(
              ReadFields(chunked_field.message(), reader, chunks_info,
                         reflection->MutableRepeatedMessage(
                             merged_message, field_desc, field_index)));
          break;
        default:
          return absl::InternalError("Encountered unknown MergerOp.");
      }
      return absl::OkStatus();
    };
    TF_RETURN_IF_ERROR(SetRepeatedFieldElement(
        merged_message, field_desc, field_index, chunk, message_callback));
  } else {
    // Singular field: same recursion, but into the (single) sub-message.
    auto message_callback = [&reflection, &merged_message, &op, &chunks,
                             &chunked_field, &reader, &chunks_info,
                             &field_desc]() -> absl::Status {
      switch (op) {
        case MergerOp::MERGE:
          TF_RETURN_IF_ERROR(
              Merge(chunks, chunked_field.message(),
                    reflection->MutableMessage(merged_message, field_desc)));
          break;
        case MergerOp::READ:
          TF_RETURN_IF_ERROR(ReadFields(
              chunked_field.message(), reader, chunks_info,
              reflection->MutableMessage(merged_message, field_desc)));
          break;
        default:
          return absl::InternalError("Encountered unknown MergerOp.");
      }
      return absl::OkStatus();
    };
    TF_RETURN_IF_ERROR(
        SetFieldElement(merged_message, field_desc, chunk, message_callback));
  }
  return absl::OkStatus();
}
} | #include "tensorflow/tools/proto_splitter/merge.h"
#include <array>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::tools::proto_splitter {
namespace {
// Textproto contents of the individual chunks of the test tree, in the order
// they were written for a depth-first split (kDFSplitTreeChunks) and a
// breadth-first split (kBFSplitTreeChunks).
inline constexpr std::array kDFSplitTreeChunks = {
    "val: \"0\"",       "val: \"010\"",     "val: \"01020\"",
    "val: \"0102030\"", "val: \"0102031\"", "val: \"0102032\"",
    "val: \"01021\"",   "val: \"0102130\"", "val: \"0102131\"",
    "val: \"0102132\""};
inline constexpr std::array kBFSplitTreeChunks = {
    "val: \"0\"",       "val: \"010\"",     "val: \"01020\"",
    "val: \"01021\"",   "val: \"0102030\"", "val: \"0102031\"",
    "val: \"0102032\"", "val: \"0102130\"", "val: \"0102131\"",
    "val: \"0102132\""};
// Merger::Read on the depth-first-split .cpb file must reconstruct the same
// tree as the unsplit reference textproto ("split-tree.pbtxt").
TEST(MergeTest, TestReadRiegeliTreeDepthFirst) {
  const std::string cpb_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "df-split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree));
  // Compare against the unsplit reference proto.
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Same as the depth-first test, but for the breadth-first-split file: chunk
// ordering on disk must not affect the merged result.
TEST(MergeTest, TestReadRiegeliTreeBreadthFirst) {
  const std::string cpb_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "bf-split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Read(cpb_path, &merged_tree));
  // Compare against the unsplit reference proto.
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Merger::Merge on in-memory chunks (depth-first order) plus the ChunkedMessage
// metadata from df-split-tree.pbtxt must reconstruct the reference tree.
TEST(MergeTest, TestMergeTreeChunksDepthFirst) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "df-split-tree");
  // Build the in-memory chunk list from the expected chunk textprotos.
  std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks;
  for (const auto& chunk : kDFSplitTreeChunks) {
    ::tensorflow::proto_splitter_testdata::StringNode string_node;
    ::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node);
    std::unique_ptr<::tsl::protobuf::Message> node =
        std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>(
            string_node);
    chunks.push_back(std::move(node));
  }
  // Load the ChunkedMessage layout metadata that describes the chunks.
  std::string split_tree_metadata;
  TF_ASSERT_OK(tsl::ReadFileToString(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata));
  ::tensorflow::proto_splitter::ChunkedMessage chunked_message;
  ::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata,
                                               &chunked_message);
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree));
  // Compare against the unsplit reference proto.
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Same as TestMergeTreeChunksDepthFirst but with the BFS chunk ordering.
TEST(MergeTest, TestMergeTreeChunksBreadthFirst) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "bf-split-tree");
  // Parse each expected BFS chunk into a StringNode message.
  std::vector<std::unique_ptr<::tsl::protobuf::Message>> chunks;
  for (const auto& chunk : kBFSplitTreeChunks) {
    ::tensorflow::proto_splitter_testdata::StringNode string_node;
    ::tsl::protobuf::TextFormat::ParseFromString(chunk, &string_node);
    std::unique_ptr<::tsl::protobuf::Message> node =
        std::make_unique<::tensorflow::proto_splitter_testdata::StringNode>(
            string_node);
    chunks.push_back(std::move(node));
  }
  // The .pbtxt alongside the .cpb holds the ChunkedMessage metadata.
  std::string split_tree_metadata;
  TF_ASSERT_OK(tsl::ReadFileToString(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &split_tree_metadata));
  ::tensorflow::proto_splitter::ChunkedMessage chunked_message;
  ::tsl::protobuf::TextFormat::ParseFromString(split_tree_metadata,
                                               &chunked_message);
  ::tensorflow::proto_splitter_testdata::StringNode merged_tree;
  TF_ASSERT_OK(Merger::Merge(chunks, chunked_message, &merged_tree));
  // Ground truth: the unsplit tree stored as a .pbtxt.
  const std::string pbtxt_path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-tree");
  ::tensorflow::proto_splitter_testdata::StringNode test_proto;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(pbtxt_path, ".pbtxt"), &test_proto));
  ASSERT_THAT(merged_tree, EqualsProto(test_proto));
}
// Round-trips a GraphDef that was split because it has many nodes.
TEST(MergeTest, TestReadGraphDefLotsNodes) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-lots-nodes");
  GraphDef merged_graph_def;
  TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
  GraphDef test_graph_def;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
  ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
// Round-trips a GraphDef that was split because individual nodes are large.
TEST(MergeTest, TestReadGraphDefLargeNodes) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-large-nodes");
  GraphDef merged_graph_def;
  TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
  GraphDef test_graph_def;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
  ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
// Round-trips a GraphDef that was split because of a single large constant.
TEST(MergeTest, TestReadGraphDefLargeConstant) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-large-constant");
  GraphDef merged_graph_def;
  TF_ASSERT_OK(Merger::Read(path, &merged_graph_def));
  GraphDef test_graph_def;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_graph_def));
  ASSERT_THAT(merged_graph_def, EqualsProto(test_graph_def));
}
// Round-trips a message whose chunks were split across many field types.
TEST(MergeTest, TestReadManyField) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "many-field");
  ::tensorflow::proto_splitter_testdata::ManyFields merged_many_field;
  TF_ASSERT_OK(Merger::Read(path, &merged_many_field));
  ::tensorflow::proto_splitter_testdata::ManyFields test_many_field;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_many_field));
  ASSERT_THAT(merged_many_field, EqualsProto(test_many_field));
}
// Round-trips a standard chunked SavedModel.
TEST(MergeTest, TestReadSavedModel) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "split-standard");
  SavedModel merged_saved_model;
  TF_ASSERT_OK(Merger::Read(path, &merged_saved_model));
  SavedModel test_saved_model;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model));
  ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model));
}
// Round-trips the chunked SavedModel testdata that lives under cc/saved_model.
TEST(MergeTest, TestReadChunkedModel) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(), "cc/saved_model/testdata",
                   "chunked_saved_model/chunked_model/saved_model");
  SavedModel merged_saved_model;
  TF_ASSERT_OK(Merger::Read(path, &merged_saved_model));
  SavedModel test_saved_model;
  TF_ASSERT_OK(tsl::ReadTextProto(
      tsl::Env::Default(), absl::StrCat(path, ".pbtxt"), &test_saved_model));
  ASSERT_THAT(merged_saved_model, EqualsProto(test_saved_model));
}
// Merger::ReadPartial with metadata that only references the top-level chunk
// must merge just that chunk (here: a single map entry), not the whole model.
TEST(MergeTest, TestReadPartial) {
  const std::string path =
      io::JoinPath(testing::TensorFlowSrcRoot(),
                   "tools/proto_splitter/testdata", "many-field");
  TF_ASSERT_OK_AND_ASSIGN(auto reader, tools::proto_splitter::GetRiegeliReader(
                                           absl::StrCat(path, ".cpb")));
  auto read_metadata = GetChunkMetadata(reader);
  // Close the reader before failing so the file handle is not leaked.
  if (!read_metadata.ok()) {
    reader.Close();
    TF_ASSERT_OK(read_metadata.status());
  }
  ::tensorflow::proto_splitter::ChunkMetadata chunk_metadata =
      read_metadata.value();
  // Build partial metadata: keep all chunk descriptors but only the root
  // message's chunk index.
  ::tensorflow::proto_splitter::ChunkMetadata partial_chunk_metadata;
  partial_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks());
  partial_chunk_metadata.mutable_message()->set_chunk_index(
      chunk_metadata.message().chunk_index());
  proto_splitter_testdata::ManyFields merged_many_fields;
  TF_ASSERT_OK(
      Merger::ReadPartial(path, partial_chunk_metadata, &merged_many_fields));
  ASSERT_THAT(merged_many_fields, EqualsProto(R"pb(
                map_field_int64 { key: -1345 value: "map_value_-1345" }
              )pb"));
}
}
} |
262 | #ifndef TENSORFLOW_TSL_PLATFORM_STACKTRACE_HANDLER_H_
#define TENSORFLOW_TSL_PLATFORM_STACKTRACE_HANDLER_H_
namespace tsl {
namespace testing {
// Installs a handler for fatal signals (SIGSEGV, SIGABRT, SIGBUS, SIGILL,
// SIGFPE) that dumps a stack trace to stderr before terminating the process.
// On platforms without stacktrace support this is a no-op.
void InstallStacktraceHandler();
}
}
#endif
#include "tsl/platform/platform.h"
#if !defined(IS_MOBILE_PLATFORM) && defined(PLATFORM_POSIX) && \
(defined(__clang__) || defined(__GNUC__))
#define TF_GENERATE_STACKTRACE
#endif
#if defined(TF_GENERATE_STACKTRACE)
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include <string>
#include "tsl/platform/stacktrace.h"
#endif
namespace tsl {
namespace testing {
#if defined(TF_GENERATE_STACKTRACE)
// Writes a raw (mangled) backtrace to stderr using only async-signal-safe
// calls (write/backtrace/backtrace_symbols_fd), so it can run inside a signal
// handler. NOTE(review): backtrace()/backtrace_symbols_fd() are declared in
// <execinfo.h>, which is not included here directly — presumably pulled in
// via tsl/platform/stacktrace.h; confirm.
inline void SafePrintStackTrace() {
  static const char begin_msg[] = "*** BEGIN MANGLED STACK TRACE ***\n";
  // "(void)!" silences warn_unused_result on write() without a temp variable.
  (void)!write(STDERR_FILENO, begin_msg, strlen(begin_msg));
  int buffer_size = 128;
  void *trace[128];
  // backtrace() returns the number of frames actually captured.
  buffer_size = backtrace(trace, buffer_size);
  backtrace_symbols_fd(trace, buffer_size, STDERR_FILENO);
  static const char end_msg[] = "*** END MANGLED STACK TRACE ***\n\n";
  (void)!write(STDERR_FILENO, end_msg, strlen(end_msg));
}
// Fatal-signal handler. Steps:
//  1) arm a one-shot 60-second SIGALRM watchdog with the default disposition,
//     so the process still dies if trace symbolization hangs;
//  2) print a mangled trace using only async-signal-safe calls;
//  3) print a symbolized trace (CurrentStackTrace may allocate, which is not
//     async-signal-safe — best effort, the process is about to die anyway);
//  4) restore SIGABRT's default disposition and abort().
static void StacktraceHandler(int sig, siginfo_t *si, void *v) {
  // Watchdog: one-shot (zero interval) 60s real-time alarm.
  struct itimerval timer;
  timer.it_value.tv_sec = 60;
  timer.it_value.tv_usec = 0;
  timer.it_interval.tv_sec = 0;
  timer.it_interval.tv_usec = 0;
  setitimer(ITIMER_REAL, &timer, 0);
  // Make sure SIGALRM kills the process rather than re-entering a handler.
  struct sigaction sa_timeout;
  memset(&sa_timeout, 0, sizeof(sa_timeout));
  sa_timeout.sa_handler = SIG_DFL;
  sigaction(SIGALRM, &sa_timeout, 0);
  char buf[128];
  snprintf(buf, sizeof(buf), "*** Received signal %d ***\n", sig);
  (void)!write(STDERR_FILENO, buf, strlen(buf));
  SafePrintStackTrace();
  std::string stacktrace = CurrentStackTrace();
  (void)!write(STDERR_FILENO, stacktrace.c_str(), stacktrace.length());
  // Re-raise with default behavior so the process reports an abnormal exit.
  struct sigaction sa;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = SIG_DFL;
  sigaction(SIGABRT, &sa, NULL);
  abort();
}
// Installs StacktraceHandler for the common fatal signals. SA_RESETHAND
// restores the default disposition as soon as the handler fires, so a fault
// inside the handler cannot recurse. Diagnostics are written with raw
// write() to stderr.
void InstallStacktraceHandler() {
  int handled_signals[] = {SIGSEGV, SIGABRT, SIGBUS, SIGILL, SIGFPE};
  size_t array_limit = sizeof(handled_signals) / sizeof(int);
  for (size_t i = 0; i < array_limit; i++) {
    int sig = handled_signals[i];
    struct sigaction sa;
    struct sigaction osa;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_SIGINFO | SA_RESETHAND;
    sa.sa_sigaction = &StacktraceHandler;
    if (sigaction(sig, &sa, &osa) != 0) {
      char buf[128];
      snprintf(buf, sizeof(buf),
               "Warning, can't install backtrace signal handler for signal %d, "
               "errno:%d \n",
               sig, errno);
      (void)!write(STDERR_FILENO, buf, strlen(buf));
    } else if (osa.sa_handler != SIG_DFL) {
      // A previous non-default handler was replaced; let the user know.
      char buf[128];
      snprintf(buf, sizeof(buf),
               "Warning, backtrace signal handler for signal %d overwrote "
               "previous handler.\n",
               sig);
      (void)!write(STDERR_FILENO, buf, strlen(buf));
    }
  }
}
#else
void InstallStacktraceHandler() {}
#endif
}
} | #include <csignal>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
// Raising SIGABRT inside the death-test child should trigger the installed
// handler and emit a symbolized trace containing the gtest runner frame.
TEST(StacktraceHandlerTest, GeneratesStacktrace) {
  EXPECT_DEATH(raise(SIGABRT), "testing::internal::UnitTestImpl::RunAllTests");
}
}
} |
263 | #ifndef TENSORFLOW_CORE_IR_TF_OP_WRAPPER_H_
#define TENSORFLOW_CORE_IR_TF_OP_WRAPPER_H_
#include <cstddef>
#include "llvm/ADT/iterator_range.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/ir/utility.h"
namespace mlir {
namespace detail {
// Iterator adaptor that maps each Value in a range to its control token:
// values that are already control-typed map to themselves; data values map to
// the control result of their defining op (tfg::LookupControlDependency).
template <typename ValueIteratorT>
class ControlRetIterator final
    : public llvm::mapped_iterator_base<ControlRetIterator<ValueIteratorT>,
                                        ValueIteratorT, Value> {
 public:
  using llvm::mapped_iterator_base<ControlRetIterator<ValueIteratorT>,
                                   ValueIteratorT, Value>::mapped_iterator_base;
  // Maps one value to its corresponding control token.
  Value mapElement(Value value) const {
    return mlir::isa<tf_type::ControlType>(value.getType())
               ? value
               : tfg::LookupControlDependency(value);
  }
};
}
namespace tfg {
// Lightweight, non-owning wrapper around a TFGraph-dialect Operation that
// exposes TFG-specific structure: the trailing control token, node name,
// requested/assigned device, and TPU-replication attributes. Supports
// LLVM-style casting (isa/dyn_cast<TFOp>) and may hold a null op.
class TFOp {
 public:
  // Wraps `op`; null is allowed, anything else must be a TFG op (asserted).
  TFOp(Operation *op = nullptr);
  explicit TFOp(Operation &op) : TFOp(&op) {}
  // LLVM RTTI hook: any op registered in the TFGraph dialect is a TFOp.
  static bool classof(Operation *op) {
    return isa<TFGraphDialect>(op->getDialect());
  }
  Operation *getOperation() { return op_; }
  TFGraphDialect *getDialect() {
    return cast<TFGraphDialect>(op_->getDialect());
  }
  // Splits the operand list into (data operands, control operands).
  std::pair<OperandRange, OperandRange> splitOperands() {
    ControlType ctl_type = getDialect()->getControlType();
    return SplitDataAndControlValues(op_->getOperands(), ctl_type);
  }
  OperandRange getNonControlOperands() { return splitOperands().first; }
  OperandRange getControlOperands() { return splitOperands().second; }
  // The control token produced by this op — always the last result.
  Value controlRet() { return op_->getResult(op_->getNumResults() - 1); }
  // All results except the trailing control token.
  ResultRange getNonControlResults() {
    return op_->getResults().slice(0, op_->getNumResults() - 1);
  }
  // Node-name attribute accessors.
  StringAttr nameAttr();
  StringRef name();
  void setName(const Twine &name);
  void setName(StringAttr name);
  // Requested-device attribute accessors.
  StringAttr requestedDeviceAttr();
  StringRef requestedDevice();
  void setRequestedDevice(const Twine &requested_device);
  void setRequestedDevice(StringAttr requested_device);
  // Assigned-device attribute accessors.
  StringAttr assignedDeviceAttr();
  StringRef assignedDevice();
  void setAssignedDevice(const Twine &assigned_device);
  void setAssignedDevice(StringAttr assigned_device);
  // "_tpu_replicate" attribute accessors.
  StringAttr tpuReplicate();
  void setTpuReplicate(StringAttr tpu_replicate);
  // Returns the assigned device if set (asserted non-empty), otherwise the
  // requested device attribute (which may be null).
  StringAttr deviceAttr() {
    StringAttr device = assignedDeviceAttr();
    if (device) {
      assert(!device.getValue().empty());
      return device;
    }
    return requestedDeviceAttr();
  }
  // Like deviceAttr(), but yields "" when no device attribute is present.
  StringRef device() {
    StringAttr device_attr = deviceAttr();
    if (device_attr) return device_attr.getValue();
    return "";
  }
  // Smart-pointer-style access to the wrapped operation.
  Operation *operator->() { return op_; }
  Operation &operator*() { return *op_; }
  explicit operator bool() const { return op_; }
 private:
  // The wrapped operation; may be null.
  Operation *op_;
};
// Range adaptor over any Value range that yields each element's control
// token (see detail::ControlRetIterator).
template <typename ValueRangeT>
class ControlRetRange final
    : public llvm::iterator_range<
          ::mlir::detail::ControlRetIterator<typename ValueRangeT::iterator>> {
 public:
  using Base = llvm::iterator_range<
      ::mlir::detail::ControlRetIterator<typename ValueRangeT::iterator>>;
  explicit ControlRetRange(ValueRangeT c) : Base(c.begin(), c.end()) {}
  Value operator[](size_t index) const {
    assert(index < size() && "invalid index into value range");
    return *(this->begin() + index);
  }
  size_t size() const { return llvm::size(*this); }
  Value front() { return (*this)[0]; }
  // Element-wise equality against any sized iterable of Values.
  template <typename OtherT>
  bool operator==(const OtherT &other) const {
    return llvm::size(*this) == llvm::size(other) &&
           std::equal(this->begin(), this->end(), other.begin());
  }
  template <typename OtherT>
  bool operator!=(const OtherT &other) const {
    return !(*this == other);
  }
};
using OperandControlRetRange = ControlRetRange<OperandRange>;
using ValueControlRetRange = ControlRetRange<ValueRange>;
}
}
#endif
#include "tensorflow/core/ir/tf_op_wrapper.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "tensorflow/core/ir/dialect.h"
namespace mlir {
namespace tfg {

// Wraps `op`, asserting in debug builds that a non-null op belongs to the
// TFGraph dialect.
TFOp::TFOp(Operation *op) : op_(op) {
  // Parenthesized so the message string binds to classof(op) only; the
  // original unparenthesized `!op || classof(op) && "..."` triggers
  // -Wlogical-op-parentheses and reads ambiguously (behavior is unchanged).
  assert(!op || (classof(op) && "expected a TFG op"));
}

// Returns the node-name attribute, or null when unset.
StringAttr TFOp::nameAttr() {
  return op_->getAttrOfType<StringAttr>(getDialect()->getNameAttrIdentifier());
}

// Returns the node name; requires the name attribute to be present.
StringRef TFOp::name() { return nameAttr().getValue(); }

void TFOp::setName(const Twine &name) {
  setName(StringAttr::get(op_->getContext(), name.str()));
}

void TFOp::setName(StringAttr name) {
  op_->setAttr(getDialect()->getNameAttrIdentifier(), name);
}

// Returns the requested-device attribute, or null when unset.
StringAttr TFOp::requestedDeviceAttr() {
  return op_->getAttrOfType<StringAttr>(
      getDialect()->getDeviceAttrIdentifier());
}

// Requires the requested-device attribute to be present.
StringRef TFOp::requestedDevice() { return requestedDeviceAttr().getValue(); }

void TFOp::setRequestedDevice(const Twine &device) {
  setRequestedDevice(StringAttr::get(op_->getContext(), device.str()));
}

void TFOp::setRequestedDevice(StringAttr device) {
  op_->setAttr(getDialect()->getDeviceAttrIdentifier(), device);
}

// Returns the assigned-device attribute, or null when unset.
StringAttr TFOp::assignedDeviceAttr() {
  return op_->getAttrOfType<StringAttr>(
      getDialect()->getAssignedDeviceAttrIdentifier());
}

// Requires the assigned-device attribute to be present.
StringRef TFOp::assignedDevice() { return assignedDeviceAttr().getValue(); }

void TFOp::setAssignedDevice(const Twine &device) {
  setAssignedDevice(StringAttr::get(op_->getContext(), device.str()));
}

void TFOp::setAssignedDevice(StringAttr device) {
  op_->setAttr(getDialect()->getAssignedDeviceAttrIdentifier(), device);
}

// "_tpu_replicate" accessors; the attribute name is a raw string because it
// is a TF convention rather than a dialect-registered identifier.
StringAttr TFOp::tpuReplicate() {
  return op_->getAttrOfType<StringAttr>("_tpu_replicate");
}

void TFOp::setTpuReplicate(StringAttr tpu_replicate) {
  op_->setAttr("_tpu_replicate", tpu_replicate);
}
}
} | #include "tensorflow/core/ir/tf_op_wrapper.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
// isa/dyn_cast<TFOp> must reject non-TFG ops (the module) and accept TFG ops.
TEST(TFOpWrapper, LLVMRTTI) {
  const char *const code = R"mlir(
    tfg.func @test() -> (tensor<i32>) {
      %A, %ctlA = A : () -> (tensor<i32>)
      return(%A) : tensor<i32>
    }
  )mlir";
  MLIRContext context;
  context.getOrLoadDialect<TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  Operation *module_op = module.get();
  EXPECT_FALSE(isa<TFOp>(module_op));
  EXPECT_FALSE(dyn_cast<TFOp>(module_op));
  module->walk([&](TFOp op) {
    EXPECT_TRUE(isa<TFOp>(op.getOperation()));
    EXPECT_TRUE(dyn_cast<TFOp>(op.getOperation()));
  });
}
// Verifies the data/control operand split: control operands trail the data
// operands, and controlRet() is the op's own last result.
TEST(TFOpWrapper, ControlOperands) {
  const char *const code = R"mlir(
    tfg.func @test(%a: tensor<i32> {tfg.name = "a"},
                   %b: tensor<i32> {tfg.name = "b"}) -> (tensor<i32>) {
      %A, %ctlA = A(%a, %b) [%a.ctl, %b.ctl] : (tensor<i32>, tensor<i32>)
                  -> (tensor<i32>)
      return(%A) : tensor<i32>
    }
  )mlir";
  MLIRContext context;
  context.getOrLoadDialect<TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  TFOp a_op;
  module->walk([&](TFOp op) {
    if (op->getName().getStringRef() == "tfg.A") a_op = op;
  });
  ASSERT_TRUE(a_op);
  EXPECT_EQ(a_op.controlRet().getDefiningOp(), a_op.getOperation());
  OperandRange operands = a_op->getOperands();
  OperandRange data = a_op.getNonControlOperands();
  OperandRange ctls = a_op.getControlOperands();
  EXPECT_EQ(operands.size(), 4u);
  EXPECT_EQ(data.size(), 2u);
  EXPECT_EQ(ctls.size(), 2u);
  // The first control-typed operand marks the data/control boundary.
  OperandRange::iterator ctl_it = llvm::find_if(operands, [](Value operand) {
    return mlir::isa<ControlType>(operand.getType());
  });
  EXPECT_NE(ctl_it, operands.end());
  EXPECT_EQ(data.end(), ctl_it);
  EXPECT_EQ(*ctls.begin(), *ctl_it);
}
// Exercises the name/device/tpu-replicate getters and setters on a manually
// created TFG op (destroyed via scope_exit since it is not in a block).
TEST(TFOpWrapper, AttributeGetterSetters) {
  MLIRContext context;
  auto *tfg_dialect = context.getOrLoadDialect<TFGraphDialect>();
  OperationState state(UnknownLoc::get(&context), "tfg.A");
  state.addTypes(tfg_dialect->getControlType());
  TFOp op = Operation::create(state);
  auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
  // Name: settable from Twine or StringAttr.
  {
    EXPECT_FALSE(op.nameAttr());
    StringRef a_name = "a_name";
    op.setName(a_name);
    EXPECT_EQ(op.name(), a_name);
    StringRef another_name = "another_name";
    op.setName(StringAttr::get(&context, another_name));
    EXPECT_EQ(op.name(), another_name);
  }
  // Requested device.
  {
    StringRef a_device = "/some_device";
    EXPECT_FALSE(op.requestedDeviceAttr());
    op.setRequestedDevice(a_device);
    EXPECT_EQ(op.requestedDevice(), a_device);
    StringRef another_device = "/some_other_device";
    op.setRequestedDevice(StringAttr::get(&context, another_device));
    EXPECT_EQ(op.requestedDevice(), another_device);
  }
  // Assigned device.
  {
    StringRef a_device = "/some_assigned_device";
    EXPECT_FALSE(op.assignedDeviceAttr());
    op.setAssignedDevice(a_device);
    EXPECT_EQ(op.assignedDevice(), a_device);
    StringRef another_device = "/some_other_assigned_device";
    op.setAssignedDevice(StringAttr::get(&context, another_device));
    EXPECT_EQ(op.assignedDevice(), another_device);
  }
  // device()/deviceAttr() prefer assigned over requested, "" when neither.
  {
    op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
    EXPECT_EQ(op.deviceAttr(), op.requestedDeviceAttr());
    StringRef device = "/an_assigned_device";
    op.setAssignedDevice(device);
    EXPECT_EQ(op.deviceAttr(), op.assignedDeviceAttr());
    EXPECT_EQ(op.device(), device);
    op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
    op->removeAttr(tfg_dialect->getDeviceAttrIdentifier());
    EXPECT_EQ(op.device(), "");
  }
  // TPU replicate attribute.
  {
    auto tpu_replicate = StringAttr::get(op->getContext(), "a_tpu");
    op.setTpuReplicate(tpu_replicate);
    EXPECT_EQ(op.tpuReplicate(), tpu_replicate);
  }
}
// ControlRetRange must map data operands to their defining op's control token
// and pass through operands that are already control-typed.
TEST(TFOpWrapper, ValueControlRet) {
  const char *const code = R"mlir(
    tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
      %Const, %ctl = Const {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
      %Add, %ctl_2 = Add(%Const, %arg) [%ctl] {T = i32} : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
      return(%Add) : tensor<i32>
    }
  )mlir";
  MLIRContext context;
  context.getOrLoadDialect<TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto iterator = func.getBody().begin()->begin();
  TFOp const_op = &(*iterator++);
  TFOp add_op = &(*iterator);
  OperandControlRetRange ret_range(add_op->getOperands());
  // %Const -> its control token; %arg -> the block's control argument;
  // [%ctl] -> itself.
  EXPECT_EQ(ret_range[0], const_op.controlRet());
  EXPECT_EQ(ret_range[1], func.getBody().begin()->getArguments()[1]);
  EXPECT_EQ(ret_range[2], const_op.controlRet());
  for (Value v : ret_range) EXPECT_TRUE(mlir::isa<ControlType>(v.getType()));
}
}
}
} |
264 | #ifndef TENSORFLOW_CORE_TFRT_FALLBACK_COST_RECORDER_H_
#define TENSORFLOW_CORE_TFRT_FALLBACK_COST_RECORDER_H_
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace tfrt_stub {
// Thread-safe accumulator of per-op-key execution times. Samples are summed
// as they are recorded; GetCost() reports the running average, and
// WriteToFile() exports the averages as a text OpCostMapProto.
class CostRecorder {
 public:
  // Adds one execution sample of `execution_time` for `op_key`.
  void RecordCost(int64_t op_key, uint64_t execution_time);
  // Returns the average recorded cost for `op_key` (at least 1). Unknown
  // keys return uint32 max so unmeasured ops are treated as expensive.
  uint64_t GetCost(int64_t op_key) const;
  // Writes the averaged cost map as a text proto to the file named by the
  // TF_TFRT_MEASURED_COST_PATH environment variable.
  Status WriteToFile() const;
  // Number of distinct op keys recorded so far.
  size_t size() const;
  // Name of the env var holding the output path. NOTE(review): "Mesured" is
  // a typo, but it is part of the public API and must not be renamed here.
  static const char* MesuredCostPathEnvVarName() {
    return "TF_TFRT_MEASURED_COST_PATH";
  }
 private:
  mutable tensorflow::mutex op_cost_map_mutex_;
  // Maps op_key -> (total execution time, number of samples).
  absl::flat_hash_map<int64_t, std::pair<uint64_t, uint64_t>> op_cost_map_
      TF_GUARDED_BY(op_cost_map_mutex_);
};
}
}
#endif
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace tfrt_stub {

// Adds one execution sample for `op_key`.
void CostRecorder::RecordCost(int64_t op_key, uint64_t execution_time) {
  mutex_lock l(op_cost_map_mutex_);
  // Single hash lookup (the original indexed op_cost_map_[op_key] twice,
  // costing a second hash/probe under the lock on every sample).
  auto& [total_time, num_samples] = op_cost_map_[op_key];
  total_time += execution_time;
  num_samples += 1;
}

// Returns the average cost for `op_key`, clamped to at least 1. Unknown keys
// report uint32 max so that unmeasured ops look maximally expensive.
uint64_t CostRecorder::GetCost(int64_t op_key) const {
  tf_shared_lock l(op_cost_map_mutex_);
  const auto iter = op_cost_map_.find(op_key);
  if (iter == op_cost_map_.end()) return std::numeric_limits<uint32_t>::max();
  const auto total_cost = iter->second.first;
  const auto num_ops = iter->second.second;
  // Integer average; clamp to 1 so downstream cost models never see 0.
  auto r =
      std::max(static_cast<uint64_t>(1),
               static_cast<uint64_t>(total_cost / num_ops));
  VLOG(2) << "Get cost for op_key=" << op_key << ", cost=" << r;
  return r;
}

// Serializes the averaged cost map as a text proto to the file named by the
// TF_TFRT_MEASURED_COST_PATH environment variable.
Status CostRecorder::WriteToFile() const {
  OpCostMapProto op_cost_map_proto;
  {
    // Snapshot the averages under a shared lock, then release before I/O.
    tf_shared_lock l(op_cost_map_mutex_);
    for (const auto& [op_key, op_cost] : op_cost_map_) {
      const uint64_t avg_op_cost = op_cost.first / op_cost.second;
      (*op_cost_map_proto.mutable_op_cost_map())[op_key] = avg_op_cost;
    }
  }
  std::string measured_cost_path;
  TF_RETURN_IF_ERROR(ReadStringFromEnvVar(MesuredCostPathEnvVarName(), "",
                                          &measured_cost_path));
  return tensorflow::WriteTextProto(tensorflow::Env::Default(),
                                    measured_cost_path, op_cost_map_proto);
}

// Number of distinct op keys recorded so far.
size_t CostRecorder::size() const {
  tf_shared_lock l(op_cost_map_mutex_);
  return op_cost_map_.size();
}
}
} | #include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include <limits>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/tfrt/fallback/op_cost_map.pb.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
constexpr int64_t kTestOpKey = 1;
constexpr uint64_t kTestCost = 1234;
// Average of one kTestCost and one 2*kTestCost sample: (1234 + 2468) / 2.
constexpr uint64_t kTestAvgCost = 1851;
// Two samples for the same key collapse into one entry.
TEST(CostRecorderTest, RecordCostTest) {
  CostRecorder recorder;
  recorder.RecordCost(kTestOpKey, kTestCost);
  recorder.RecordCost(kTestOpKey, kTestCost);
  EXPECT_EQ(recorder.size(), 1);
}
// GetCost returns the integer average of the recorded samples.
TEST(CostRecorderTest, GetCostTest) {
  CostRecorder recorder;
  recorder.RecordCost(kTestOpKey, kTestCost);
  recorder.RecordCost(kTestOpKey, 2 * kTestCost);
  EXPECT_EQ(recorder.size(), 1);
  EXPECT_EQ(recorder.GetCost(kTestOpKey), kTestAvgCost);
}
// Unknown keys report uint32 max (the "unmeasured" sentinel).
TEST(CostRecorderTest, GetCostDefaultValueTest) {
  CostRecorder recorder;
  ASSERT_EQ(recorder.size(), 0);
  EXPECT_EQ(recorder.GetCost(kTestOpKey),
            std::numeric_limits<uint32_t>::max());
}
// An empty recorder writes an empty (but valid) cost map file.
TEST(CostRecorderTest, WriteToFileTest) {
  CostRecorder recorder;
  ASSERT_EQ(recorder.size(), 0);
  std::string measured_cost_path;
  tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
  ASSERT_EQ(setenv("TF_TFRT_MEASURED_COST_PATH", measured_cost_path.c_str(), 1),
            0);
  TF_CHECK_OK(recorder.WriteToFile());
  OpCostMapProto op_cost_map_proto;
  TF_CHECK_OK(tensorflow::ReadTextProto(
      tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
  EXPECT_EQ(op_cost_map_proto.op_cost_map_size(), 0);
}
// The exported proto carries the per-key average cost.
TEST(CostRecorderTest, ProtoRecordsTest) {
  CostRecorder recorder;
  recorder.RecordCost(kTestOpKey, kTestCost);
  recorder.RecordCost(kTestOpKey, 2 * kTestCost);
  ASSERT_EQ(recorder.size(), 1);
  std::string measured_cost_path;
  tensorflow::Env::Default()->LocalTempFilename(&measured_cost_path);
  ASSERT_EQ(setenv(CostRecorder::MesuredCostPathEnvVarName(),
                   measured_cost_path.c_str(), 1),
            0);
  TF_CHECK_OK(recorder.WriteToFile());
  OpCostMapProto op_cost_map_proto;
  TF_CHECK_OK(tensorflow::ReadTextProto(
      tensorflow::Env::Default(), measured_cost_path, &op_cost_map_proto));
  EXPECT_EQ(op_cost_map_proto.op_cost_map().find(kTestOpKey)->second,
            kTestAvgCost);
}
}
}
} |
265 | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_LITE_QUANTIZE_WEIGHTS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_QUANTIZATION_LITE_QUANTIZE_WEIGHTS_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
namespace mlir {
namespace lite {
// Target representation for quantized weight buffers.
enum class BufferType { QUANTIZED_INT8, QUANTIZED_FLOAT16 };
// Caller-supplied quantization hints for a custom op.
struct CustomOpInfo {
  // Input tensor indices of the custom op that may be quantized.
  std::vector<std::int32_t> quantizable_input_indices;
  bool is_weight_only = false;
  bool no_side_effect = true;
};
using BuiltinOperatorSet = absl::flat_hash_set<tflite::BuiltinOperator>;
// Maps custom op name -> its quantization info.
using CustomOpMap = std::unordered_map<std::string, CustomOpInfo>;
// Fully parameterized overload: quantizes eligible weights of `input_model`
// into `builder`, skipping ops in `denylisted_ops` and tensors smaller than
// `minimum_elements_for_weights`.
absl::Status QuantizeWeights(
    flatbuffers::FlatBufferBuilder* builder, const tflite::Model* input_model,
    const tflite::TensorType& inference_type,
    const absl::flat_hash_set<std::string>& denylisted_ops,
    const CustomOpMap& custom_op_map,
    int64_t minimum_elements_for_weights = 1024,
    bool disable_per_channel = false, bool weight_only_quantization = false,
    bool legacy_float_scale = false);
// Overload controlling only the minimum weight size and hybrid evaluation.
absl::Status QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                             const tflite::Model* input_model,
                             int64_t weights_min_num_elements,
                             bool use_hybrid_evaluation = true);
// Overload selecting the quantized buffer type (int8 or float16).
absl::Status QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                             const tflite::Model* input_model,
                             BufferType quant_type = BufferType::QUANTIZED_INT8,
                             bool use_updated_hybrid_scheme = true);
// Overload with custom-op info and a builtin-operator denylist.
absl::Status QuantizeWeights(flatbuffers::FlatBufferBuilder* builder,
                             const tflite::Model* input_model,
                             int64_t weights_min_num_elements,
                             const CustomOpMap& custom_op_map,
                             bool use_updated_hybrid_scheme = true,
                             const BuiltinOperatorSet& op_denylist = {});
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
// Graph transform that shrinks large float Const nodes: each eligible Const
// is replaced by a quint8 quantized Const plus min/max scalar Consts feeding
// a Dequantize op that keeps the original node name, so consumers still see
// float values. Consts with non-float dtype or fewer than `minimum_size`
// elements (default 1024) are passed through untouched.
Status QuantizeWeights(const GraphDef& input_graph_def,
                       const TransformFuncContext& context,
                       GraphDef* output_graph_def) {
  int32_t minimum_size;
  TF_RETURN_IF_ERROR(
      context.GetOneInt32Parameter("minimum_size", 1024, &minimum_size));
  TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
      input_graph_def, {"Const"},
      [minimum_size](const NodeMatch& match,
                     const std::set<string>& input_nodes,
                     const std::set<string>& output_nodes,
                     std::vector<NodeDef>* new_nodes) {
        const NodeDef& old_const_node = match.node;
        if (!old_const_node.attr().count("dtype")) {
          return errors::InvalidArgument("No 'dtype' attribute for Const node ",
                                         old_const_node.name());
        }
        if (!old_const_node.attr().count("value")) {
          return errors::InvalidArgument("No 'value' attribute for Const node ",
                                         old_const_node.name());
        }
        const DataType old_dtype = old_const_node.attr().at("dtype").type();
        Tensor old_tensor;
        if (!old_tensor.FromProto(old_const_node.attr().at("value").tensor())) {
          return errors::InvalidArgument("Decoding Tensor failed for node",
                                         old_const_node.name());
        }
        const size_t num_elements = old_tensor.NumElements();
        // Only float tensors big enough to be worth the extra Dequantize op
        // are quantized. Explicit cast avoids a signed/unsigned comparison.
        if ((old_dtype != DT_FLOAT) ||
            (num_elements < static_cast<size_t>(minimum_size))) {
          new_nodes->push_back(old_const_node);
          return OkStatus();
        }
        // Compute the value range of the weights.
        const float* old_values = old_tensor.flat<float>().data();
        float min = std::numeric_limits<float>::max();
        // lowest() is the correct identity for a running maximum; min() is
        // the smallest *positive* float and previously left an (effectively
        // harmless, but wrong) ~1e-38 floor on all-nonpositive tensors.
        float max = std::numeric_limits<float>::lowest();
        for (size_t i = 0; i < num_elements; ++i) {
          const float value = old_values[i];
          min = std::min(min, value);
          max = std::max(max, value);
        }
        // The quantization range must span zero for MIN_FIRST mode.
        min = std::min(min, 0.0f);
        max = std::max(0.0f, max);
        // Widen a degenerate (zero-width) range so quantization is defined.
        if (min == max) {
          if (std::abs(min) < 0.000001f) {
            max = min + 1.0f;
          } else if (min > 0) {
            max = 2.0f * min;
          } else {
            max = min / 2.0f;
          }
        }
        Tensor quantized_tensor(DT_QUINT8, old_tensor.shape());
        FloatTensorToQuantizedInPlace<quint8>(old_tensor, min, max,
                                              &quantized_tensor);
        // Quantized payload Const.
        NodeDef quantized_const_node;
        quantized_const_node.set_op("Const");
        quantized_const_node.set_name(old_const_node.name() +
                                      "_quantized_const");
        SetNodeAttr("dtype", DT_QUINT8, &quantized_const_node);
        SetNodeTensorAttr<float>("value", quantized_tensor,
                                 &quantized_const_node);
        new_nodes->push_back(quantized_const_node);
        // Scalar min/max Consts feeding Dequantize.
        NodeDef min_node;
        min_node.set_op("Const");
        min_node.set_name(old_const_node.name() + "_quantized_min");
        SetNodeAttr("dtype", DT_FLOAT, &min_node);
        Tensor min_tensor(DT_FLOAT, {});
        min_tensor.scalar<float>()() = min;
        SetNodeTensorAttr<float>("value", min_tensor, &min_node);
        new_nodes->push_back(min_node);
        NodeDef max_node;
        max_node.set_op("Const");
        max_node.set_name(old_const_node.name() + "_quantized_max");
        SetNodeAttr("dtype", DT_FLOAT, &max_node);
        Tensor max_tensor(DT_FLOAT, {});
        max_tensor.scalar<float>()() = max;
        SetNodeTensorAttr<float>("value", max_tensor, &max_node);
        new_nodes->push_back(max_node);
        // Dequantize takes over the original node name so downstream
        // consumers are rewired transparently.
        NodeDef dequantize_node;
        dequantize_node.set_op("Dequantize");
        dequantize_node.set_name(old_const_node.name());
        SetNodeAttr("T", DT_QUINT8, &dequantize_node);
        SetNodeAttr("mode", "MIN_FIRST", &dequantize_node);
        AddNodeInput(quantized_const_node.name(), &dequantize_node);
        AddNodeInput(min_node.name(), &dequantize_node);
        AddNodeInput(max_node.name(), &dequantize_node);
        new_nodes->push_back(dequantize_node);
        return OkStatus();
      },
      {}, output_graph_def));
  return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("quantize_weights", QuantizeWeights);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status QuantizeWeights(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
// Fixture: builds a small Conv2D graph, runs the quantize_weights transform,
// checks the rewritten graph structure, and verifies numeric closeness by
// executing both graphs in real sessions.
class QuantizeWeightsTest : public ::testing::Test {
 protected:
  // Builds input -> Conv2D(weights) -> "output" with the given constants.
  void BuildGraphDef(const TensorShape& input_shape,
                     std::initializer_list<float> input_values,
                     const TensorShape& weight_shape,
                     std::initializer_list<float> weight_values,
                     GraphDef* original_graph_def) {
    auto root = tensorflow::Scope::DisabledShapeInferenceScope();
    Tensor input_data(DT_FLOAT, input_shape);
    test::FillValues<float>(&input_data, input_values);
    Output input_op =
        ops::Const(root.WithOpName("input_op"), Input::Initializer(input_data));
    Tensor weights_data(DT_FLOAT, weight_shape);
    test::FillValues<float>(&weights_data, weight_values);
    Output weights_op = ops::Const(root.WithOpName("weights_op"),
                                   Input::Initializer(weights_data));
    Output conv_op = ops::Conv2D(root.WithOpName("output"), input_op,
                                 weights_op, {1, 1, 1, 1}, "VALID");
    TF_ASSERT_OK(root.ToGraphDef(original_graph_def));
  }
  void TestQuantizeWeights() {
    GraphDef original_graph_def;
    // 40-element weights exceed minimum_size=16; the 12-element input stays
    // float.
    BuildGraphDef({1, 1, 6, 2},
                  {1.0f, 4.0f, 2.0f, 5.0f, 3.0f, 6.0f, -1.0f, -4.0f, -2.0f,
                   -5.0f, -3.0f, -6.0f},
                  {1, 2, 2, 10},
                  {1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f,
                   3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f,
                   0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f,
                   0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f},
                  &original_graph_def);
    TransformFuncContext context;
    context.output_names = {"output"};
    context.params["minimum_size"] = {"16"};
    GraphDef quantized_graph_def;
    TF_ASSERT_OK(
        QuantizeWeights(original_graph_def, context, &quantized_graph_def));
    std::map<string, const NodeDef*> node_lookup;
    MapNamesToNodes(quantized_graph_def, &node_lookup);
    // The small input Const must remain a float Const.
    EXPECT_EQ(1, node_lookup.count("input_op"));
    const NodeDef* q_input_op = node_lookup.at("input_op");
    EXPECT_EQ(DT_FLOAT, q_input_op->attr().at("dtype").type());
    // The weights node keeps its name but becomes a Dequantize fed by a
    // quint8 Const.
    EXPECT_EQ(1, node_lookup.count("weights_op"));
    const NodeDef* q_weights_op = node_lookup.at("weights_op");
    EXPECT_EQ("Dequantize", q_weights_op->op());
    const string& weights_const_name =
        NodeNameFromInput(q_weights_op->input(0));
    EXPECT_EQ(1, node_lookup.count(weights_const_name));
    const NodeDef* q_weights_const = node_lookup.at(weights_const_name);
    EXPECT_EQ("Const", q_weights_const->op());
    EXPECT_EQ(DT_QUINT8, q_weights_const->attr().at("dtype").type());
    // Run both graphs and compare outputs within quantization tolerance.
    std::unique_ptr<Session> original_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(original_session->Create(original_graph_def));
    std::vector<Tensor> original_outputs;
    TF_ASSERT_OK(original_session->Run({}, {"output"}, {}, &original_outputs));
    std::unique_ptr<Session> quantized_session(NewSession(SessionOptions()));
    TF_ASSERT_OK(quantized_session->Create(quantized_graph_def));
    std::vector<Tensor> quantized_outputs;
    TF_ASSERT_OK(
        quantized_session->Run({}, {"output"}, {}, &quantized_outputs));
    test::ExpectTensorNear<float>(original_outputs[0], quantized_outputs[0],
                                  0.5);
  }
};
TEST_F(QuantizeWeightsTest, TestQuantizeWeights) { TestQuantizeWeights(); }
TEST_F(QuantizeWeightsTest, RangesAlwaysIncludeZero) {
GraphDef original_graph_def;
BuildGraphDef({1, 1, 4, 4},
{-1.0f, -4.0f, -2.0f, -5.0f, -1.0f, -4.0f, -2.0f, -5.0f, -1.0f,
-4.0f, -2.0f, -5.0f, -1.0f, -4.0f, -2.0f, -5.0f},
{1, 2, 2, 10},
{1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f,
3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f,
0.1f, 0.2f, 0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f,
0.3f, 0.4f, 1.0f, 2.0f, 3.0f, 4.0f, 0.1f, 0.2f, 0.3f, 0.4f},
&original_graph_def);
TransformFuncContext context;
context.output_names = {"output"};
context.params["minimum_size"] = {"16"};
GraphDef quantized_graph_def;
TF_ASSERT_OK(
QuantizeWeights(original_graph_def, context, &quantized_graph_def));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(quantized_graph_def, &node_lookup);
auto expected_tensor = [](float value) {
Tensor tensor(DT_FLOAT, TensorShape({}));
test::FillValues<float>(&tensor, {value});
return tensor;
};
auto existing_tensor = [&node_lookup](string op) {
const NodeDef* node_def = node_lookup.at(op);
CHECK(node_def);
return GetNodeTensorAttr(*node_def, "value");
};
test::ExpectTensorNear<float>(
expected_tensor(-5.0), existing_tensor("input_op_quantized_min"), 1e-5);
test::ExpectTensorNear<float>(
expected_tensor(0.0), existing_tensor("input_op_quantized_max"), 1e-5);
test::ExpectTensorNear<float>(
expected_tensor(0.0), existing_tensor("weights_op_quantized_min"), 1e-5);
test::ExpectTensorNear<float>(
expected_tensor(4.0), existing_tensor("weights_op_quantized_max"), 1e-5);
}
}
} |
266 | #include <algorithm>
#include <cassert>
#include <cstring>
#include <sstream>
#include "phonenumbers/stringutil.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "absl/strings/match.h"
namespace i18n {
namespace phonenumbers {
using std::equal;
using std::stringstream;
// Returns |s| with the decimal representation of |n| appended.
string operator+(const string& s, int n) {
  return absl::StrCat(s, n);
}
string SimpleItoa(int n) {
return absl::StrCat(n);
}
string SimpleItoa(uint64 n) {
return absl::StrCat(n);
}
string SimpleItoa(int64 n) {
return absl::StrCat(n);
}
bool HasPrefixString(const string& s, const string& prefix) {
return absl::StartsWith(s, prefix);
}
// Returns the position of the n-th occurrence of |c| in |s|, or string::npos
// when there are fewer than n occurrences (including when n <= 0).
size_t FindNth(const string& s, char c, int n) {
  size_t position = string::npos;
  int remaining = n;
  while (remaining-- > 0) {
    position = s.find_first_of(c, position + 1);
    if (position == string::npos) {
      return string::npos;
    }
  }
  return position;
}
void SplitStringUsing(const string& s, char delimiter,
vector<string>* result) {
assert(result);
for (absl::string_view split_piece : absl::StrSplit(
s, absl::ByChar(delimiter), absl::SkipEmpty())) {
result->push_back(std::string(split_piece));
}
}
// If |in| starts with |prefix|, assigns the remainder of |in| to |*out| and
// returns true; otherwise copies |in| unchanged into |*out| and returns false.
bool TryStripPrefixString(const string& in, const string& prefix, string* out) {
  assert(out);
  if (in.compare(0, prefix.length(), prefix) == 0) {
    out->assign(in, prefix.length(), string::npos);
    return true;
  }
  *out = in;
  return false;
}
bool HasSuffixString(const string& s, const string& suffix) {
return absl::EndsWith(s, suffix);
}
// Parses |s| as a decimal integer into |*out|.  On any parse failure the
// output is forced to 0 (absl::SimpleAtoi returns false and leaves the
// output in an unspecified state on failure).
template <typename T>
void GenericAtoi(const string& s, T* out) {
  if (!absl::SimpleAtoi(s, out))
    *out = 0;
}
void safe_strto32(const string& s, int32 *n) {
GenericAtoi(s, n);
}
void safe_strtou64(const string& s, uint64 *n) {
GenericAtoi(s, n);
}
void safe_strto64(const string& s, int64* n) {
GenericAtoi(s, n);
}
// Removes from |*s| every character that appears in |chars|.
//
// Uses the erase-remove idiom: a single O(n) pass instead of the previous
// per-character erase() loop, which was O(n^2) because each erase shifted
// the remainder of the string.
void strrmm(string* s, const string& chars) {
  s->erase(std::remove_if(s->begin(), s->end(),
                          [&chars](char c) {
                            return chars.find(c) != string::npos;
                          }),
           s->end());
}
// Replaces every occurrence of |substring| in |*s| with |replacement| and
// returns the number of substitutions performed.
int GlobalReplaceSubstring(const string& substring,
                           const string& replacement,
                           string* s) {
  // (Stray duplicate semicolon removed.)
  return absl::StrReplaceAll({{substring, replacement}}, s);
}
StringHolder::StringHolder(const string& s)
: absl::AlphaNum(s)
{}
StringHolder::StringHolder(const char* cp)
: absl::AlphaNum(cp)
{}
StringHolder::StringHolder(uint64 n)
: absl::AlphaNum(n)
{}
StringHolder::~StringHolder() {}
// Appends the value held by |rhs| to |lhs|.
string& operator+=(string& lhs, const StringHolder& rhs) {
  absl::string_view s = rhs.GetString();
  if (!s.empty()) {
    // Append exactly s.size() bytes.  A string_view is not guaranteed to be
    // NUL-terminated, so the previous `lhs += s.data()` could read past the
    // end of the view, or stop early at an embedded NUL.
    lhs.append(s.data(), s.size());
  } else {
    // Fall back to the C-string form when the view is empty; presumably this
    // covers holders constructed from a raw char pointer — TODO confirm
    // against StringHolder's accessors.
    const char* const cs = rhs.GetCString();
    if (cs) {
      lhs.append(cs, rhs.Length());
    }
  }
  return lhs;
}
string StrCat(const StringHolder& s1, const StringHolder& s2) {
return absl::StrCat(s1, s2);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3) {
return absl::StrCat(s1, s2, s3);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4) {
return absl::StrCat(s1, s2, s3, s4);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5) {
return absl::StrCat(s1, s2, s3, s4, s5);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6) {
return absl::StrCat(s1, s2, s3, s4, s5, s6);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7);
}
// Concatenates eight values into a new string.
string StrCat(const StringHolder& s1, const StringHolder& s2,
              const StringHolder& s3, const StringHolder& s4,
              const StringHolder& s5, const StringHolder& s6,
              const StringHolder& s7, const StringHolder& s8) {
  // Delegate directly to absl::StrCat like every other overload in this
  // file.  The previous local `result` (and its reserve()) was dead code:
  // it was never used by the return expression.
  return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9, const StringHolder& s10,
const StringHolder& s11) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9, const StringHolder& s10,
const StringHolder& s11, const StringHolder& s12) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9, const StringHolder& s10,
const StringHolder& s11, const StringHolder& s12,
const StringHolder& s13) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12,
s13);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9, const StringHolder& s10,
const StringHolder& s11, const StringHolder& s12,
const StringHolder& s13, const StringHolder& s14) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12,
s13, s14);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9, const StringHolder& s10,
const StringHolder& s11, const StringHolder& s12,
const StringHolder& s13, const StringHolder& s14,
const StringHolder& s15) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12,
s13, s14, s15);
}
string StrCat(const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5, const StringHolder& s6,
const StringHolder& s7, const StringHolder& s8,
const StringHolder& s9, const StringHolder& s10,
const StringHolder& s11, const StringHolder& s12,
const StringHolder& s13, const StringHolder& s14,
const StringHolder& s15, const StringHolder& s16) {
return absl::StrCat(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12,
s13, s14, s15, s16);
}
void StrAppend(string* dest, const StringHolder& s1) {
absl::StrAppend(dest, s1);
}
void StrAppend(string* dest, const StringHolder& s1, const StringHolder& s2) {
absl::StrAppend(dest, s1, s2);
}
void StrAppend(string* dest, const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3) {
absl::StrAppend(dest, s1, s2, s3);
}
void StrAppend(string* dest, const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4) {
absl::StrAppend(dest, s1, s2, s3, s4);
}
void StrAppend(string* dest, const StringHolder& s1, const StringHolder& s2,
const StringHolder& s3, const StringHolder& s4,
const StringHolder& s5) {
absl::StrAppend(dest, s1, s2, s3, s4, s5);
}
}
} | #include "phonenumbers/stringutil.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
using std::string;
using std::vector;
namespace i18n {
namespace phonenumbers {
TEST(StringUtilTest, OperatorPlus) {
EXPECT_EQ("hello10", string("hello") + 10);
}
TEST(StringUtilTest, SimpleItoa) {
EXPECT_EQ("10", SimpleItoa(10));
}
TEST(StringUtilTest, HasPrefixString) {
EXPECT_TRUE(HasPrefixString("hello world", "hello"));
EXPECT_FALSE(HasPrefixString("hello world", "hellO"));
}
TEST(StringUtilTest, FindNthWithEmptyString) {
EXPECT_EQ(string::npos, FindNth("", 'a', 1));
}
TEST(StringUtilTest, FindNthWithNNegative) {
EXPECT_EQ(string::npos, FindNth("hello world", 'o', -1));
}
TEST(StringUtilTest, FindNthWithNTooHigh) {
EXPECT_EQ(string::npos, FindNth("hello world", 'o', 3));
}
TEST(StringUtilTest, FindNth) {
EXPECT_EQ(7U, FindNth("hello world", 'o', 2));
}
TEST(StringUtilTest, SplitStringUsingWithEmptyString) {
vector<string> result;
SplitStringUsing("", ':', &result);
EXPECT_EQ(0U, result.size());
}
TEST(StringUtilTest, SplitStringUsing) {
vector<string> result;
SplitStringUsing(":hello:world:", ':', &result);
EXPECT_EQ(2U, result.size());
EXPECT_EQ("hello", result[0]);
EXPECT_EQ("world", result[1]);
}
TEST(StringUtilTest, SplitStringUsingIgnoresEmptyToken) {
vector<string> result;
SplitStringUsing("hello::world", ':', &result);
EXPECT_EQ(2U, result.size());
EXPECT_EQ("hello", result[0]);
EXPECT_EQ("world", result[1]);
}
TEST(StringUtilTest, TryStripPrefixString) {
string s;
EXPECT_TRUE(TryStripPrefixString("hello world", "hello", &s));
EXPECT_EQ(" world", s);
s.clear();
EXPECT_FALSE(TryStripPrefixString("hello world", "helloa", &s));
s.clear();
EXPECT_TRUE(TryStripPrefixString("hello world", "", &s));
EXPECT_EQ("hello world", s);
s.clear();
EXPECT_FALSE(TryStripPrefixString("", "hello", &s));
s.clear();
}
TEST(StringUtilTest, HasSuffixString) {
EXPECT_TRUE(HasSuffixString("hello world", "hello world"));
EXPECT_TRUE(HasSuffixString("hello world", "world"));
EXPECT_FALSE(HasSuffixString("hello world", "world!"));
EXPECT_TRUE(HasSuffixString("hello world", ""));
EXPECT_FALSE(HasSuffixString("", "hello"));
}
TEST(StringUtilTest, safe_strto32) {
int32 n;
safe_strto32("0", &n);
EXPECT_EQ(0, n);
safe_strto32("16", &n);
EXPECT_EQ(16, n);
safe_strto32("2147483647", &n);
EXPECT_EQ(2147483647, n);
safe_strto32("-2147483648", &n);
EXPECT_EQ(-2147483648LL, n);
}
TEST(StringUtilTest, safe_strtou64) {
uint64 n;
safe_strtou64("0", &n);
EXPECT_EQ(0U, n);
safe_strtou64("16", &n);
EXPECT_EQ(16U, n);
safe_strtou64("18446744073709551615", &n);
EXPECT_EQ(18446744073709551615ULL, n);
}
TEST(StringUtilTest, strrmm) {
string input("hello");
strrmm(&input, "");
EXPECT_EQ(input, input);
string empty;
strrmm(&empty, "");
EXPECT_EQ("", empty);
strrmm(&empty, "aa");
EXPECT_EQ("", empty);
strrmm(&input, "h");
EXPECT_EQ("ello", input);
strrmm(&input, "el");
EXPECT_EQ("o", input);
}
TEST(StringUtilTest, GlobalReplaceSubstring) {
string input("hello");
EXPECT_EQ(0, GlobalReplaceSubstring("aaa", "", &input));
EXPECT_EQ("hello", input);
EXPECT_EQ(0, GlobalReplaceSubstring("", "aaa", &input));
EXPECT_EQ("hello", input);
EXPECT_EQ(0, GlobalReplaceSubstring("", "", &input));
EXPECT_EQ("hello", input);
EXPECT_EQ(0, GlobalReplaceSubstring("aaa", "bbb", &input));
EXPECT_EQ("hello", input);
EXPECT_EQ(1, GlobalReplaceSubstring("o", "o world", &input));
ASSERT_EQ("hello world", input);
EXPECT_EQ(2, GlobalReplaceSubstring("o", "O", &input));
EXPECT_EQ("hellO wOrld", input);
}
TEST(StringUtilTest, StringHolder) {
static const char cstring[] = "aaa";
StringHolder sh1(cstring);
EXPECT_EQ(cstring, sh1.GetCString());
string s = "aaa";
StringHolder sh2(s);
EXPECT_EQ(cstring, sh2.GetString());
string s2 = "hello";
StringHolder sh3(s2);
EXPECT_EQ(5U, sh3.Length());
StringHolder sh4(42);
static const char cstring2[] = "42";;
EXPECT_EQ(cstring2, sh4.GetString());
}
TEST(StringUtilTest, OperatorPlusEquals) {
string s = "h";
static const char append1[] = "ello";
s += StringHolder(append1);
EXPECT_EQ("hello", s);
s = "h";
string append2 = "ello";
s += StringHolder(append2);
EXPECT_EQ("hello", s);
}
TEST(StringUtilTest, StrCat) {
string s;
s = StrCat("a", "b");
EXPECT_EQ("ab", s);
s = StrCat("a", "b", "c");
EXPECT_EQ("abc", s);
s = StrCat("a", "b", "c", "d");
EXPECT_EQ("abcd", s);
s = StrCat("a", "b", "c", "d", "e");
EXPECT_EQ("abcde", s);
s = StrCat("a", "b", "c", "d", "e", "f");
EXPECT_EQ("abcdef", s);
s = StrCat("a", "b", "c", "d", "e", "f", "g");
EXPECT_EQ("abcdefg", s);
s = StrCat("a", "b", "c", "d", "e", "f", "g", "h");
EXPECT_EQ("abcdefgh", s);
s = StrCat("a", "b", "c", "d", "e", "f", "g", "h", "i");
EXPECT_EQ("abcdefghi", s);
s = StrCat("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k");
EXPECT_EQ("abcdefghijk", s);
}
TEST(StringUtilTest, StrAppend) {
string s;
StrAppend(&s, "a");
ASSERT_EQ("a", s);
StrAppend(&s, "b", "c");
ASSERT_EQ("abc", s);
StrAppend(&s, "d", "e", "f");
ASSERT_EQ("abcdef", s);
StrAppend(&s, "g", "h", "i", "j");
ASSERT_EQ("abcdefghij", s);
StrAppend(&s, "k", "l", "m", "n", "o");
ASSERT_EQ("abcdefghijklmno", s);
StrAppend(&s, 42);
ASSERT_EQ("abcdefghijklmno42", s);
}
}
} |
267 | #ifndef TENSORFLOW_CORE_GRAPH_TENSOR_ID_H_
#define TENSORFLOW_CORE_GRAPH_TENSOR_ID_H_
#include <string>
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
struct SafeTensorId;
// Identifies a tensor as (node name, output slot): first is the producing
// node's name, second is the output index.  The name is held as a
// StringPiece, so a TensorId does not own its string and must not outlive
// the buffer it points into — see SafeTensorId for the owning variant.
struct TensorId : public std::pair<StringPiece, int> {
  typedef std::pair<StringPiece, int> Base;
  // Inherit pair's constructors.
  using Base::pair;
  TensorId() : Base() {}
  // Implicit conversion from the owning SafeTensorId (defined in the .cc).
  TensorId(const SafeTensorId& id);
  const StringPiece node() const { return first; }
  int index() const { return second; }
  string ToString() const {
    // Control dependencies render as "^name"; data outputs as "name:slot".
    if (second == Graph::kControlSlot) return strings::StrCat("^", first);
    return strings::StrCat(first, ":", second);
  }
  struct Hasher {
   public:
    std::size_t operator()(const TensorId& x) const {
      return Hash32(x.first.data(), x.first.size(), x.second);
    }
  };
};
TensorId ParseTensorName(const string& name);
TensorId ParseTensorName(StringPiece name);
bool IsTensorIdControl(const TensorId& tensor_id);
// Owning variant of TensorId: stores the node name in a std::string, so it
// stays valid after the buffer a TensorId pointed into goes away.
struct SafeTensorId : public std::pair<string, int> {
  typedef std::pair<string, int> Base;
  SafeTensorId() : Base() {}
  SafeTensorId(const string& str, int idx) : Base(str, idx) {}
  // Copies the non-owning TensorId's name into an owned string (defined in
  // the .cc).
  SafeTensorId(const TensorId& id);
  const string& node() const { return first; }
  int index() const { return second; }
  string ToString() const {
    // Control dependencies render as "^name"; data outputs as "name:slot".
    if (second == Graph::kControlSlot) return strings::StrCat("^", first);
    return strings::StrCat(first, ":", second);
  }
  struct Hasher {
   public:
    // Deliberately takes a TensorId (to which SafeTensorId converts
    // implicitly), so both id types hash identically.
    std::size_t operator()(const TensorId& x) const {
      return Hash32(x.first.data(), x.first.size(), x.second);
    }
  };
};
}
#endif
#include "tensorflow/core/graph/tensor_id.h"
#include <string>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
TensorId::TensorId(const SafeTensorId& id) : TensorId(id.first, id.second) {}
SafeTensorId::SafeTensorId(const TensorId& id)
: SafeTensorId(string(id.first), id.second) {}
TensorId ParseTensorName(const string& name) {
return ParseTensorName(StringPiece(name.data(), name.size()));
}
// Parses "name", "name:digits", or "^name" into a TensorId.  The returned
// id aliases |name|'s buffer (TensorId is non-owning).
TensorId ParseTensorName(StringPiece name) {
  // Scan backwards from the end, accumulating a trailing run of decimal
  // digits into `index` (least significant digit first, hence `mul`).
  const char* base = name.data();
  const char* p = base + name.size() - 1;
  unsigned int index = 0;
  unsigned int mul = 1;
  while (p > base && (*p >= '0' && *p <= '9')) {
    index += ((*p - '0') * mul);
    mul *= 10;
    p--;
  }
  TensorId id;
  if (p > base && *p == ':' && mul > 1) {
    // "name:digits" — `mul > 1` guarantees at least one digit was consumed,
    // so a bare "name:" does not match this branch.
    id.first = StringPiece(base, p - base);
    id.second = index;
  } else if (absl::StartsWith(name, "^")) {
    // Control input: "^name" maps to slot Graph::kControlSlot.
    id.first = StringPiece(base + 1);
    id.second = Graph::kControlSlot;
  } else {
    // Plain node name: defaults to output slot 0.
    id.first = name;
    id.second = 0;
  }
  return id;
}
bool IsTensorIdControl(const TensorId& tensor_id) {
return tensor_id.index() == Graph::kControlSlot;
}
} | #include "tensorflow/core/graph/tensor_id.h"
#include <vector>
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
string ParseHelper(const string& n) { return ParseTensorName(n).ToString(); }
TEST(TensorIdTest, ParseTensorName) {
EXPECT_EQ(ParseHelper("W1"), "W1:0");
EXPECT_EQ(ParseHelper("W1:0"), "W1:0");
EXPECT_EQ(ParseHelper("weights:0"), "weights:0");
EXPECT_EQ(ParseHelper("W1:1"), "W1:1");
EXPECT_EQ(ParseHelper("W1:17"), "W1:17");
EXPECT_EQ(ParseHelper("xyz1_17"), "xyz1_17:0");
EXPECT_EQ(ParseHelper("^foo"), "^foo");
}
uint32 Skewed(random::SimplePhilox* rnd, int max_log) {
const uint32 space = 1 << (rnd->Rand32() % (max_log + 1));
return rnd->Rand32() % space;
}
void BM_ParseTensorName(::testing::benchmark::State& state) {
const int arg = state.range(0);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<string> names;
for (int i = 0; i < 100; i++) {
string name;
switch (arg) {
case 0: {
size_t len = Skewed(&rnd, 4);
while (name.size() < len) {
name += rnd.OneIn(4) ? '0' : 'a';
}
if (rnd.OneIn(3)) {
strings::StrAppend(&name, ":", rnd.Uniform(12));
}
break;
}
case 1:
name = "W1";
break;
case 2:
name = "t0003";
break;
case 3:
name = "weights";
break;
case 4:
name = "weights:17";
break;
case 5:
name = "^weights";
break;
default:
LOG(FATAL) << "Unexpected arg";
break;
}
names.push_back(name);
}
TensorId id;
int index = 0;
int sum = 0;
for (auto s : state) {
id = ParseTensorName(names[index++ % names.size()]);
sum += id.second;
}
VLOG(2) << sum;
}
BENCHMARK(BM_ParseTensorName)->Arg(0)->Arg(1)->Arg(2)->Arg(3)->Arg(4)->Arg(5);
TEST(TensorIdTest, IsTensorIdControl) {
string input = "^foo";
TensorId tensor_id = ParseTensorName(input);
EXPECT_TRUE(IsTensorIdControl(tensor_id));
input = "foo";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
input = "foo:2";
tensor_id = ParseTensorName(input);
EXPECT_FALSE(IsTensorIdControl(tensor_id));
}
TEST(TensorIdTest, PortZero) {
for (string input : {"foo", "foo:0"}) {
TensorId tensor_id = ParseTensorName(input);
EXPECT_EQ("foo", tensor_id.node());
EXPECT_EQ(0, tensor_id.index());
}
}
}
} |
268 | #ifndef TENSORFLOW_TSL_PLATFORM_BFLOAT16_H_
#define TENSORFLOW_TSL_PLATFORM_BFLOAT16_H_
#include "Eigen/Core"
namespace tsl {
typedef Eigen::bfloat16 bfloat16;
}
#endif
#include "tensorflow/core/framework/bfloat16.h"
#include "Eigen/Core"
namespace tensorflow {
// Converts `size` floats to bfloat16 via Eigen's float->bfloat16 cast,
// which rounds (unlike FloatToBFloat16 below, which truncates).
void RoundFloatToBFloat16(const float* src, bfloat16* dst, int64_t size) {
  Eigen::Map<const Eigen::ArrayXf> src_eigen(src, size);
  Eigen::Map<Eigen::Array<bfloat16, Eigen::Dynamic, 1>> dst_eigen(dst, size);
  dst_eigen = src_eigen.cast<bfloat16>();
}
// Converts `size` floats to bfloat16 by truncation: each output is the 16
// most significant bits of the corresponding IEEE-754 float.  The #if picks
// which two bytes of the float hold those bits on the host.
void FloatToBFloat16(const float* src, bfloat16* dst, int64_t size) {
  for (; size != 0; src++, dst++, size--) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    // Big-endian: the most significant bytes come first.
    memcpy(dst, src, sizeof(bfloat16));
#else
    // Little-endian: the most significant bytes are at the end of the float.
    memcpy(
        dst,
        reinterpret_cast<const char*>(src) + sizeof(float) - sizeof(bfloat16),
        sizeof(bfloat16));
#endif
  }
}
// Widens `size` bfloat16 values to float.  This direction is exact: every
// bfloat16 value is representable as a float.
void BFloat16ToFloat(const bfloat16* src, float* dst, int64_t size) {
  Eigen::Map<const Eigen::Array<bfloat16, Eigen::Dynamic, 1>> src_eigen(src,
                                                                        size);
  Eigen::Map<Eigen::ArrayXf> dst_eigen(dst, size);
  dst_eigen = src_eigen.cast<float>();
}
} | #include <cmath>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/array2d.h"
#include "xla/array4d.h"
#include "xla/client/lib/arithmetic.h"
#include "xla/client/local_client.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/reference_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class Bfloat16Test : public ClientLibraryTestBase {
protected:
const ErrorSpec error_spec_{0.001, 0.001};
};
XLA_TEST_F(Bfloat16Test, ScalarOperation) {
XlaBuilder builder(TestName());
auto x = ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(2.0f));
auto y = ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(1.0f));
Add(x, y);
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(3.0f), {},
error_spec_);
}
XLA_TEST_F(Bfloat16Test, LogOperation) {
XlaBuilder builder(TestName());
auto x = ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(4.0f));
Log(x);
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(1.387f), {},
ErrorSpec(0.01, 0.01));
}
XLA_TEST_F(Bfloat16Test, NegateScalarF16) {
XlaBuilder builder(TestName());
Neg(ConstantR0<bfloat16>(&builder, static_cast<bfloat16>(2.1f)));
ComputeAndCompareR0<bfloat16>(&builder, static_cast<bfloat16>(-2.1f), {},
error_spec_);
}
XLA_TEST_F(Bfloat16Test, DISABLED_ON_INTERPRETER(BatchNormTraining)) {
const int kFeatureIndex = 2;
XlaBuilder builder(TestName());
auto operand = ConstantR4FromArray4D<bfloat16>(
&builder,
{{{{static_cast<bfloat16>(1.f)}, {static_cast<bfloat16>(2.f)}},
{{static_cast<bfloat16>(3.f)}, {static_cast<bfloat16>(4.f)}}},
{{{static_cast<bfloat16>(5.f)}, {static_cast<bfloat16>(6.f)}},
{{static_cast<bfloat16>(7.f)}, {static_cast<bfloat16>(8.f)}}}});
auto scale = ConstantR1<bfloat16>(
&builder, {static_cast<bfloat16>(2.0f), static_cast<bfloat16>(3.0f)});
auto offset = ConstantR1<bfloat16>(
&builder, {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(2.0f)});
BatchNormTraining(operand, scale, offset, 0.001, kFeatureIndex);
auto expected = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR4<bfloat16>(
{{{{static_cast<bfloat16>(-1.6875f)},
{static_cast<bfloat16>(-2.04f)}},
{{static_cast<bfloat16>(0.105f)}, {static_cast<bfloat16>(0.66f)}}},
{{{static_cast<bfloat16>(1.89f)}, {static_cast<bfloat16>(3.35f)}},
{{static_cast<bfloat16>(3.7f)}, {static_cast<bfloat16>(6.04f)}}}}),
LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(4), static_cast<bfloat16>(5)}),
LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(5), static_cast<bfloat16>(5)})});
ComputeAndCompareTuple(&builder, expected, {}, ErrorSpec(0.01, 0.02));
}
XLA_TEST_F(Bfloat16Test, DISABLED_ON_INTERPRETER(BatchNormGrad)) {
const int kFeatureIndex = 2;
XlaBuilder builder(TestName());
auto operand = ConstantR4FromArray4D<bfloat16>(
&builder, Array4D<bfloat16>(2, 2, 2, 1, static_cast<bfloat16>(0.0f)));
auto scale = ConstantR1<bfloat16>(
&builder, {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(1.0f)});
auto mean = ConstantR1<bfloat16>(
&builder, {static_cast<bfloat16>(0.0f), static_cast<bfloat16>(0.0f)});
auto var = ConstantR1<bfloat16>(
&builder, {static_cast<bfloat16>(1.0f), static_cast<bfloat16>(1.0f)});
auto grad_output = ConstantR4FromArray4D<bfloat16>(
&builder,
{{{{static_cast<bfloat16>(1.f)}, {static_cast<bfloat16>(2.f)}},
{{static_cast<bfloat16>(3.f)}, {static_cast<bfloat16>(4.f)}}},
{{{static_cast<bfloat16>(5.f)}, {static_cast<bfloat16>(6.f)}},
{{static_cast<bfloat16>(7.f)}, {static_cast<bfloat16>(8.f)}}}});
BatchNormGrad(operand, scale, mean, var, grad_output,
0.0, kFeatureIndex);
auto expected = LiteralUtil::MakeTupleFromSlices(
{LiteralUtil::CreateR4<bfloat16>(
{{{{static_cast<bfloat16>(-3.f)}, {static_cast<bfloat16>(-3.f)}},
{{static_cast<bfloat16>(-1.f)}, {static_cast<bfloat16>(-1.f)}}},
{{{static_cast<bfloat16>(1.f)}, {static_cast<bfloat16>(1.f)}},
{{static_cast<bfloat16>(3.f)}, {static_cast<bfloat16>(3.f)}}}}),
LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(0), static_cast<bfloat16>(0)}),
LiteralUtil::CreateR1<bfloat16>(
{static_cast<bfloat16>(16), static_cast<bfloat16>(20)})});
ComputeAndCompareTuple(&builder, expected, {}, ErrorSpec(0.01));
}
}
} |
269 | #ifndef TENSORFLOW_CORE_GRAPPLER_UTILS_CANONICALIZER_H_
#define TENSORFLOW_CORE_GRAPPLER_UTILS_CANONICALIZER_H_
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
void CanonicalizeNode(NodeDef* node);
void CanonicalizeGraph(GraphDef* graph);
void CompressConstants(GraphDef* graph);
}
}
#endif
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include <algorithm>
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
// Rewrites a node's input list into a canonical form so that semantically
// equivalent nodes compare equal:
//  - data inputs of commutative ops are sorted;
//  - control inputs (which always follow the data inputs) are sorted and
//    de-duplicated.
void CanonicalizeNode(NodeDef* node) {
  if (node->input_size() < 2) return;
  // Find the first control input ("^name"); everything before `index` is a
  // data input.
  int index = 0;
  for (; index < node->input_size(); ++index) {
    if (IsControlInput(node->input(index))) {
      break;
    }
  }
  auto* input = node->mutable_input();
  // Reordering data inputs is only valid for commutative ops.
  if (IsCommutative(*node) && index > 0) {
    std::sort(input->begin(), input->begin() + index);
  }
  // Control inputs behave as an unordered set: sort, then drop duplicates.
  if (index < node->input_size()) {
    std::sort(input->begin() + index, input->end());
    input->erase(std::unique(input->begin() + index, input->end()),
                 input->end());
  }
}
// Canonicalizes the input list of every node in |graph|; see
// CanonicalizeNode for the exact rewrite.
void CanonicalizeGraph(GraphDef* graph) {
  for (NodeDef& node : *graph->mutable_node()) {
    CanonicalizeNode(&node);
  }
}
// Compresses, in place, the tensor proto stored in the "value" attr of every
// Const/HostConst node in |graph|.  The represented values are unchanged;
// only their serialized encoding shrinks (see tensor_util's
// CompressTensorProtoInPlace for the exact strategy).
void CompressConstants(GraphDef* graph) {
  for (int i = 0; i < graph->node_size(); ++i) {
    NodeDef* node = graph->mutable_node(i);
    if ((IsConstant(*node) || IsHostConstant(*node)) &&
        HasNodeAttr(*node, "value")) {
      AttrValue& attr_val = (*node->mutable_attr())["value"];
      if (attr_val.has_tensor()) {
        tensor::CompressTensorProtoInPlace(attr_val.mutable_tensor());
      }
    }
  }
}
}
} | #include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
NodeDef MakeNode(const string& op) {
NodeDef node;
node.set_name("node");
node.set_op(op);
*node.add_input() = "b";
*node.add_input() = "a";
*node.add_input() = "^z";
*node.add_input() = "^y";
*node.add_input() = "^x";
*node.add_input() = "^z";
return node;
}
void Verify(const NodeDef& node) {
EXPECT_EQ(node.name(), "node");
ASSERT_EQ(node.input_size(), 5);
if (node.op() == "Div") {
EXPECT_EQ(node.input(0), "b");
EXPECT_EQ(node.input(1), "a");
} else {
EXPECT_EQ(node.input(0), "a");
EXPECT_EQ(node.input(1), "b");
}
EXPECT_EQ(node.input(2), "^x");
EXPECT_EQ(node.input(3), "^y");
EXPECT_EQ(node.input(4), "^z");
}
TEST(CanonicalizeNode, NonCommutative) {
NodeDef node = MakeNode("Div");
CanonicalizeNode(&node);
Verify(node);
}
TEST(CanonicalizeNode, Commutative) {
NodeDef node = MakeNode("Mul");
CanonicalizeNode(&node);
Verify(node);
}
TEST(CanonicalizeGraph, Simple) {
GraphDef graph;
*graph.add_node() = MakeNode("Div");
*graph.add_node() = MakeNode("Mul");
CanonicalizeGraph(&graph);
for (auto node : graph.node()) {
Verify(node);
}
}
}
}
} |
270 | #ifndef I18N_ADDRESSINPUT_ADDRESS_FORMATTER_H_
#define I18N_ADDRESSINPUT_ADDRESS_FORMATTER_H_
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
struct AddressData;
void GetFormattedNationalAddress(
const AddressData& address_data, std::vector<std::string>* lines);
void GetFormattedNationalAddressLine(
const AddressData& address_data, std::string* line);
void GetStreetAddressLinesAsSingleLine(
const AddressData& address_data, std::string* line);
}
}
#endif
#include <libaddressinput/address_formatter.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <string>
#include <vector>
#include "format_element.h"
#include "language.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/cctype_tolower_equal.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
const char kCommaSeparator[] = ", ";
const char kSpaceSeparator[] = " ";
const char kArabicCommaSeparator[] = "، ";
const char kLanguagesThatUseSpace[][3] = {
"th",
"ko",
};
const char kLanguagesThatHaveNoSeparator[][3] = {
"ja",
"zh",
};
const char kLanguagesThatUseAnArabicComma[][3] = {
"ar",
"fa",
"ku",
"ps",
"ur",
};
// Returns the separator used to join address lines for |language_tag|.
// Latin-script addresses always use ", "; otherwise the choice depends on
// the base language: a space, no separator at all, an Arabic comma, or the
// default ", ".
std::string GetLineSeparatorForLanguage(const std::string& language_tag) {
  Language address_language(language_tag);
  if (address_language.has_latin_script) {
    return kCommaSeparator;
  }
  const std::string& base_language = address_language.base;
  using std::placeholders::_1;
  // True iff |base_language| case-insensitively matches an entry of the
  // given language-code table.
  const auto base_language_in = [&base_language](const char (*table)[3],
                                                 size_t table_size) {
    const char (*table_end)[3] = table + table_size;
    return std::find_if(table, table_end,
                        std::bind(&EqualToTolowerString, _1,
                                  base_language)) != table_end;
  };
  if (base_language_in(kLanguagesThatUseSpace,
                       size(kLanguagesThatUseSpace))) {
    return kSpaceSeparator;
  }
  if (base_language_in(kLanguagesThatHaveNoSeparator,
                       size(kLanguagesThatHaveNoSeparator))) {
    return "";
  }
  if (base_language_in(kLanguagesThatUseAnArabicComma,
                       size(kLanguagesThatUseAnArabicComma))) {
    return kArabicCommaSeparator;
  }
  return kCommaSeparator;
}
// Joins |lines| into |*line|, separated according to |language_tag|.
// Any previous contents of |*line| are discarded.
void CombineLinesForLanguage(const std::vector<std::string>& lines,
                             const std::string& language_tag,
                             std::string* line) {
  line->clear();
  const std::string separator = GetLineSeparatorForLanguage(language_tag);
  bool first = true;
  for (const std::string& current : lines) {
    if (!first) {
      line->append(separator);
    }
    line->append(current);
    first = false;
  }
}
}
// Formats |address_data| into display lines according to the region's
// formatting rule (or the rule's Latin format when the address language
// uses Latin script). Empty fields are dropped, as are literals that would
// be left dangling next to a dropped field.
void GetFormattedNationalAddress(
    const AddressData& address_data, std::vector<std::string>* lines) {
  assert(lines != nullptr);
  lines->clear();
  Rule rule;
  rule.CopyFrom(Rule::GetDefault());
  rule.ParseSerializedRule(
      RegionDataConstants::GetRegionData(address_data.region_code));
  Language language(address_data.language_code);
  // Prefer the Latin format when the requested language is Latin-script
  // and the rule provides one.
  const std::vector<FormatElement>& format =
      language.has_latin_script && !rule.GetLatinFormat().empty()
          ? rule.GetLatinFormat()
          : rule.GetFormat();
  // Prune the format: keep newlines; keep fields that have data; keep a
  // literal only if its neighbors justify it — the following element is
  // absent/non-field/non-empty AND the preceding element is absent/
  // non-field/a kept field. (Note: the last two parenthesized groups bind
  // with && against !IsField(), per C++ precedence.)
  std::vector<FormatElement> pruned_format;
  for (auto element_it = format.begin();
       element_it != format.end();
       ++element_it) {
    if (element_it->IsNewline() ||
        (element_it->IsField() &&
         !address_data.IsFieldEmpty(element_it->GetField())) ||
        (!element_it->IsField() &&
         (element_it + 1 == format.end() ||
          !(element_it + 1)->IsField() ||
          !address_data.IsFieldEmpty((element_it + 1)->GetField())) &&
         (element_it == format.begin() ||
          !(element_it - 1)->IsField() ||
          (!pruned_format.empty() && pruned_format.back().IsField())))) {
      pruned_format.push_back(*element_it);
    }
  }
  // Render the pruned format, accumulating one output line at a time.
  std::string line;
  for (const auto& element : pruned_format) {
    if (element.IsNewline()) {
      if (!line.empty()) {
        lines->push_back(line);
        line.clear();
      }
    } else if (element.IsField()) {
      AddressField field = element.GetField();
      if (field == STREET_ADDRESS) {
        // STREET_ADDRESS is multi-valued: the first street line joins the
        // current line, middle lines become whole output lines, and the
        // last street line starts the next accumulated line so trailing
        // literals can attach to it.
        if (!address_data.IsFieldEmpty(field)) {
          line.append(address_data.address_line.front());
          if (address_data.address_line.size() > 1U) {
            lines->push_back(line);
            line.clear();
            const auto last_element_iterator =
                address_data.address_line.begin() +
                address_data.address_line.size() - 1;
            lines->insert(lines->end(), address_data.address_line.begin() + 1,
                          last_element_iterator);
            line.append(*last_element_iterator);
          }
        }
      } else {
        line.append(address_data.GetFieldValue(field));
      }
    } else {
      line.append(element.GetLiteral());
    }
  }
  // Flush the final accumulated line, if any.
  if (!line.empty()) {
    lines->push_back(line);
  }
}
// Formats |address_data| and collapses the result into a single line,
// using the language-appropriate separator.
void GetFormattedNationalAddressLine(
    const AddressData& address_data, std::string* line) {
  std::vector<std::string> formatted_lines;
  GetFormattedNationalAddress(address_data, &formatted_lines);
  CombineLinesForLanguage(formatted_lines, address_data.language_code, line);
}
// Joins only the street-address lines of |address_data| into one line,
// separated according to the address language.
void GetStreetAddressLinesAsSingleLine(
    const AddressData& address_data, std::string* line) {
  const std::vector<std::string>& street_lines = address_data.address_line;
  CombineLinesForLanguage(street_lines, address_data.language_code, line);
}
}
} | #include <libaddressinput/address_formatter.h>
#include <libaddressinput/address_data.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::GetFormattedNationalAddress;
using i18n::addressinput::GetFormattedNationalAddressLine;
using i18n::addressinput::GetStreetAddressLinesAsSingleLine;
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_EmptyAddress) {
const AddressData address;
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_TRUE(result.empty());
}
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_1Line) {
AddressData address{
.region_code = "US",
.address_line{"Line 1"},
};
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1", result);
address.language_code = "en";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1", result);
address.language_code = "zh-Hans";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1", result);
}
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_2Lines) {
AddressData address{
.region_code = "US",
.address_line{
"Line 1",
"Line 2",
},
};
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1, Line 2", result);
address.language_code = "en";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1, Line 2", result);
address.language_code = "zh-Hans";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1Line 2", result);
address.language_code = "ko";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1 Line 2", result);
address.language_code = "ar";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1، Line 2", result);
}
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_5Lines) {
const AddressData address{
.region_code = "US",
.address_line{
"Line 1",
"Line 2",
"Line 3",
"Line 4",
"Line 5",
},
.language_code = "fr",
};
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ(result, "Line 1, Line 2, Line 3, Line 4, Line 5");
}
TEST(AddressFormatterTest, GetFormattedNationalAddressLocalLanguage) {
AddressData address{
.region_code = "NZ",
.address_line{
"Rotopapa",
"Irwell 3RD",
},
.locality = "Leeston",
.postal_code = "8704",
};
const std::vector<std::string> expected{
"Rotopapa",
"Irwell 3RD",
"Leeston 8704",
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.language_code = "en-Latn-CN";
lines.clear();
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
std::string one_line;
GetFormattedNationalAddressLine(address, &one_line);
EXPECT_EQ("Rotopapa, Irwell 3RD, Leeston 8704", one_line);
}
TEST(AddressFormatterTest, GetFormattedNationalAddressLatinFormat) {
static const char kTaiwanCity[] = "大安區";
static const char kTaiwanAdmin[] = "台北市";
static const char kTaiwanStreetLine[] = "台灣信義路三段33號";
static const char kPostalCode[] = "106";
const AddressData address{
.region_code = "TW",
.address_line{kTaiwanStreetLine},
.administrative_area = kTaiwanAdmin,
.locality = kTaiwanCity,
.postal_code = kPostalCode,
.language_code = "zh-Hant",
};
const std::vector<std::string> expected{
kPostalCode,
std::string(kTaiwanAdmin).append(kTaiwanCity),
kTaiwanStreetLine,
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
std::string one_line;
GetFormattedNationalAddressLine(address, &one_line);
EXPECT_EQ(std::string(kPostalCode)
.append(kTaiwanAdmin)
.append(kTaiwanCity)
.append(kTaiwanStreetLine),
one_line);
const AddressData latin_address{
.region_code = "TW",
.address_line{"No. 33, Section 3 Xinyi Rd"},
.administrative_area = "Taipei City",
.locality = "Da-an District",
.postal_code = kPostalCode,
.language_code = "zh-Latn",
};
const std::vector<std::string> expected_latin{
"No. 33, Section 3 Xinyi Rd",
"Da-an District, Taipei City 106",
};
lines.clear();
GetFormattedNationalAddress(latin_address, &lines);
EXPECT_EQ(expected_latin, lines);
GetFormattedNationalAddressLine(latin_address, &one_line);
EXPECT_EQ("No. 33, Section 3 Xinyi Rd, Da-an District, Taipei City 106",
one_line);
}
TEST(AddressFormatterTest, GetFormattedNationalAddressMultilingualCountry) {
const AddressData address{
.region_code = "CA",
.address_line{
"5 Rue du Tresor",
"Apt. 4",
},
.administrative_area = "QC",
.locality = "Montmagny",
.postal_code = "G1R 123",
.language_code = "fr",
};
const std::vector<std::string> expected{
"5 Rue du Tresor",
"Apt. 4",
"Montmagny QC G1R 123",
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest, GetFormattedNationalAddress_InlineStreetAddress) {
const AddressData address{
.region_code = "CI",
.address_line{"32 Boulevard Carde"},
.locality = "Abidjan",
.sorting_code = "64",
.language_code = "zh-Hant",
};
const std::vector<std::string> expected{"64 32 Boulevard Carde Abidjan 64"};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralsAroundField) {
AddressData address{.region_code = "CH"};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "Zurich";
expected.emplace_back("Zurich");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "8001";
expected.back().assign("CH-8001 Zurich");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality.clear();
expected.back().assign("CH-8001");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralsBetweenFields) {
AddressData address{.region_code = "US"};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.administrative_area = "CA";
expected.emplace_back("CA");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "Los Angeles";
expected.back().assign("Los Angeles, CA");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "90291";
expected.back().assign("Los Angeles, CA 90291");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.administrative_area.clear();
expected.back().assign("Los Angeles 90291");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality.clear();
address.administrative_area = "CA";
expected.back().assign("CA 90291");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralOnSeparateLine) {
AddressData address{.region_code = "AX"};
std::vector<std::string> expected{"ÅLAND"};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "City";
expected.emplace(expected.begin(), "City");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "123";
expected.front().assign("AX-123 City");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralBeforeField) {
AddressData address{
.region_code = "JP",
.language_code = "ja",
};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "123";
expected.emplace_back("〒123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.administrative_area = "Prefecture";
expected.emplace_back("Prefecture");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code.clear();
expected.erase(expected.begin());
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddress_LiteralBeforeOneAddressLine) {
const AddressData address{
.region_code = "JP",
.address_line{"Roppongi Hills"},
.administrative_area = "Tokyo",
.language_code = "ja_Latn",
};
const std::vector<std::string> expected{"Roppongi Hills, Tokyo"};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddress_LiteralBeforeTwoAddressLines) {
const AddressData address{
.region_code = "JP",
.address_line{
"Roppongi Hills",
"Mori Tower",
},
.administrative_area = "Tokyo",
.language_code = "ja_Latn",
};
const std::vector<std::string> expected{
"Roppongi Hills",
"Mori Tower, Tokyo",
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_DuplicateField) {
AddressData address{.region_code = "CI"};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.sorting_code = "123";
expected.emplace_back("123 123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.address_line.emplace_back("456 Main St");
expected.back().assign("123 456 Main St 123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "Yamoussoukro";
expected.back().assign("123 456 Main St Yamoussoukro 123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.sorting_code.erase();
expected.back().assign("456 Main St Yamoussoukro");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.address_line.clear();
expected.back().assign("Yamoussoukro");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
} |
271 | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_RPC_RENDEZVOUS_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_RPC_RPC_RENDEZVOUS_MGR_H_
#include "tensorflow/core/distributed_runtime/base_rendezvous_mgr.h"
#include "tensorflow/core/distributed_runtime/worker_env.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
class DeviceMgr;
// RendezvousMgr whose cross-worker tensor transfers go over RPC. The base
// class manages per-step lifetime; this subclass only supplies the
// per-step rendezvous objects via Create().
class RpcRendezvousMgr : public BaseRendezvousMgr {
 public:
  explicit RpcRendezvousMgr(const WorkerEnv* env);
 protected:
  // Factory hook invoked by BaseRendezvousMgr for each new step.
  tsl::core::RefCountPtr<BaseRemoteRendezvous> Create(
      int64_t step_id, const WorkerEnv* worker_env) override;
 private:
  RpcRendezvousMgr(const RpcRendezvousMgr&) = delete;
  void operator=(const RpcRendezvousMgr&) = delete;
};
}
#endif
#include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
// Per-step rendezvous that fetches remote tensors with RecvTensor RPCs.
// Local sends/recvs are handled entirely by BaseRemoteRendezvous.
class RpcRemoteRendezvous : public BaseRemoteRendezvous {
 public:
  RpcRemoteRendezvous(const WorkerEnv* env, int64_t step_id)
      : BaseRemoteRendezvous(env, step_id) {}
 protected:
  // Issues an async RecvTensor RPC to the source worker named in |parsed|.
  void RecvFromRemoteAsync(const Rendezvous::ParsedKey& parsed,
                           const Rendezvous::Args& args,
                           DoneCallback done) override;
 private:
  // Ref-counted: destruction only via Unref().
  ~RpcRemoteRendezvous() override {}
  RpcRemoteRendezvous(const RpcRemoteRendezvous&) = delete;
  void operator=(const RpcRemoteRendezvous&) = delete;
};
// One outstanding RecvTensor RPC. Instances are pooled (see
// RpcRecvTensorFreeList below) and recycled through Init()/Reset().
class RpcRecvTensorCall : public BaseRecvTensorCall {
 public:
  RpcRecvTensorCall() : wi_(nullptr), dst_device_(nullptr) {}
  // Prepares this call for a single RPC. |wi| is borrowed from the worker
  // cache and must be returned later via ReleaseWorker().
  void Init(WorkerInterface* wi, int64_t step_id, StringPiece key,
            AllocatorAttributes alloc_attrs, Device* dst_device,
            const Rendezvous::Args& recv_args, Rendezvous::DoneCallback done) {
    wi_ = wi;
    alloc_attrs_ = alloc_attrs;
    dst_device_ = dst_device;
    recv_args_ = recv_args;
    done_ = std::move(done);
    req_.set_step_id(step_id);
    req_.set_rendezvous_key(key.data(), key.size());
    // Unique id lets the server side de-duplicate retried requests.
    req_.set_request_id(GetUniqueRequestId());
  }
  // Restores the call to a pristine state so the pool can reuse it. The
  // worker must already have been released (wi_ == nullptr).
  void Reset() {
    DCHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
        << "Leaking WorkerInterface in RpcRecvTensorCall::Reset().";
    alloc_attrs_ = AllocatorAttributes();
    dst_device_ = nullptr;
    req_.Clear();
    resp_.Clear();
    {
      mutex_lock l(mu_);
      status_ = absl::OkStatus();
    }
    done_ = nullptr;
  }
  ~RpcRecvTensorCall() override {
    // Hard CHECK: destroying a call that still owns a worker would leak it
    // from the worker cache.
    CHECK_EQ(static_cast<WorkerInterface*>(nullptr), wi_)
        << "Leaking WorkerInterface in RpcRecvTensorCall destructor.";
  }
  // Issues the RPC; |recv_done| runs once the response (or failure) arrives.
  void Start(std::function<void()> recv_done) override {
    StartRTCall(std::move(recv_done));
  }
  // Records |s| and cancels the in-flight RPC, if any.
  void StartAbort(const Status& s) override {
    {
      mutex_lock l(mu_);
      status_.Update(s);
    }
    opts_.StartCancel();
  }
  Status status() const override {
    mutex_lock l(mu_);
    return status_;
  }
  // Returns the borrowed WorkerInterface to |worker_cache|. Must be called
  // exactly once between Init() and Reset().
  void ReleaseWorker(WorkerCacheInterface* worker_cache) {
    DCHECK_NE(static_cast<WorkerInterface*>(nullptr), wi_)
        << "RpcRecvTensorCall::ReleaseWorker() called twice.";
    worker_cache->ReleaseWorker(src_worker_, wi_);
    wi_ = nullptr;
  }
  // Accessors for the RPC result and the original recv context.
  const Tensor& tensor() const { return resp_.tensor(); }
  bool is_dead() const { return resp_.metadata().is_dead(); }
  Device* dst_device() const { return dst_device_; }
  const Rendezvous::Args& recv_args() const { return recv_args_; }
  const Rendezvous::DoneCallback& done() const { return done_; }
 private:
  friend class RpcRemoteRendezvous;
  void StartRTCall(std::function<void()> recv_done) {
    resp_.InitAlloc(dst_device_, alloc_attrs_);
    // |abort_checked| delays the RPC completion callback until the abort
    // check below has run, so the two cannot race on |opts_|.
    auto abort_checked = std::make_shared<Notification>();
    auto cb = [this, abort_checked,
               recv_done = std::move(recv_done)](const Status& s) {
      abort_checked->WaitForNotification();
      if (!s.ok()) {
        mutex_lock l(mu_);
        status_.Update(s);
      }
      recv_done();
    };
    wi_->RecvTensorAsync(&opts_, &req_, &resp_, std::move(cb));
    // If StartAbort() ran before the RPC was issued, cancel it now.
    Status s;
    {
      mutex_lock l(mu_);
      s = status_;
    }
    if (!s.ok()) {
      opts_.StartCancel();
    }
    abort_checked->Notify();
  }
  string src_worker_;
  string src_rel_device_;
  WorkerInterface* wi_;
  AllocatorAttributes alloc_attrs_;
  Device* dst_device_;
  CallOptions opts_;
  RecvTensorRequest req_;
  TensorResponse resp_;
  Rendezvous::Args recv_args_;
  Rendezvous::DoneCallback done_;
  mutable mutex mu_;
  Status status_ TF_GUARDED_BY(mu_);
  RpcRecvTensorCall(const RpcRecvTensorCall&) = delete;
  void operator=(const RpcRecvTensorCall&) = delete;
};
class RpcRecvTensorFreeList {
public:
RpcRecvTensorFreeList() {}
~RpcRecvTensorFreeList() {
for (size_t i = 0; i < objects_.size(); i++) {
delete objects_[i];
}
}
RpcRecvTensorCall* New() {
{
mutex_lock l(mu_);
if (!objects_.empty()) {
RpcRecvTensorCall* result = objects_.back();
objects_.pop_back();
return result;
}
}
return new RpcRecvTensorCall;
}
void Release(RpcRecvTensorCall* obj) {
obj->Reset();
{
mutex_lock l(mu_);
if (objects_.size() < kMaxObjects) {
objects_.push_back(obj);
return;
}
}
delete obj;
}
private:
static constexpr int kMaxObjects = 1000;
mutex mu_;
std::vector<RpcRecvTensorCall*> objects_ TF_GUARDED_BY(mu_);
};
// Process-wide free list; constructed lazily and intentionally never freed.
static RpcRecvTensorFreeList* get_call_freelist() {
  static RpcRecvTensorFreeList* const call_freelist =
      new RpcRecvTensorFreeList();
  return call_freelist;
}
// Fetches the tensor identified by |parsed| from its source worker via a
// RecvTensor RPC, invoking |done| on completion or failure.
void RpcRemoteRendezvous::RecvFromRemoteAsync(
    const Rendezvous::ParsedKey& parsed, const Rendezvous::Args& recv_args,
    DoneCallback done) {
  CHECK(is_initialized());
  Status s;
  // Calls are pooled; every exit path must Release() the call.
  RpcRecvTensorCall* call = get_call_freelist()->New();
  if (!DeviceNameUtils::SplitDeviceName(parsed.src_device, &call->src_worker_,
                                        &call->src_rel_device_)) {
    s = errors::Internal(parsed.src_device,
                         " is invalid remote source device.");
  }
  WorkerSession* sess = session();
  // Hold a shared_ptr so the worker cache outlives the async callback below.
  std::shared_ptr<WorkerCacheInterface> worker_cache =
      sess->GetSharedWorkerCache();
  WorkerInterface* rwi = worker_cache->GetOrCreateWorker(call->src_worker_);
  if (s.ok() && rwi == nullptr) {
    s = errors::Internal("No worker known as ", call->src_worker_);
  }
  Device* dst_device;
  if (s.ok()) {
    s = sess->device_mgr()->LookupDevice(parsed.dst_device, &dst_device);
  }
  if (!s.ok()) {
    // Early failure: undo the worker borrow and return the call to the pool.
    if (rwi != nullptr) {
      sess->worker_cache()->ReleaseWorker(call->src_worker_, rwi);
    }
    get_call_freelist()->Release(call);
    done(s, Args(), recv_args, Tensor{}, false);
    return;
  }
  call->Init(rwi, step_id_, parsed.FullKey(), recv_args.alloc_attrs, dst_device,
             recv_args, std::move(done));
  // Registration makes the call abortable; it can "fail" if the rendezvous
  // was already aborted, in which case tear the call down immediately.
  RegisterCall(call, recv_args);
  if (!call->status().ok()) {
    DeregisterCall(call, recv_args);
    call->ReleaseWorker(sess->worker_cache());
    call->done()(call->status(), Args(), Args(), Tensor(), false);
    get_call_freelist()->Release(call);
    return;
  }
  // Keep this rendezvous alive until the RPC completes (Unref in callback).
  Ref();
  call->Start([this, call, recv_args, worker_cache]() {
    // Order matters: deregister, copy out results, release the worker, run
    // the user callback, then recycle the call.
    DeregisterCall(call, recv_args);
    Status s = call->status();
    call->ReleaseWorker(session()->worker_cache());
    call->done()(s, Args(), call->recv_args(), call->tensor(), call->is_dead());
    get_call_freelist()->Release(call);
    Unref();
  });
}
}
// Construction delegates entirely to the base class; per-step rendezvous
// objects are produced on demand by Create().
RpcRendezvousMgr::RpcRendezvousMgr(const WorkerEnv* env)
    : BaseRendezvousMgr(env) {}
// Returns a new RPC-backed rendezvous for |step_id|, owned by the caller
// via the returned RefCountPtr.
tsl::core::RefCountPtr<BaseRemoteRendezvous> RpcRendezvousMgr::Create(
    int64_t step_id, const WorkerEnv* worker_env) {
  return tsl::core::RefCountPtr<BaseRemoteRendezvous>(
      new RpcRemoteRendezvous(worker_env, step_id));
}
} | #include "tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/test_utils.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<tstring>()() = content;
return tensor;
}
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<tstring>()();
}
Rendezvous::ParsedKey MakeKey(const string& s) {
Rendezvous::ParsedKey key;
CHECK(Rendezvous::ParseKey(s, &key).ok());
return key;
}
namespace {
// Test worker whose RecvTensorAsync succeeds after a random 0-99 ms delay
// on a background thread; the tensor response is left empty.
class DummyWorker : public TestWorkerInterface {
 public:
  void RecvTensorAsync(CallOptions* opts, const RecvTensorRequest* request,
                       TensorResponse* response, StatusCallback done) override {
    // Simulate network latency before signaling completion.
    SchedClosure([done = std::move(done)]() {
      const int64_t t_us = random::New64() % 100 * 1000;
      Env::Default()->SleepForMicroseconds(t_us);
      done(absl::OkStatus());
    });
  }
};
class DummyWorkerCache : public WorkerCacheInterface {
void ListWorkers(std::vector<string>* workers) const override {}
void ListWorkersInJob(const string& job_name,
std::vector<string>* workers) const override {}
WorkerInterface* GetOrCreateWorker(const string& target) override {
if (dummy_remote_worker_ == nullptr) {
dummy_remote_worker_ = new DummyWorker;
}
return dummy_remote_worker_;
}
Status GetEagerClientCache(
std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
Status GetCoordinationClientCache(
std::unique_ptr<CoordinationClientCache>* coord_client_cache) override {
return errors::Unimplemented("Unimplemented.");
}
bool GetDeviceLocalityNonBlocking(const string& device,
DeviceLocality* locality) override {
return false;
}
void GetDeviceLocalityAsync(const string& device, DeviceLocality* locality,
StatusCallback done) override {}
private:
DummyWorker* dummy_remote_worker_ = nullptr;
};
static Device* CreateDevice(const char* type, const char* name) {
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
};
DeviceAttributes attr;
attr.set_name(name);
attr.set_device_type(type);
return new FakeDevice(attr);
}
static DeviceMgr* CreateDeviceMgr() {
std::unique_ptr<Device> d0(
CreateDevice("CPU", "/job:mnist/replica:1/task:2/cpu:1"));
std::vector<std::unique_ptr<Device>> devices;
devices.emplace_back(std::move(d0));
return new StaticDeviceMgr(std::move(devices));
}
}
class RpcRendezvousMgrTest : public ::testing::Test {
protected:
RpcRendezvousMgrTest()
: cache_(new DummyWorkerCache),
worker_session_("rpc_session", "/job:mnist/replica:1/task:2",
std::unique_ptr<WorkerCacheInterface>(cache_),
std::unique_ptr<DeviceMgr>(CreateDeviceMgr()),
std::unique_ptr<GraphMgr>(), nullptr,
[](WorkerSession* worker_session, bool called,
DeviceMgr* remote_device_mgr) { return nullptr; }),
rmgr_(&env) {
env.env = Env::Default();
}
DummyWorkerCache* cache_;
WorkerEnv env;
WorkerSession worker_session_;
RpcRendezvousMgr rmgr_;
};
TEST_F(RpcRendezvousMgrTest, LocalSendRecv) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Tensor val(DT_FLOAT);
bool val_dead = false;
TF_ASSERT_OK(rmgr_.RecvLocal(step_id, key, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
}
rmgr_.Cleanup(step_id);
}
TEST_F(RpcRendezvousMgrTest, LocalAbort) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, rendez = rendez.GetNewRef()]() {
env.env->SleepForMicroseconds(100 * 1000);
rendez->StartAbort(errors::Aborted(""));
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
{
const int64_t step_id = 321;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
SchedClosure([this, step_id]() {
env.env->SleepForMicroseconds(100 * 1000);
rmgr_.Cleanup(step_id);
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsAborted(rendez->Recv(key, args, &val, &val_dead)));
}
}
TEST_F(RpcRendezvousMgrTest, LocalCancel) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
EXPECT_TRUE(errors::IsCancelled(rendez->Recv(key, args, &val, &val_dead)));
n.WaitForNotification();
delete cm;
}
TEST_F(RpcRendezvousMgrTest, CancelAfterReceived) {
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
auto* cm = new CancellationManager();
const int64_t step_id = 123;
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Notification n;
SchedClosure([this, rendez = rendez.get(), key, cm, &n]() {
env.env->SleepForMicroseconds(100 * 1000);
TF_ASSERT_OK(rendez->Send(key, Rendezvous::Args(), V("peach"), false));
cm->StartCancel();
n.Notify();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
args.cancellation_manager = cm;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
EXPECT_EQ(V(val), "peach");
n.WaitForNotification();
delete cm;
}
namespace {
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
private:
const int stream_id_;
};
}
TEST_F(RpcRendezvousMgrTest, TransferDummyDeviceContext) {
DummyDeviceContext* dc = new DummyDeviceContext(123);
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
Rendezvous::Args args;
args.device_context = dc;
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
TF_ASSERT_OK(rendez->Send(key, args, V("peach"), false));
}
{
Notification n;
rmgr_.RecvLocalAsync(
step_id, key,
[&n](const Status& s, const Rendezvous::Args send_args,
const Rendezvous::Args recv_args, const Tensor& val,
bool is_dead) {
auto send_dev_context =
static_cast<DummyDeviceContext*>(send_args.device_context);
CHECK_EQ(123, send_dev_context->stream_id());
CHECK_EQ(V(val), "peach");
n.Notify();
});
n.WaitForNotification();
}
rmgr_.Cleanup(step_id);
dc->Unref();
}
TEST_F(RpcRendezvousMgrTest, RemoteRecvOne) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
Tensor val(DT_STRING);
bool val_dead = false;
TF_ASSERT_OK(rendez->Recv(key, args, &val, &val_dead));
}
rmgr_.Cleanup(step_id);
}
TEST_F(RpcRendezvousMgrTest, RemoteRecvAsyncMany) {
const int64_t step_id = 123;
const Rendezvous::ParsedKey key = MakeKey(Rendezvous::CreateKey(
"/job:worker/replica:1/task:2/cpu:0", 7890,
"/job:mnist/replica:1/task:2/cpu:1", "foo", FrameAndIter(0, 0)));
{
tsl::core::RefCountPtr<RemoteRendezvous> rendez = rmgr_.Find(step_id);
TF_ASSERT_OK(rendez->Initialize(&worker_session_));
Rendezvous::Args args;
int num_requests = 10000;
Tensor val(DT_STRING);
mutex mu_;
Status status = absl::OkStatus();
BlockingCounter counter(num_requests);
for (int i = 0; i < num_requests; i++) {
rendez->RecvAsync(
key, args,
[&mu_, &status, &counter](const Status& s, const Rendezvous::Args&,
const Rendezvous::Args&, const Tensor&,
const bool) {
{
mutex_lock l(mu_);
status.Update(s);
}
counter.DecrementCount();
});
}
counter.Wait();
TF_ASSERT_OK(status);
}
rmgr_.Cleanup(step_id);
}
} |
272 | #ifndef TENSORSTORE_INTERNAL_DECODED_MATCHES_H_
#define TENSORSTORE_INTERNAL_DECODED_MATCHES_H_
#include <functional>
#include <string>
#include <string_view>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
::testing::Matcher<absl::Cord> DecodedMatches(
::testing::Matcher<std::string_view> value_matcher,
std::function<Result<std::string>(std::string_view)> decoder);
}
}
#endif
#include "tensorstore/internal/decoded_matches.h"
#include <functional>
#include <ostream>
#include <string>
#include <string_view>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
namespace {
using DecodeFunction = std::function<Result<std::string>(std::string_view)>;
// gMock matcher adapter: decodes an absl::Cord with |decoder_| and then
// applies |value_matcher_| to the decoded string.
class Matcher : public ::testing::MatcherInterface<absl::Cord> {
 public:
  Matcher(::testing::Matcher<std::string_view> value_matcher,
          DecodeFunction decoder)
      : value_matcher_(std::move(value_matcher)),
        decoder_(std::move(decoder)) {}
  // A decode failure is reported to |listener| and counts as a mismatch.
  bool MatchAndExplain(
      absl::Cord value,
      ::testing::MatchResultListener* listener) const override {
    const auto decode_result = decoder_(value.Flatten());
    if (!decode_result.ok()) {
      *listener << "Failed to decode value: " << decode_result.status();
      return false;
    }
    return value_matcher_.MatchAndExplain(*decode_result, listener);
  }
  void DescribeTo(std::ostream* os) const override {
    *os << "when decoded ";
    value_matcher_.DescribeTo(os);
  }
 private:
  ::testing::Matcher<std::string_view> value_matcher_;
  DecodeFunction decoder_;
};
}
// Builds a Cord matcher that first runs |decoder| and then matches the
// decoded text with |value_matcher|.
::testing::Matcher<absl::Cord> DecodedMatches(
    ::testing::Matcher<std::string_view> value_matcher,
    DecodeFunction decoder) {
  auto* impl = new Matcher(std::move(value_matcher), std::move(decoder));
  // MakeMatcher takes ownership of |impl|.
  return ::testing::MakeMatcher(impl);
}
}
} | #include "tensorstore/internal/decoded_matches.h"
#include <cstddef>
#include <sstream>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal::DecodedMatches;
// Test decoder that keeps every even-indexed byte of the input and rejects
// odd-length inputs with InvalidArgument.
tensorstore::Result<std::string> Stride2Decoder(std::string_view input) {
  if (input.size() % 2 != 0) {
    return absl::InvalidArgumentError("");
  }
  std::string decoded;
  decoded.reserve(input.size() / 2);
  for (size_t pos = 0; pos < input.size(); pos += 2) {
    decoded.push_back(input[pos]);
  }
  return decoded;
}
// DescribeTo() prefixes the wrapped matcher's description with "when decoded".
TEST(DecodedMatchesTest, Describe) {
  std::ostringstream ss;
  DecodedMatches("x", Stride2Decoder).DescribeTo(&ss);
  EXPECT_EQ("when decoded is equal to \"x\"", ss.str());
}

// A value-matcher mismatch is explained in terms of the decoded value.
TEST(DecodedMatchesTest, ExplainValueMatcher) {
  ::testing::StringMatchResultListener listener;
  ::testing::ExplainMatchResult(
      DecodedMatches(::testing::SizeIs(3), Stride2Decoder), absl::Cord("xy"),
      &listener);
  EXPECT_EQ("whose size 1 doesn't match", listener.str());
}

// A decoder failure surfaces the error status in the explanation.
TEST(DecodedMatchesTest, ExplainDecodeError) {
  ::testing::StringMatchResultListener listener;
  ::testing::ExplainMatchResult(DecodedMatches("x", Stride2Decoder),
                                absl::Cord("xyz"), &listener);
  EXPECT_EQ("Failed to decode value: INVALID_ARGUMENT: ", listener.str());
}

// End-to-end match and mismatch cases.
TEST(DecodedMatchesTest, Matches) {
  EXPECT_THAT(absl::Cord("abcd"), DecodedMatches("ac", Stride2Decoder));
  EXPECT_THAT(absl::Cord("abc"),
              ::testing::Not(DecodedMatches("ac", Stride2Decoder)));
  EXPECT_THAT(absl::Cord("abcd"),
              ::testing::Not(DecodedMatches("ab", Stride2Decoder)));
  EXPECT_THAT(absl::Cord("abcd"),
              DecodedMatches(::testing::Not("ab"), Stride2Decoder));
}
} |
273 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_PROPAGATION_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_DEVICE_PROPAGATION_H_
#include <functional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace device_propagation {

// Predicate over a device name (e.g. "/job:a/replica:0/task:0/device:TPU:0");
// returns true if devices of this kind may be propagated.
using DeviceFilter = std::function<bool(StringPiece)>;
// Predicate over a graph node; returns true if the node is eligible to
// receive a propagated device.
using NodeFilter = std::function<bool(const Node&)>;

}  // namespace device_propagation
// Propagates device assignments through `graph`: a node accepted by
// `node_filter` that has no assigned or requested device inherits the device
// of its data inputs, provided all considered inputs agree on a single
// device accepted by `device_filter`. Iterates until a fixed point.
void PropagateDevices(const device_propagation::NodeFilter& node_filter,
                      const device_propagation::DeviceFilter& device_filter,
                      Graph* graph);
}
#endif
#include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
namespace {
// Returns the node's assigned device if one is set, otherwise its requested
// device (which may itself be empty).
const std::string& AssignedOrRequestedDevice(const Node& node) {
  const std::string& assigned = node.assigned_device_name();
  return assigned.empty() ? node.requested_device() : assigned;
}
// Attempts to assign a device to `node` based on its data inputs. Returns
// true iff the node's device was updated.
//
// The node is only updated when:
//   * it has no assigned or requested device yet, and
//   * it is accepted by `node_filter`, and
//   * all of its considered data inputs carry the same device, and that
//     device is accepted by `device_filter`.
bool UpdateDeviceFromInputs(
    const device_propagation::NodeFilter& node_filter,
    const device_propagation::DeviceFilter& device_filter, Node* node) {
  if (!AssignedOrRequestedDevice(*node).empty() || !node_filter(*node)) {
    return false;
  }
  string proposed_device = "";
  Node* proposed_src = nullptr;
  // Scan data inputs; all must agree on a single acceptable device.
  for (const Edge* e : node->in_edges()) {
    if (e->IsControlEdge()) {
      continue;
    }
    Node* src = e->src();
    const string& src_device = AssignedOrRequestedDevice(*src);
    // Skip inputs that would tie loop-control structure to an unrelated
    // device: a Switch ignores its LoopCond input, and a Merge ignores
    // Enter inputs.
    if ((node->IsSwitch() && src->IsLoopCond()) ||
        (node->IsMerge() && src->IsEnter())) {
      continue;
    }
    // Any input whose device fails the filter (typically including inputs
    // with no device at all) blocks propagation entirely.
    if (!device_filter(src_device)) return false;
    if (proposed_src == nullptr) {
      // First qualifying input: adopt its device as the proposal.
      proposed_device = src_device;
      proposed_src = src;
    } else if (proposed_device != src_device) {
      // Conflicting input devices: do not propagate.
      return false;
    }
  }
  if (proposed_src) {
    // Copy both the assigned and requested device from the chosen input so
    // the node matches it exactly.
    node->set_assigned_device_name(proposed_src->assigned_device_name());
    node->set_requested_device(proposed_src->requested_device());
    return true;
  } else {
    return false;
  }
}
}
// Repeatedly sweeps the graph in breadth-first order, copying devices from
// inputs to eligible nodes, until a full sweep makes no further change.
void PropagateDevices(const device_propagation::NodeFilter& node_filter,
                      const device_propagation::DeviceFilter& device_filter,
                      Graph* graph) {
  bool changed;
  do {
    changed = false;
    BreadthFirstTraversal(
        *graph, {}, [&changed, &node_filter, &device_filter](Node* node) {
          if (UpdateDeviceFromInputs(node_filter, device_filter, node)) {
            changed = true;
          }
        });
  } while (changed);
}
} | #include "tensorflow/core/common_runtime/device_propagation.h"
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status_test_util.h"
using ::testing::UnorderedElementsAreArray;
namespace tensorflow {
namespace {
// Device names used throughout the tests below.
const char kTpu0[] = "/job:localhost/replica:0/task:0/device:TPU:0";
const char kTpu1[] = "/job:localhost/replica:0/task:0/device:TPU:1";
const char kTpu2[] = "/job:localhost/replica:0/task:0/device:TPU:2";
const char kGpu0[] = "/job:localhost/replica:0/task:0/device:GPU:0";

// Device filter accepting any TPU device.
bool IsTPUDevice(StringPiece device_name) {
  return absl::StrContains(device_name, "device:TPU:");
}
// Returns a node filter accepting nodes whose op type is in `ops`.
// The set is captured by value so the returned filter stays valid even if it
// outlives the caller's argument: the original captured the reference
// parameter, which dangles once a temporary set (as used at every call site,
// e.g. TargetOps({"Identity", "Merge"})) is destroyed.
device_propagation::NodeFilter TargetOps(
    const absl::flat_hash_set<std::string>& ops) {
  return [ops](const Node& n) { return ops.contains(n.type_string()); };
}
// Collects a {node name -> effective device} map over all non-source/sink
// nodes, where the effective device is the assigned device when set,
// otherwise the requested device.
absl::flat_hash_map<std::string, std::string> GetNodeNameDevices(
    const Graph& graph) {
  absl::flat_hash_map<std::string, std::string> node_name_devices;
  for (const Node* node : graph.nodes()) {
    if (node->IsSource() || node->IsSink()) {
      continue;
    }
    const string& device = node->assigned_device_name().empty()
                               ? node->requested_device()
                               : node->assigned_device_name();
    node_name_devices[node->name()] = device;
  }
  return node_name_devices;
}
// Devices propagate through eligible Identity/Merge nodes. "E" stays empty
// because its inputs disagree (kTpu1 via B vs kTpu0 via C); "F" keeps its
// pre-assigned device.
TEST(DevicePropagationTest, PropagateTPUDevices) {
  Scope scope = Scope::NewRootScope().ExitOnError();
  auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
  a.node()->set_assigned_device_name(kTpu0);
  auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
  b.node()->set_assigned_device_name(kTpu1);
  auto c = ops::Identity(scope.WithOpName("C"), a);
  auto d =
      ops::Merge(scope.WithOpName("D"), std::initializer_list<Input>{a, c});
  auto e =
      ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{b, c});
  auto f = ops::Identity(scope.WithOpName("F"), a);
  f.node()->set_assigned_device_name(kTpu2);
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(scope.ToGraph(&graph));
  PropagateDevices(TargetOps({"Identity", "Merge"}), IsTPUDevice, &graph);
  EXPECT_THAT(
      GetNodeNameDevices(graph),
      UnorderedElementsAreArray(
          std::vector<std::pair<std::string, std::string>>{
              {"A", kTpu0},
              {"B", kTpu1},
              {"C", kTpu0},
              {"D", kTpu0},
              {"E", ""},
              {"F", kTpu2},
          }));
}

// Nodes whose op type is rejected by the node filter are left untouched.
TEST(DevicePropagationTest, DoNotPropagateToUnsupportedOps) {
  Scope scope = Scope::NewRootScope().ExitOnError();
  auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
  a.node()->set_assigned_device_name(kTpu0);
  auto b = ops::Identity(scope.WithOpName("B"), a);
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(scope.ToGraph(&graph));
  PropagateDevices(TargetOps({"Merge"}), IsTPUDevice, &graph);
  EXPECT_THAT(GetNodeNameDevices(graph),
              UnorderedElementsAreArray(
                  std::vector<std::pair<std::string, std::string>>{
                      {"A", kTpu0},
                      {"B", ""},
                  }));
}

// Devices rejected by the device filter (GPU here) are not propagated.
TEST(DevicePropagationTest, DoNotPropagateUnmatchedDevices) {
  Scope scope = Scope::NewRootScope().ExitOnError();
  auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
  a.node()->set_assigned_device_name(kGpu0);
  auto b = ops::Identity(scope.WithOpName("B"), a);
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(scope.ToGraph(&graph));
  PropagateDevices(TargetOps({"Identity"}), IsTPUDevice, &graph);
  EXPECT_THAT(GetNodeNameDevices(graph),
              UnorderedElementsAreArray(
                  std::vector<std::pair<std::string, std::string>>{
                      {"A", kGpu0},
                      {"B", ""},
                  }));
}

// A Switch node ignores its LoopCond input when inheriting a device.
TEST(DevicePropagationTest, SwitchOpShouldIgnoreLoopCondOp) {
  Scope scope = Scope::NewRootScope().ExitOnError();
  auto a = ops::Placeholder(scope.WithOpName("A"), DT_BOOL);
  auto b = ops::LoopCond(scope.WithOpName("B"), a);
  auto c = ops::Placeholder(scope.WithOpName("C"), DT_FLOAT);
  c.node()->set_assigned_device_name(kTpu2);
  auto d = ops::Switch(scope.WithOpName("D"), c, b);
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(scope.ToGraph(&graph));
  PropagateDevices(TargetOps({"Switch", "LoopCond"}), IsTPUDevice, &graph);
  EXPECT_THAT(
      GetNodeNameDevices(graph),
      UnorderedElementsAreArray(std::vector<
                                std::pair<std::string, std::string>>{
          {"A", ""},
          {"B", ""},
          {"C", kTpu2},
          {"D", kTpu2},
      }));
}

// A Merge node ignores its Enter inputs when inheriting a device.
TEST(DevicePropagationTest, MergeOpShouldIgnoreEnterOp) {
  Scope scope = Scope::NewRootScope().ExitOnError();
  auto a = ops::Placeholder(scope.WithOpName("A"), DT_FLOAT);
  auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
  b.node()->set_assigned_device_name(kTpu2);
  auto c = ops::internal::Enter(scope.WithOpName("C"), a, "Enter");
  auto d = ops::NextIteration(scope.WithOpName("D"), b);
  auto e =
      ops::Merge(scope.WithOpName("E"), std::initializer_list<Input>{c, d});
  Graph graph(OpRegistry::Global());
  TF_ASSERT_OK(scope.ToGraph(&graph));
  PropagateDevices(TargetOps({"Enter", "Merge", "NextIteration"}), IsTPUDevice,
                   &graph);
  EXPECT_THAT(
      GetNodeNameDevices(graph),
      UnorderedElementsAreArray(std::vector<
                                std::pair<std::string, std::string>>{
          {"A", ""},
          {"B", kTpu2},
          {"C", ""},
          {"D", kTpu2},
          {"E", kTpu2},
      }));
}
}
} |
274 | #ifndef TENSORFLOW_TSL_LIB_IO_RANDOM_INPUTSTREAM_H_
#define TENSORFLOW_TSL_LIB_IO_RANDOM_INPUTSTREAM_H_
#include "tsl/lib/io/inputstream_interface.h"
#include "tsl/platform/cord.h"
#include "tsl/platform/file_system.h"
namespace tsl {
namespace io {
// An InputStreamInterface backed by a RandomAccessFile. The stream tracks
// its own read offset, so it additionally supports O(1) Seek().
class RandomAccessInputStream : public InputStreamInterface {
 public:
  // Does not take ownership of `file` unless `owns_file` is true, in which
  // case `file` is deleted in the destructor.
  RandomAccessInputStream(RandomAccessFile* file, bool owns_file = false);

  ~RandomAccessInputStream() override;

  absl::Status ReadNBytes(int64_t bytes_to_read, tstring* result) override;

#if defined(TF_CORD_SUPPORT)
  absl::Status ReadNBytes(int64_t bytes_to_read, absl::Cord* result) override;
#endif

  absl::Status SkipNBytes(int64_t bytes_to_skip) override;

  // Current byte offset within the file.
  int64_t Tell() const override;

  // Repositions the stream. NOTE(review): the position is not validated
  // here (negative values are accepted); errors surface on the next read.
  absl::Status Seek(int64_t position) {
    pos_ = position;
    return absl::OkStatus();
  }

  absl::Status Reset() override { return Seek(0); }

 private:
  RandomAccessFile* file_;  // Not owned unless owns_file_ is true.
  int64_t pos_ = 0;         // Current read offset.
  bool owns_file_ = false;
};
}
}
#endif
#include "tsl/lib/io/random_inputstream.h"
#include <memory>
namespace tsl {
namespace io {
RandomAccessInputStream::RandomAccessInputStream(RandomAccessFile* file,
                                                 bool owns_file)
    : file_(file), owns_file_(owns_file) {}

RandomAccessInputStream::~RandomAccessInputStream() {
  // Only delete the underlying file if ownership was transferred at
  // construction time.
  if (owns_file_) {
    delete file_;
  }
}
// Reads up to `bytes_to_read` bytes into `*result`, replacing its contents.
// On a short read, `*result` holds the bytes actually read and the file's
// OutOfRange status is returned. The stream position advances by the number
// of bytes actually read (on OK or OutOfRange).
absl::Status RandomAccessInputStream::ReadNBytes(int64_t bytes_to_read,
                                                 tstring* result) {
  if (bytes_to_read < 0) {
    return errors::InvalidArgument("Cannot read negative number of bytes");
  }
  result->clear();
  result->resize_uninitialized(bytes_to_read);
  char* result_buffer = &(*result)[0];
  StringPiece data;
  absl::Status s = file_->Read(pos_, bytes_to_read, &data, result_buffer);
  // Some RandomAccessFile implementations return a StringPiece that aliases
  // their own internal storage rather than filling the scratch buffer; copy
  // the bytes into the result in that case.
  if (data.data() != result_buffer) {
    memmove(result_buffer, data.data(), data.size());
  }
  // Shrink to the number of bytes actually read (short read at EOF).
  result->resize(data.size());
  if (s.ok() || errors::IsOutOfRange(s)) {
    pos_ += data.size();
  }
  return s;
}
#if defined(TF_CORD_SUPPORT)
// Cord overload: appends the bytes read to `*result` rather than replacing
// its contents. Advances the position by the number of bytes appended.
absl::Status RandomAccessInputStream::ReadNBytes(int64_t bytes_to_read,
                                                 absl::Cord* result) {
  if (bytes_to_read < 0) {
    return errors::InvalidArgument("Cannot read negative number of bytes");
  }
  int64_t current_size = result->size();
  absl::Status s = file_->Read(pos_, bytes_to_read, result);
  if (s.ok() || errors::IsOutOfRange(s)) {
    // The cord grew by exactly the number of bytes actually read.
    pos_ += result->size() - current_size;
  }
  return s;
}
#endif
// Upper bound on the per-iteration read (and the scratch buffer) used when
// a skip has to be implemented by reading through the file.
static constexpr int64_t kMaxSkipSize = 8 * 1024 * 1024;

// Skips `bytes_to_skip` bytes forward. Fast path: probe the last byte of the
// skipped range; if it is readable the whole range exists, so advance the
// position without reading the data. Otherwise read through the range in
// chunks so a short file is reported as OutOfRange at the right position.
absl::Status RandomAccessInputStream::SkipNBytes(int64_t bytes_to_skip) {
  if (bytes_to_skip < 0) {
    return errors::InvalidArgument("Can't skip a negative number of bytes");
  }
  if (bytes_to_skip == 0) {
    return absl::OkStatus();
  }
  {
    // Fast path: a single one-byte probe, read into a stack byte.
    char probe;
    StringPiece data;
    absl::Status s = file_->Read(pos_ + bytes_to_skip - 1, 1, &data, &probe);
    if ((s.ok() || errors::IsOutOfRange(s)) && data.size() == 1) {
      pos_ += bytes_to_skip;
      return absl::OkStatus();
    }
  }
  // Slow path. Allocate only as much scratch as is actually needed, instead
  // of unconditionally allocating kMaxSkipSize (8 MiB) up front as before.
  const int64_t scratch_size = std::min<int64_t>(kMaxSkipSize, bytes_to_skip);
  std::unique_ptr<char[]> scratch(new char[scratch_size]);
  while (bytes_to_skip > 0) {
    int64_t bytes_to_read = std::min<int64_t>(kMaxSkipSize, bytes_to_skip);
    StringPiece data;
    absl::Status s = file_->Read(pos_, bytes_to_read, &data, scratch.get());
    if (s.ok() || errors::IsOutOfRange(s)) {
      pos_ += data.size();
    } else {
      return s;
    }
    if (data.size() < static_cast<size_t>(bytes_to_read)) {
      return errors::OutOfRange("reached end of file");
    }
    bytes_to_skip -= bytes_to_read;
  }
  return absl::OkStatus();
}
int64_t RandomAccessInputStream::Tell() const { return pos_; }
}
} | #include "tsl/lib/io/random_inputstream.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
namespace {
// Sequential reads advance Tell(); reading past EOF yields OutOfRange with
// the remaining bytes in the result.
TEST(RandomInputStream, ReadNBytes) {
  Env* env = Env::Default();
  string fname = testing::TmpDir() + "/random_inputbuffer_test";
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  tstring read;
  RandomAccessInputStream in(file.get());
  TF_ASSERT_OK(in.ReadNBytes(3, &read));
  EXPECT_EQ(read, "012");
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "");
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(5, &read));
  EXPECT_EQ(read, "34567");
  EXPECT_EQ(8, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "");
  EXPECT_EQ(8, in.Tell());
  EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(20, &read)));
  EXPECT_EQ(read, "89");
  EXPECT_EQ(10, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "");
  EXPECT_EQ(10, in.Tell());
}

#if defined(TF_CORD_SUPPORT)
// The Cord overload appends rather than replaces, so the cord accumulates
// everything read so far.
TEST(RandomInputStream, ReadNBytesWithCords) {
  Env* env = Env::Default();
  string fname = testing::TmpDir() + "/random_inputbuffer_test";
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  absl::Cord read;
  RandomAccessInputStream in(file.get());
  TF_ASSERT_OK(in.ReadNBytes(3, &read));
  EXPECT_EQ(read, "012");
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "012");
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(5, &read));
  EXPECT_EQ(read, "01234567");
  EXPECT_EQ(8, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "01234567");
  EXPECT_EQ(8, in.Tell());
  EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(20, &read)));
  EXPECT_EQ(read, "0123456789");
  EXPECT_EQ(10, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "0123456789");
  EXPECT_EQ(10, in.Tell());
}
#endif

// Skipping moves the position without producing data; skipping past EOF
// yields OutOfRange with the position clamped to the file size.
TEST(RandomInputStream, SkipNBytes) {
  Env* env = Env::Default();
  string fname = testing::TmpDir() + "/random_inputbuffer_test";
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  tstring read;
  RandomAccessInputStream in(file.get());
  TF_ASSERT_OK(in.SkipNBytes(3));
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(0, &read));
  EXPECT_EQ(read, "");
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(4, &read));
  EXPECT_EQ(read, "3456");
  EXPECT_EQ(7, in.Tell());
  TF_ASSERT_OK(in.SkipNBytes(0));
  EXPECT_EQ(7, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(2, &read));
  EXPECT_EQ(read, "78");
  EXPECT_EQ(9, in.Tell());
  EXPECT_TRUE(errors::IsOutOfRange(in.SkipNBytes(20)));
  EXPECT_EQ(10, in.Tell());
  EXPECT_TRUE(errors::IsOutOfRange(in.ReadNBytes(5, &read)));
  EXPECT_EQ(read, "");
  EXPECT_EQ(10, in.Tell());
}

// Seek repositions the stream both forward and backward.
TEST(RandomInputStream, Seek) {
  Env* env = Env::Default();
  string fname = testing::TmpDir() + "/random_inputbuffer_seek_test";
  TF_ASSERT_OK(WriteStringToFile(env, fname, "0123456789"));
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(fname, &file));
  tstring read;
  RandomAccessInputStream in(file.get());
  TF_ASSERT_OK(in.Seek(3));
  EXPECT_EQ(3, in.Tell());
  TF_ASSERT_OK(in.ReadNBytes(4, &read));
  EXPECT_EQ(read, "3456");
  EXPECT_EQ(7, in.Tell());
  TF_ASSERT_OK(in.Seek(1));
  TF_ASSERT_OK(in.ReadNBytes(4, &read));
  EXPECT_EQ(read, "1234");
  EXPECT_EQ(5, in.Tell());
}
}
}
} |
275 | #ifndef QUICHE_COMMON_SIMPLE_BUFFER_ALLOCATOR_H_
#define QUICHE_COMMON_SIMPLE_BUFFER_ALLOCATOR_H_
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/quiche_buffer_allocator.h"
namespace quiche {
// Trivial QuicheBufferAllocator that forwards to global operator new[] /
// delete[].
class QUICHE_EXPORT SimpleBufferAllocator : public QuicheBufferAllocator {
 public:
  // Returns a lazily-created, never-destroyed process-wide instance.
  static SimpleBufferAllocator* Get() {
    static SimpleBufferAllocator* singleton = new SimpleBufferAllocator();
    return singleton;
  }

  char* New(size_t size) override;
  // `flag_enable` is accepted for interface compatibility and ignored.
  char* New(size_t size, bool flag_enable) override;
  void Delete(char* buffer) override;
};
}
#endif
#include "quiche/common/simple_buffer_allocator.h"
namespace quiche {
// Allocates `size` bytes from the global heap.
char* SimpleBufferAllocator::New(size_t size) { return new char[size]; }

// The boolean flag is ignored; behaves exactly like New(size).
char* SimpleBufferAllocator::New(size_t size, bool ) {
  return New(size);
}

// Releases a buffer previously returned by New(); delete[] on nullptr is a
// no-op, so null buffers are safe.
void SimpleBufferAllocator::Delete(char* buffer) { delete[] buffer; }
} | #include "quiche/common/simple_buffer_allocator.h"
#include <utility>
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
// New() returns a usable, non-null buffer that Delete() releases.
TEST(SimpleBufferAllocatorTest, NewDelete) {
  SimpleBufferAllocator alloc;
  char* buf = alloc.New(4);
  EXPECT_NE(nullptr, buf);
  alloc.Delete(buf);
}

// Deleting nullptr must be a no-op.
TEST(SimpleBufferAllocatorTest, DeleteNull) {
  SimpleBufferAllocator alloc;
  alloc.Delete(nullptr);
}

// Move construction transfers ownership and empties the source buffer.
TEST(SimpleBufferAllocatorTest, MoveBuffersConstructor) {
  SimpleBufferAllocator alloc;
  QuicheBuffer buffer1(&alloc, 16);
  EXPECT_NE(buffer1.data(), nullptr);
  EXPECT_EQ(buffer1.size(), 16u);
  QuicheBuffer buffer2(std::move(buffer1));
  EXPECT_EQ(buffer1.data(), nullptr);
  EXPECT_EQ(buffer1.size(), 0u);
  EXPECT_NE(buffer2.data(), nullptr);
  EXPECT_EQ(buffer2.size(), 16u);
}

// Move assignment transfers ownership and empties the source buffer.
TEST(SimpleBufferAllocatorTest, MoveBuffersAssignment) {
  SimpleBufferAllocator alloc;
  QuicheBuffer buffer1(&alloc, 16);
  QuicheBuffer buffer2;
  EXPECT_NE(buffer1.data(), nullptr);
  EXPECT_EQ(buffer1.size(), 16u);
  EXPECT_EQ(buffer2.data(), nullptr);
  EXPECT_EQ(buffer2.size(), 0u);
  buffer2 = std::move(buffer1);
  EXPECT_EQ(buffer1.data(), nullptr);
  EXPECT_EQ(buffer1.size(), 0u);
  EXPECT_NE(buffer2.data(), nullptr);
  EXPECT_EQ(buffer2.size(), 16u);
}

// QuicheBuffer::Copy round-trips the contents of a string_view.
TEST(SimpleBufferAllocatorTest, CopyBuffer) {
  SimpleBufferAllocator alloc;
  const absl::string_view original = "Test string";
  QuicheBuffer copy = QuicheBuffer::Copy(&alloc, original);
  EXPECT_EQ(copy.AsStringView(), original);
}
}
} |
276 | #ifndef TENSORFLOW_LITE_TOOLS_TOOL_PARAMS_H_
#define TENSORFLOW_LITE_TOOLS_TOOL_PARAMS_H_
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
namespace tflite {
namespace tools {
template <typename T>
class TypedToolParam;
// Type-erased base class for a single named tool parameter. Concrete values
// live in TypedToolParam<T>; the stored ParamType tag guards every typed
// access at runtime.
class ToolParam {
 protected:
  enum class ParamType { TYPE_INT32, TYPE_FLOAT, TYPE_BOOL, TYPE_STRING };
  // Maps a C++ type to its ParamType tag; specialized in the .cc file for
  // int32_t, bool, float and std::string.
  template <typename T>
  static ParamType GetValueType();

 public:
  // Creates a TypedToolParam<T> holding `default_value`. `position` is an
  // ordering hint carried alongside the value (0 by default).
  template <typename T>
  static std::unique_ptr<ToolParam> Create(const T& default_value,
                                           int position = 0) {
    auto* param = new TypedToolParam<T>(default_value);
    param->SetPosition(position);
    return std::unique_ptr<ToolParam>(param);
  }

  // Checked downcasts: fail (via TFLITE_TOOLS_CHECK in AssertHasSameType)
  // when T does not match the stored type tag.
  template <typename T>
  TypedToolParam<T>* AsTyped() {
    AssertHasSameType(GetValueType<T>(), type_);
    return static_cast<TypedToolParam<T>*>(this);
  }
  template <typename T>
  const TypedToolParam<T>* AsConstTyped() const {
    AssertHasSameType(GetValueType<T>(), type_);
    return static_cast<const TypedToolParam<T>*>(this);
  }

  virtual ~ToolParam() {}

  explicit ToolParam(ParamType type)
      : has_value_set_(false), position_(0), type_(type) {}

  // True once Set() has been called; the construction default doesn't count.
  bool HasValueSet() const { return has_value_set_; }

  int GetPosition() const { return position_; }
  void SetPosition(int position) { position_ = position; }

  // Copies value and position from another param of the same dynamic type.
  virtual void Set(const ToolParam&) {}

  virtual std::unique_ptr<ToolParam> Clone() const = 0;

 protected:
  bool has_value_set_;  // Whether Set() was ever called.
  int position_;        // Ordering hint; 0 by default.

 private:
  static void AssertHasSameType(ParamType a, ParamType b);
  const ParamType type_;  // Immutable runtime type tag.
};
// Concrete parameter holding a value of type T.
template <typename T>
class TypedToolParam : public ToolParam {
 public:
  explicit TypedToolParam(const T& value)
      : ToolParam(GetValueType<T>()), value_(value) {}

  // Overwrites the value and marks the parameter as explicitly set.
  void Set(const T& value) {
    value_ = value;
    has_value_set_ = true;
  }

  const T& Get() const { return value_; }

  // Copies value and position from `other`, which must wrap the same T
  // (enforced by AsConstTyped).
  void Set(const ToolParam& other) override {
    Set(other.AsConstTyped<T>()->Get());
    SetPosition(other.AsConstTyped<T>()->GetPosition());
  }

  // Note: Clone() goes through Create(), which freshly constructs the copy,
  // so the clone reports HasValueSet() == false regardless of this object.
  std::unique_ptr<ToolParam> Clone() const override {
    return ToolParam::Create<T>(value_, position_);
  }

 private:
  T value_;
};
// A named collection of ToolParam objects keyed by parameter name. All
// typed accessors check (fatally) that the name exists and the type matches.
class ToolParams {
 public:
  void AddParam(const std::string& name, std::unique_ptr<ToolParam> value) {
    params_[name] = std::move(value);
  }

  void RemoveParam(const std::string& name) { params_.erase(name); }

  bool HasParam(const std::string& name) const {
    return params_.find(name) != params_.end();
  }

  bool Empty() const { return params_.empty(); }

  // Returns nullptr when `name` is not present.
  const ToolParam* GetParam(const std::string& name) const {
    const auto& entry = params_.find(name);
    if (entry == params_.end()) return nullptr;
    return entry->second.get();
  }

  template <typename T>
  void Set(const std::string& name, const T& value, int position = 0) {
    AssertParamExists(name);
    params_.at(name)->AsTyped<T>()->Set(value);
    params_.at(name)->AsTyped<T>()->SetPosition(position);
  }

  template <typename T>
  bool HasValueSet(const std::string& name) const {
    AssertParamExists(name);
    return params_.at(name)->AsConstTyped<T>()->HasValueSet();
  }

  template <typename T>
  int GetPosition(const std::string& name) const {
    AssertParamExists(name);
    return params_.at(name)->AsConstTyped<T>()->GetPosition();
  }

  template <typename T>
  T Get(const std::string& name) const {
    AssertParamExists(name);
    return params_.at(name)->AsConstTyped<T>()->Get();
  }

  // Copies values from `other` into parameters that already exist here;
  // parameters unique to `other` are ignored.
  void Set(const ToolParams& other);

  // Adds parameters from `other` that are missing here; when `overwrite` is
  // true, parameters present in both are overwritten as well.
  void Merge(const ToolParams& other, bool overwrite = false);

 private:
  void AssertParamExists(const std::string& name) const;
  std::unordered_map<std::string, std::unique_ptr<ToolParam>> params_;
};
#define LOG_TOOL_PARAM(params, type, name, description, verbose) \
do { \
TFLITE_MAY_LOG(INFO, (verbose) || params.HasValueSet<type>(name)) \
<< description << ": [" << params.Get<type>(name) << "]"; \
} while (0)
}
}
#endif
#include "tensorflow/lite/tools/tool_params.h"
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/tools/logging.h"
namespace tflite {
namespace tools {
// Fails fatally when a typed accessor is used with the wrong type.
void ToolParam::AssertHasSameType(ToolParam::ParamType a,
                                  ToolParam::ParamType b) {
  TFLITE_TOOLS_CHECK(a == b) << "Type mismatch while accessing parameter.";
}

// C++ type -> ParamType tag mappings for the supported parameter types.
template <>
ToolParam::ParamType ToolParam::GetValueType<int32_t>() {
  return ToolParam::ParamType::TYPE_INT32;
}

template <>
ToolParam::ParamType ToolParam::GetValueType<bool>() {
  return ToolParam::ParamType::TYPE_BOOL;
}

template <>
ToolParam::ParamType ToolParam::GetValueType<float>() {
  return ToolParam::ParamType::TYPE_FLOAT;
}

template <>
ToolParam::ParamType ToolParam::GetValueType<std::string>() {
  return ToolParam::ParamType::TYPE_STRING;
}

// Fails fatally when a named parameter is missing from the collection.
void ToolParams::AssertParamExists(const std::string& name) const {
  TFLITE_TOOLS_CHECK(HasParam(name)) << name << " was not found.";
}
void ToolParams::Set(const ToolParams& other) {
for (const auto& param : params_) {
const ToolParam* other_param = other.GetParam(param.first);
if (other_param == nullptr) continue;
param.second->Set(*other_param);
}
}
// Adds clones of parameters that only exist in `other`; parameters present
// in both collections are overwritten only when `overwrite` is true.
void ToolParams::Merge(const ToolParams& other, bool overwrite) {
  for (const auto& [name, param] : other.params_) {
    auto it = params_.find(name);
    if (it != params_.end()) {
      if (overwrite) {
        it->second->Set(*param);
      }
    } else {
      AddParam(name, param->Clone());
    }
  }
}
}
} | #include "tensorflow/lite/tools/tool_params.h"
#include <gtest/gtest.h>
namespace tflite {
namespace tools {
namespace {
// Set() copies value and position only for params present in both; params
// unique to `others` are not added.
TEST(ToolParams, SetTest) {
  ToolParams params;
  params.AddParam("some-int1", ToolParam::Create<int>(13));
  params.AddParam("some-int2", ToolParam::Create<int>(17));
  ToolParams others;
  others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
  others.AddParam("some-bool", ToolParam::Create<bool>(true, 1));
  params.Set(others);
  EXPECT_EQ(19, params.Get<int>("some-int1"));
  EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
  EXPECT_TRUE(params.HasValueSet<int>("some-int1"));
  EXPECT_EQ(17, params.Get<int>("some-int2"));
  EXPECT_EQ(0, params.GetPosition<int>("some-int2"));
  EXPECT_FALSE(params.HasValueSet<int>("some-int2"));
  EXPECT_FALSE(params.HasParam("some-bool"));
}

// Merge(overwrite=true) overwrites common params and adds missing ones.
TEST(ToolParams, MergeTestOverwriteTrue) {
  ToolParams params;
  params.AddParam("some-int1", ToolParam::Create<int>(13));
  params.AddParam("some-int2", ToolParam::Create<int>(17));
  ToolParams others;
  others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
  others.AddParam("some-bool", ToolParam::Create<bool>(true));
  params.Merge(others, true);
  EXPECT_EQ(19, params.Get<int>("some-int1"));
  EXPECT_EQ(5, params.GetPosition<int>("some-int1"));
  EXPECT_EQ(17, params.Get<int>("some-int2"));
  EXPECT_TRUE(params.Get<bool>("some-bool"));
}

// Default Merge() keeps existing params and only adds missing ones.
TEST(ToolParams, MergeTestOverwriteFalse) {
  ToolParams params;
  params.AddParam("some-int1", ToolParam::Create<int>(13));
  params.AddParam("some-int2", ToolParam::Create<int>(17));
  ToolParams others;
  others.AddParam("some-int1", ToolParam::Create<int>(19, 5));
  others.AddParam("some-bool", ToolParam::Create<bool>(true));
  params.Merge(others);
  EXPECT_EQ(13, params.Get<int>("some-int1"));
  EXPECT_EQ(0, params.GetPosition<int>("some-int1"));
  EXPECT_EQ(17, params.Get<int>("some-int2"));
  EXPECT_TRUE(params.Get<bool>("some-bool"));
}
}
}
} |
277 | #ifndef I18N_ADDRESSINPUT_FORMAT_ELEMENT_H_
#define I18N_ADDRESSINPUT_FORMAT_ELEMENT_H_
#include <libaddressinput/address_field.h>
#include <iosfwd>
#include <string>
namespace i18n {
namespace addressinput {
// One element of an address format: either an address-field placeholder or
// a literal string. A literal of "\n" represents a newline separator, and a
// default-constructed element is a newline.
class FormatElement {
 public:
  // Builds a field placeholder element.
  explicit FormatElement(AddressField field);
  // Builds a literal element; `literal` must be non-empty.
  explicit FormatElement(const std::string& literal);
  // Builds a newline element (literal "\n").
  FormatElement();

  // True for field elements: only field elements have an empty literal.
  bool IsField() const { return literal_.empty(); }
  bool IsNewline() const { return literal_ == "\n"; }

  AddressField GetField() const { return field_; }
  const std::string& GetLiteral() const { return literal_; }

  bool operator==(const FormatElement& other) const;

 private:
  // field_ is only meaningful when IsField(); literal elements store COUNTRY
  // as a placeholder value.
  AddressField field_;
  std::string literal_;
};
}
}
std::ostream& operator<<(std::ostream& o,
const i18n::addressinput::FormatElement& element);
#endif
#include "format_element.h"
#include <libaddressinput/address_field.h>
#include <cassert>
#include <ostream>
#include <string>
namespace i18n {
namespace addressinput {
FormatElement::FormatElement(AddressField field) : field_(field), literal_() {}

FormatElement::FormatElement(const std::string& literal)
    : field_(COUNTRY), literal_(literal) {
  // An empty literal would make this element indistinguishable from a field
  // element, since IsField() tests for an empty literal.
  assert(!literal.empty());
}

// Default construction yields a newline element.
FormatElement::FormatElement() : field_(COUNTRY), literal_("\n") {}

bool FormatElement::operator==(const FormatElement& other) const {
  return field_ == other.field_ && literal_ == other.literal_;
}
}
}
// Streams a human-readable description of the element. Field and newline
// elements are mutually exclusive (a newline carries the non-empty literal
// "\n"), so the branches may be tested in either order.
std::ostream& operator<<(std::ostream& o,
                         const i18n::addressinput::FormatElement& element) {
  if (element.IsNewline()) {
    o << "Newline";
  } else if (element.IsField()) {
    o << "Field: " << element.GetField();
  } else {
    o << "Literal: " << element.GetLiteral();
  }
  return o;
}
#include <libaddressinput/address_field.h>
#include <sstream>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::FormatElement;
using i18n::addressinput::SORTING_CODE;
// A default-constructed element streams as "Newline".
TEST(FormatElementTest, StreamFunctionNewline) {
  std::ostringstream oss;
  oss << FormatElement();
  EXPECT_EQ("Newline", oss.str());
}

// A literal element streams as "Literal: <text>".
TEST(FormatElementTest, StreamFunctionLiteral) {
  std::ostringstream oss;
  oss << FormatElement("Text");
  EXPECT_EQ("Literal: Text", oss.str());
}

// A field element streams as "Field: <field>".
TEST(FormatElementTest, StreamFunctionField) {
  std::ostringstream oss;
  oss << FormatElement(SORTING_CODE);
  EXPECT_EQ("Field: SORTING_CODE", oss.str());
}

// Only the default-constructed element is a newline.
TEST(FormatElementTest, IsNewline) {
  EXPECT_TRUE(FormatElement().IsNewline());
  EXPECT_FALSE(FormatElement(" ").IsNewline());
  EXPECT_FALSE(FormatElement(SORTING_CODE).IsNewline());
}

// Only field-constructed elements report IsField().
TEST(FormatElementTest, IsField) {
  EXPECT_FALSE(FormatElement().IsField());
  EXPECT_FALSE(FormatElement(" ").IsField());
  EXPECT_TRUE(FormatElement(SORTING_CODE).IsField());
}
} |
278 | #ifndef I18N_ADDRESSINPUT_TEST_TESTDATA_SOURCE_H_
#define I18N_ADDRESSINPUT_TEST_TESTDATA_SOURCE_H_
#include <libaddressinput/source.h>
#include <string>
namespace i18n {
namespace addressinput {
extern const char kDataFileName[];
// A Source implementation backed by the checked-in test data file. In
// aggregate mode Get() returns the combined JSON for a whole region prefix
// (e.g. everything under "data/XX"); otherwise it returns the single entry.
class TestdataSource : public Source {
 public:
  TestdataSource(const TestdataSource&) = delete;
  TestdataSource& operator=(const TestdataSource&) = delete;

  // Reads test data from the default path (kDataFileName).
  explicit TestdataSource(bool aggregate);

  // Reads test data from `src_path`.
  TestdataSource(bool aggregate, const std::string& src_path);

  ~TestdataSource() override;

  // Invokes `data_ready` synchronously with the data for `key`. A missing
  // key is reported as success with an empty JSON object ("{}").
  void Get(const std::string& key, const Callback& data_ready) const override;

 private:
  const bool aggregate_;
  const std::string src_path_;
};
}
}
#endif
#include "testdata_source.h"
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
namespace i18n {
namespace addressinput {
const char kDataFileName[] = TEST_DATA_DIR "/countryinfo.txt";
namespace {
// Keys in the in-memory map are prefixed with one of these characters so a
// single map can hold both plain entries ('-') and per-region aggregated
// entries ('+').
const char kNormalPrefix = '-';
const char kAggregatePrefix = '+';

// Aggregatable keys look like "data/<two-letter CLDR region code>...".
const char kDataKeyPrefix[] = "data/";
const size_t kDataKeyPrefixLength = sizeof kDataKeyPrefix - 1;
const size_t kCldrRegionCodeLength = 2;
const size_t kAggregateDataKeyLength =
    kDataKeyPrefixLength + kCldrRegionCodeLength;

// Parses `src_path` (lines of "key=value") into a map containing
//   '-' + key        -> value   for every line, and
//   '+' + "data/XX"  -> a JSON object combining every "data/XX..." value.
// Exits the process if the file cannot be opened. Aggregation assumes that
// all keys of a region appear on consecutive lines.
std::map<std::string, std::string> InitData(const std::string& src_path) {
  std::map<std::string, std::string> data;
  std::ifstream file(src_path);
  if (!file.is_open()) {
    std::cerr << "Error opening \"" << src_path << "\"." << '\n';
    std::exit(EXIT_FAILURE);
  }
  const std::string normal_prefix(1, kNormalPrefix);
  const std::string aggregate_prefix(1, kAggregatePrefix);
  std::string key;
  std::string value;
  // Insertion hints: the input file is sorted, so each emplace lands right
  // after the previous one.
  auto last_data_it = data.end();
  auto aggregate_data_it = data.end();
  while (file.good()) {
    std::getline(file, key, '=');
    if (!key.empty()) {
      std::getline(file, value, '\n');
      last_data_it =
          data.emplace_hint(last_data_it, normal_prefix + key, value);
      if (key.compare(0,
                      kDataKeyPrefixLength,
                      kDataKeyPrefix,
                      kDataKeyPrefixLength) == 0) {
        // Compare this key's "data/XX" prefix against the current aggregate
        // key, skipping the aggregate key's one-character '+' prefix.
        if (aggregate_data_it != data.end() &&
            key.compare(0,
                        kAggregateDataKeyLength,
                        aggregate_data_it->first,
                        sizeof kAggregatePrefix,
                        kAggregateDataKeyLength) == 0) {
          // Same region: append another member to the open JSON object.
          aggregate_data_it->second.append(", \"" + key + "\": " + value);
        } else {
          assert(key.size() == kAggregateDataKeyLength);
          // New region: close the previous aggregate object (if any) and
          // start a new one.
          // NOTE(review): the final aggregate object is only closed when a
          // later key starts a new region; if the file ends with a "data/"
          // key the last aggregate may lack its closing '}' — confirm
          // against the data file layout.
          if (aggregate_data_it != data.end()) {
            aggregate_data_it->second.push_back('}');
          }
          const std::string& aggregate_key =
              aggregate_prefix + key.substr(0, kAggregateDataKeyLength);
          aggregate_data_it = data.emplace_hint(
              aggregate_data_it, aggregate_key, "{\"" + key + "\": " + value);
        }
      }
    }
  }
  file.close();
  return data;
}
// Returns the parsed data, loading it on first use. NOTE(review): the
// static cache is initialized exactly once, so the first src_path ever
// passed wins; later calls with a different path silently receive the first
// file's data.
const std::map<std::string, std::string>& GetData(const std::string& src_path) {
  static const std::map<std::string, std::string> kData(InitData(src_path));
  return kData;
}
}
TestdataSource::TestdataSource(bool aggregate, const std::string& src_path)
    : aggregate_(aggregate), src_path_(src_path) {}

// Defaults to the checked-in test data file.
TestdataSource::TestdataSource(bool aggregate)
    : aggregate_(aggregate), src_path_(kDataFileName) {}

TestdataSource::~TestdataSource() = default;
// Looks up `key` under the '-' (normal) or '+' (aggregate) prefix and hands
// a heap-allocated copy of the value to `data_ready`; the callback takes
// ownership of the string. Unknown keys succeed with "{}" so callers can
// treat "no data" uniformly.
void TestdataSource::Get(const std::string& key,
                         const Callback& data_ready) const {
  std::string prefixed_key(1, aggregate_ ? kAggregatePrefix : kNormalPrefix);
  prefixed_key += key;
  auto data_it = GetData(src_path_).find(prefixed_key);
  bool success = data_it != GetData(src_path_).end();
  std::string* data = nullptr;
  if (success) {
    data = new std::string(data_it->second);
  } else {
    // Missing keys are deliberately reported as success with empty JSON.
    success = true;
    data = new std::string("{}");
  }
  data_ready(success, key, data);
}
}
} | #include "testdata_source.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/source.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "region_data_constants.h"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::kDataFileName;
using i18n::addressinput::RegionDataConstants;
using i18n::addressinput::Source;
using i18n::addressinput::TestdataSource;
// Parameterized fixture (parameter = region code) exercising TestdataSource
// in its four construction modes. The OnDataReady callback captures the
// result of the most recent Get() into success_/key_/data_.
class TestdataSourceTest : public testing::TestWithParam<std::string> {
 public:
  TestdataSourceTest(const TestdataSourceTest&) = delete;
  TestdataSourceTest& operator=(const TestdataSourceTest&) = delete;
 protected:
  TestdataSourceTest()
      : source_(false),
        source_with_path_(false, kDataFileName),
        aggregate_source_(true),
        aggregate_source_with_path_(true, kDataFileName),
        success_(false),
        key_(),
        data_(),
        data_ready_(BuildCallback(this, &TestdataSourceTest::OnDataReady)) {}
  TestdataSource source_;                      // plain records, default file
  TestdataSource source_with_path_;            // plain records, explicit path
  TestdataSource aggregate_source_;            // aggregated records
  TestdataSource aggregate_source_with_path_;  // aggregated, explicit path
  bool success_;      // last callback's success flag
  std::string key_;   // last callback's key
  std::string data_;  // last callback's payload (copied, then freed)
  const std::unique_ptr<const Source::Callback> data_ready_;
 private:
  // Records the callback arguments; takes ownership of |data| and deletes it.
  void OnDataReady(bool success, const std::string& key, std::string* data) {
    ASSERT_FALSE(success && data == nullptr);
    success_ = success;
    key_ = key;
    if (data != nullptr) {
      data_ = *data;
      delete data;
    }
  }
};
// Verifies that |data| looks like a serialized dictionary for |key|: it must
// be non-empty, begin with {"id":"<key>", and end with "}.
testing::AssertionResult DataIsValid(const std::string& data,
                                     const std::string& key) {
  if (data.empty()) {
    return testing::AssertionFailure() << "empty data";
  }
  const std::string prefix = R"({"id":")" + key + R"(")";
  if (data.compare(0, prefix.length(), prefix) != 0) {
    return testing::AssertionFailure()
           << data << " does not begin with " << prefix;
  }
  static const char kDataEnd[] = "\"}";
  static const size_t kDataEndLength = sizeof kDataEnd - 1;
  // Safe subtraction: the prefix check above guarantees data is longer than
  // kDataEnd.
  if (data.compare(data.length() - kDataEndLength, kDataEndLength, kDataEnd,
                   kDataEndLength) != 0) {
    return testing::AssertionFailure()
           << data << " does not end with " << kDataEnd;
  }
  return testing::AssertionSuccess();
}
// Every region's plain (non-aggregated) record must be present in the default
// data file and carry the expected {"id":"<key>"...} envelope.
TEST_P(TestdataSourceTest, TestdataSourceHasValidDataForRegion) {
  std::string key = "data/" + GetParam();
  source_.Get(key, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(key, key_);
  EXPECT_TRUE(DataIsValid(data_, key));
};
// Same check for the source constructed with an explicit file path.
TEST_P(TestdataSourceTest, TestdataSourceWithPathHasValidDataForRegion) {
  std::string key = "data/" + GetParam();
  source_with_path_.Get(key, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(key, key_);
  EXPECT_TRUE(DataIsValid(data_, key));
};
// Verifies that |data| looks like an aggregated dictionary keyed by |key|:
// non-empty, begins with {"<key>, and ends with the nested "}} terminator.
testing::AssertionResult AggregateDataIsValid(const std::string& data,
                                              const std::string& key) {
  if (data.empty()) {
    return testing::AssertionFailure() << "empty data";
  }
  const std::string prefix = "{\"" + key;
  if (data.compare(0, prefix.length(), prefix) != 0) {
    return testing::AssertionFailure()
           << data << " does not begin with " << prefix;
  }
  static const char kDataEnd[] = "\"}}";
  static const size_t kDataEndLength = sizeof kDataEnd - 1;
  // Safe subtraction: the prefix check above guarantees data is longer than
  // kDataEnd.
  if (data.compare(data.length() - kDataEndLength, kDataEndLength, kDataEnd,
                   kDataEndLength) != 0) {
    return testing::AssertionFailure()
           << data << " does not end with " << kDataEnd;
  }
  return testing::AssertionSuccess();
}
// Aggregated lookups must return the combined per-prefix record for the
// region.
TEST_P(TestdataSourceTest, TestdataSourceHasValidAggregatedDataForRegion) {
  std::string key = "data/" + GetParam();
  aggregate_source_.Get(key, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(key, key_);
  EXPECT_TRUE(AggregateDataIsValid(data_, key));
};
// Same aggregated check for the source constructed with an explicit path.
TEST_P(TestdataSourceTest,
       TestdataSourceWithPathHasValidAggregatedDataForRegion) {
  std::string key = "data/" + GetParam();
  aggregate_source_with_path_.Get(key, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(key, key_);
  EXPECT_TRUE(AggregateDataIsValid(data_, key));
};
// Runs each TEST_P above once per region code known to the library.
INSTANTIATE_TEST_SUITE_P(
    AllRegions, TestdataSourceTest,
    testing::ValuesIn(RegionDataConstants::GetRegionCodes()));
// The bare "data" key must also resolve to a valid dictionary.
TEST_F(TestdataSourceTest, GetExistingData) {
  static const std::string kKey = "data";
  source_.Get(kKey, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(kKey, key_);
  EXPECT_TRUE(DataIsValid(data_, kKey));
}
// Unknown keys succeed with an empty JSON dictionary rather than failing.
TEST_F(TestdataSourceTest, GetMissingKeyReturnsEmptyDictionary) {
  static const std::string kJunkKey = "junk";
  source_.Get(kJunkKey, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(kJunkKey, key_);
  EXPECT_EQ("{}", data_);
}
// Same missing-key behavior for the aggregating source.
TEST_F(TestdataSourceTest, AggregateGetMissingKeyReturnsEmptyDictionary) {
  static const std::string kJunkKey = "junk";
  aggregate_source_.Get(kJunkKey, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(kJunkKey, key_);
  EXPECT_EQ("{}", data_);
}
// The empty key is treated like any other missing key.
TEST_F(TestdataSourceTest, GetEmptyKeyReturnsEmptyDictionary) {
  static const std::string kEmptyKey;
  source_.Get(kEmptyKey, *data_ready_);
  EXPECT_TRUE(success_);
  EXPECT_EQ(kEmptyKey, key_);
  EXPECT_EQ("{}", data_);
}
} |
279 | #ifndef TENSORFLOW_DTENSOR_MLIR_DTENSOR_LOCATION_H_
#define TENSORFLOW_DTENSOR_MLIR_DTENSOR_LOCATION_H_
#include <string>
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
namespace tensorflow {
namespace dtensor {
// Wraps |loc| in a CallSiteLoc whose callee records |file|:|line| (basename
// only, column 0). If |loc| carries an op name, the callee is additionally
// wrapped in a NameLoc preserving that name, with "/<name>" appended when
// |name| is non-empty.
mlir::Location DTensorLocation(mlir::Location loc, llvm::StringRef file,
                               unsigned int line, llvm::StringRef name = "");
// Overload taking the location attached to |op|.
mlir::Location DTensorLocation(mlir::Operation* op, llvm::StringRef file,
                               unsigned int line, llvm::StringRef name = "");
// Renders every FileLineColLoc reachable from |loc| as a human-readable
// ">> file:line:col" stack, one frame per line.
std::string DTensorLocationToString(mlir::Location loc);
}
}
// Convenience macros stamping the caller's __FILE__/__LINE__ onto a location.
#define DT_LOC(loc) \
  ::tensorflow::dtensor::DTensorLocation(loc, __FILE__, __LINE__)
#define DT_LOC2(loc, name) \
  ::tensorflow::dtensor::DTensorLocation(loc, __FILE__, __LINE__, name)
#endif
#include "tensorflow/dtensor/mlir/dtensor_location.h"
#include <algorithm>
#include <queue>
#include <string>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/utils/name_utils.h"
namespace tensorflow {
namespace dtensor {
namespace {
// Formats one stack frame as ">> <file>:<line>:<column>".
std::string CreateLocalLocationString(mlir::FileLineColLoc loc) {
  return llvm::formatv(">> {0}:{1}:{2}", loc.getFilename(), loc.getLine(),
                       loc.getColumn())
      .str();
}
}
// Builds a CallSiteLoc recording |file|:|line| (basename only, column 0) as
// the callee and |loc| as the caller. When |loc| already carries an op name,
// the callee is wrapped in a NameLoc so the name survives the nesting;
// |name|, when non-empty, is appended as an "<op>/<name>" suffix.
mlir::Location DTensorLocation(mlir::Location loc, llvm::StringRef file,
                               unsigned int line, llvm::StringRef name) {
  // Keep only the path's basename so locations stay stable across machines.
  const auto path_split = file.rsplit("/");
  if (!path_split.second.empty()) file = path_split.second;
  mlir::MLIRContext* context = loc.getContext();
  mlir::Location callee_loc =
      mlir::FileLineColLoc::get(context, file, line, 0);
  std::string op_name = GetNameFromLoc(loc);
  if (!op_name.empty()) {
    if (!name.empty()) {
      op_name = llvm::formatv("{0}/{1}", op_name, name).str();
    }
    callee_loc = mlir::NameLoc::get(mlir::StringAttr::get(context, op_name),
                                    callee_loc);
  }
  return mlir::CallSiteLoc::get(callee_loc, loc);
}
// Overload: forwards to the mlir::Location version using |op|'s location.
mlir::Location DTensorLocation(mlir::Operation* op, llvm::StringRef file,
                               unsigned int line, llvm::StringRef name) {
  return DTensorLocation(op->getLoc(), file, line, name);
}
// Flattens the location tree rooted at |loc| into a printable call stack.
// The tree is walked breadth-first: NameLoc is transparent (its child is
// visited), CallSiteLoc contributes callee then caller, and each
// FileLineColLoc becomes one formatted frame. The collected frames are
// reversed before joining, so the outermost (original) location prints first.
std::string DTensorLocationToString(mlir::Location loc) {
  llvm::SmallVector<std::string, 4> stack;
  std::queue<mlir::Location> queue;
  queue.push(loc);
  while (!queue.empty()) {
    mlir::Location& front = queue.front();
    if (auto name_loc = mlir::dyn_cast<mlir::NameLoc>(front)) {
      queue.push(name_loc.getChildLoc());
    } else if (auto callsite_loc = mlir::dyn_cast<mlir::CallSiteLoc>(front)) {
      queue.push(callsite_loc.getCallee());
      queue.push(callsite_loc.getCaller());
    } else if (auto line_loc = mlir::dyn_cast<mlir::FileLineColLoc>(front)) {
      stack.push_back(CreateLocalLocationString(line_loc));
    }
    queue.pop();
  }
  std::reverse(stack.begin(), stack.end());
  std::string s;
  llvm::raw_string_ostream ss(s);
  llvm::interleave(stack, ss, "\n");
  return ss.str();
}
}
} | #include "tensorflow/dtensor/mlir/dtensor_location.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/utils/name_utils.h"
#include "tensorflow/core/platform/test.h"
namespace {
// Asserts that |loc| is a FileLineColLoc pointing at "test.cc":|line|:|column|.
void CheckFileLineColLocation(mlir::Location loc, unsigned line,
                              unsigned column) {
  ASSERT_TRUE(mlir::isa<mlir::FileLineColLoc>(loc));
  const auto file_loc = mlir::cast<mlir::FileLineColLoc>(loc);
  EXPECT_EQ(file_loc.getFilename(), "test.cc");
  EXPECT_EQ(file_loc.getLine(), line);
  EXPECT_EQ(file_loc.getColumn(), column);
}
// A plain FileLineColLoc gains a callsite frame recording the DTensor stamp.
TEST(DTensorLocationTest, HandlesEmptyLocation) {
  mlir::MLIRContext ctx;
  mlir::Location loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20);
  loc = tensorflow::dtensor::DTensorLocation(loc, "test.cc", 21);
  ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(loc));
  auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(loc);
  CheckFileLineColLocation(callsite_loc.getCallee(), 21, 0);
  CheckFileLineColLocation(callsite_loc.getCaller(), 10, 20);
  constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0)stack";
  EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(loc), stack);
}
// Repeated stamping nests callsites; frames unwind newest-callee outward and
// the printed stack lists the original location first.
TEST(DTensorLocationTest, HandlesMultipleCalls) {
  mlir::MLIRContext ctx;
  mlir::Location test_loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20);
  test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21);
  test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 22);
  test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 23);
  test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 24);
  auto verify_loc = test_loc;
  for (int i = 0; i < 4; ++i) {
    ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(verify_loc));
    auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(verify_loc);
    auto callee_loc = callsite_loc.getCallee();
    CheckFileLineColLocation(callee_loc, 24 - i, 0);
    verify_loc = callsite_loc.getCaller();
  }
  CheckFileLineColLocation(verify_loc, 10, 20);
  constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0
>> test.cc:22:0
>> test.cc:23:0
>> test.cc:24:0)stack";
  EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack);
}
// An op name on the incoming location is preserved on both caller and callee.
TEST(DTensorLocationTest, HandlesNameLoc) {
  mlir::MLIRContext ctx;
  mlir::Location test_loc =
      mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"),
                         mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20));
  test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21);
  ASSERT_EQ(mlir::GetNameFromLoc(test_loc), "op");
  ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(test_loc));
  auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(test_loc);
  mlir::Location caller_loc =
      mlir::cast<mlir::CallSiteLoc>(test_loc).getCaller();
  ASSERT_TRUE(mlir::isa<mlir::NameLoc>(caller_loc));
  CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(caller_loc).getChildLoc(),
                           10, 20);
  mlir::Location callee_loc = callsite_loc.getCallee();
  ASSERT_TRUE(mlir::isa<mlir::NameLoc>(callee_loc));
  CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(callee_loc).getChildLoc(),
                           21, 0);
  constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0)stack";
  EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack);
}
// An explicit |name| argument is appended to the op name as "op/<name>".
TEST(DTensorLocationTest, HandlesNameLocWithName) {
  mlir::MLIRContext ctx;
  mlir::Location test_loc =
      mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"),
                         mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20));
  test_loc =
      tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21, "nested");
  EXPECT_EQ(mlir::GetNameFromLoc(test_loc), "op/nested");
  constexpr char stack[] = R"stack(>> test.cc:10:20
>> test.cc:21:0)stack";
  EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack);
}
} |
280 | #ifndef XLA_SERVICE_GPU_REDUCTION_LAYOUT_NORMALIZER_H_
#define XLA_SERVICE_GPU_REDUCTION_LAYOUT_NORMALIZER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
// GPU pass canonicalizing reduce ops so that each input's logical dimension
// order matches its physical (minor-to-major) layout; the rewrite is done by
// EnforceMinorToMajorReduceOpVisitor in the .cc file.
class ReductionLayoutNormalizer : public HloModulePass {
 public:
  absl::string_view name() const override {
    return "reduction-layout-normalizer";
  }
  using HloPassInterface::Run;
  // Returns true iff any reduction was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/reduction_layout_normalizer.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
// Visitor that rewrites each reduce so its inputs are bitcast to shapes whose
// logical dimension order equals the operand's physical (minor-to-major)
// order; the reduce then operates on that canonical view, and a bitcast (per
// tuple element for variadic reduce) restores the original output shape.
class EnforceMinorToMajorReduceOpVisitor : public DfsHloRewriteVisitor {
  absl::Status HandleReduce(HloInstruction *hlo) override {
    auto reduce = Cast<HloReduceInstruction>(hlo);
    VLOG(5) << "Input: " << reduce->ToString();
    int operand_idx = -1;
    absl::InlinedVector<HloInstruction *, 2> canonical_reduce_inputs;
    absl::InlinedVector<Shape, 2> new_reduce_shapes;
    DimensionVector out_reduce_dimensions;
    const Shape &first_instruction_shape = reduce->inputs()[0]->shape();
    for (HloInstruction *operand : reduce->inputs()) {
      operand_idx++;
      // Variadic reduce needs all inputs in one layout: copy any operand
      // whose layout differs from input 0 into input 0's layout.
      if (operand_idx != 0 &&
          operand->shape().layout() != first_instruction_shape.layout()) {
        HloInstruction *copy =
            reduce->parent()->AddInstruction(HloInstruction::CreateUnary(
                operand->shape(), HloOpcode::kCopy, operand));
        LayoutUtil::ClearLayout(copy->mutable_shape());
        TF_RETURN_IF_ERROR(LayoutUtil::CopyLayoutBetweenShapes(
            first_instruction_shape, copy->mutable_shape()));
        copy->set_metadata(operand->metadata());
        operand = copy;
        VLOG(3) << "Copying to establish consistent inputs layout: "
                << copy->ToString();
      }
      const Shape &operand_shape = operand->shape();
      const Layout &operand_layout = operand_shape.layout();
      const Shape &reduce_shape =
          reduce->shape().IsTuple() ? reduce->shape().tuple_shapes(operand_idx)
                                    : reduce->shape();
      DimensionVector new_reduce_dimensions;
      DimensionVector new_operand_shape_data;
      DimensionVector new_reduce_shape_data;
      DimensionVector new_reduce_shape_layout(reduce_shape.rank());
      std::vector<int64_t> reduce_shape_logical_to_physical =
          LayoutUtil::MakeLogicalToPhysical(reduce_shape.layout());
      // Maps an operand logical dimension to the corresponding output logical
      // dimension by discounting reduced dimensions below it; reduced
      // dimensions themselves must never be passed in (CHECK below).
      auto to_reduce_logical_dim = [&](int64_t op_logical_dim) {
        return op_logical_dim -
               absl::c_count_if(reduce->dimensions(), [&](int64_t dim) {
                 CHECK(dim != op_logical_dim);
                 return dim < op_logical_dim;
               });
      };
      // Walk the operand's dimensions in physical major-to-minor order,
      // accumulating the canonical operand/output shapes.
      for (int i = 0; i < operand_shape.rank(); i++) {
        int64_t major_to_minor_dim_idx = operand_shape.rank() - i - 1;
        int64_t logical_dim =
            operand_layout.minor_to_major(major_to_minor_dim_idx);
        int64_t dim_size = operand_shape.dimensions(logical_dim);
        VLOG(5) << "Processing logical dimension " << logical_dim << " of size "
                << dim_size;
        new_operand_shape_data.push_back(dim_size);
        if (absl::c_linear_search(reduce->dimensions(), logical_dim)) {
          new_reduce_dimensions.push_back(i);
        } else {
          new_reduce_shape_data.push_back(dim_size);
          int64_t logical_reduce_dim = to_reduce_logical_dim(logical_dim);
          int64_t physical_reduce_dim =
              reduce_shape_logical_to_physical[logical_reduce_dim];
          VLOG(5) << "logical_reduce_dim = " << logical_reduce_dim << ", "
                  << "physical_reduce_dim = " << physical_reduce_dim;
          new_reduce_shape_layout[reduce_shape.rank() - physical_reduce_dim -
                                  1] = new_reduce_shape_data.size() - 1;
        }
      }
      Shape new_operand_shape = ShapeUtil::MakeShape(
          operand_shape.element_type(), new_operand_shape_data);
      Shape new_reduce_shape = ShapeUtil::MakeShapeWithDenseLayout(
          reduce_shape.element_type(), new_reduce_shape_data,
          new_reduce_shape_layout);
      // Single-input reduce already in canonical form: nothing to do.
      if (new_operand_shape == operand_shape && reduce->inputs().size() == 1) {
        return absl::OkStatus();
      }
      HloInstruction *canonical_reduce_input =
          new_operand_shape != operand_shape
              ? reduce->parent()->AddInstruction(
                    HloInstruction::CreateBitcast(new_operand_shape, operand))
              : operand;
      canonical_reduce_input->set_metadata(operand->metadata());
      VLOG(5) << "Reduction input: " << canonical_reduce_input->ToString();
      new_reduce_shapes.push_back(new_reduce_shape);
      canonical_reduce_inputs.push_back(canonical_reduce_input);
      if (out_reduce_dimensions.empty()) {
        out_reduce_dimensions = new_reduce_dimensions;
      } else {
        // All inputs share one layout, so they must agree on the canonical
        // reduction dimensions.
        TF_RET_CHECK(out_reduce_dimensions == new_reduce_dimensions);
      }
    }
    Shape new_reduce_shape = ShapeUtil::MakeMaybeTupleShape(new_reduce_shapes);
    std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce(
        new_reduce_shape, canonical_reduce_inputs, reduce->init_values(),
        out_reduce_dimensions, reduce->to_apply());
    VLOG(5) << "Generated new reduction: " << new_reduce->ToString();
    const Shape &orig_reduce_shape = reduce->shape();
    // If the canonical output shape differs, bitcast back to the original
    // shape (per tuple element for variadic reduce).
    if (new_reduce_shape != orig_reduce_shape) {
      HloInstruction *wrapped_reduce =
          reduce->parent()->AddInstruction(std::move(new_reduce));
      if (!new_reduce_shape.IsTuple()) {
        new_reduce =
            HloInstruction::CreateBitcast(reduce->shape(), wrapped_reduce);
      } else {
        absl::InlinedVector<HloInstruction *, 2> out;
        for (int oidx = 0; oidx < reduce->input_count(); oidx++) {
          HloInstruction *gte = reduce->parent()->AddInstruction(
              HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx));
          out.push_back(
              reduce->parent()->AddInstruction(HloInstruction::CreateBitcast(
                  orig_reduce_shape.tuple_shapes(oidx), gte)));
        }
        new_reduce = HloInstruction::CreateTuple(out);
      }
    }
    VLOG(5) << "Generated output: " << new_reduce->ToString();
    return ReplaceWithNewInstruction(reduce, std::move(new_reduce));
  }
};
// Applies the minor-to-major canonicalization visitor across the module.
// Returns true iff any reduction was rewritten.
absl::StatusOr<bool> ReductionLayoutNormalizer::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  EnforceMinorToMajorReduceOpVisitor visitor;
  TF_ASSIGN_OR_RETURN(const bool changed,
                      visitor.RunOnModule(module, execution_threads));
  return changed;
}
}
} | #include "xla/service/gpu/reduction_layout_normalizer.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Fixture running ReductionLayoutNormalizer over parsed HLO and FileCheck-ing
// the rewritten module against |expected|.
class ReductionLayoutNormalizerTest : public HloTestBase {
 public:
  void CheckReductionLayoutNormalizer(
      absl::string_view hlo, std::optional<absl::string_view> expected) {
    RunAndFilecheckHloRewrite(hlo, gpu::ReductionLayoutNormalizer{}, expected);
  }
};
// Single-input reduce with a non-standard input layout is canonicalized.
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTest) {
  const char* hlo = R"(
HloModule ReduceWithLayoutChange
add {
  x0 = f32[] parameter(0)
  y0 = f32[] parameter(1)
  ROOT add0 = f32[] add(x0, y0)
}
ENTRY main {
  arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
  constant0 = f32[] constant(0)
  ROOT reduce0 = f32[4,5,16,12,12]{4,3,2,1,0} reduce(arg0, constant0),
    dimensions={1,6,7}, to_apply=add
}
)";
  CheckReductionLayoutNormalizer(hlo,
                                 R"(
)");
}
// Variadic (argmax-style) reduce with matching input layouts.
TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTestVariadic) {
  const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadic
argmax {
  running_max = f32[] parameter(0)
  running_max_idx = u32[] parameter(1)
  current_value = f32[] parameter(2)
  current_value_idx = u32[] parameter(3)
  current = (f32[], u32[]) tuple(running_max, running_max_idx)
  potential = (f32[], u32[]) tuple(current_value, current_value_idx)
  cmp_code = pred[] compare(current_value, running_max), direction=GT
  new_max = f32[] select(cmp_code, current_value, running_max)
  new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
  ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
  arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0)
  idxs = u32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(1)
  constant0 = f32[] constant(0)
  constant1 = u32[] constant(0)
  ROOT reduce0 = (
      f32[4,5,16,12,12]{4,3,2,1,0},
      u32[4,5,16,12,12]{4,3,2,1,0}
    ) reduce(arg0, idxs, constant0,constant1), dimensions={1,6,7}, to_apply=argmax
}
)";
  CheckReductionLayoutNormalizer(hlo,
                                 R"(
)");
}
// Variadic reduce whose inputs disagree on layout: the pass must insert a
// copy to unify layouts, and the rewritten module must still compute the same
// values (checked numerically via RunAndCompare).
TEST_F(ReductionLayoutNormalizerTest,
       LayoutCanonicalizerTestVariadicDifferentLayouts) {
  const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
  running_max = f32[] parameter(0)
  running_max_idx = u32[] parameter(1)
  current_value = f32[] parameter(2)
  current_value_idx = u32[] parameter(3)
  current = (f32[], u32[]) tuple(running_max, running_max_idx)
  potential = (f32[], u32[]) tuple(current_value, current_value_idx)
  cmp_code = pred[] compare(current_value, running_max), direction=GT
  new_max = f32[] select(cmp_code, current_value, running_max)
  new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
  ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
  arg0 = f32[2,3,4,7]{2,1,0,3} parameter(0)
  idxs = u32[2,3,4,7]{3,2,1,0} parameter(1)
  constant0 = f32[] constant(0)
  constant1 = u32[] constant(0)
  ROOT reduce0 = (
      f32[2,3,4]{2,1,0},
      u32[2,3,4]{2,1,0}
    ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
  CheckReductionLayoutNormalizer(hlo,
                                 R"(
)");
  EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-5, 1e-5}));
}
}
} |
281 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RELU_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RELU_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewReLUNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Emits GLSL for (leaky) ReLU with optional clipping:
//   alpha == 0          -> lower bound is the constant $activation_min$;
//   alpha != 0          -> lower bound is min(alpha * x, 0) (leaky ReLU);
//   activation_max == 0 -> no upper clip, otherwise clamp to $activation_max$.
class ReLU : public NodeShader {
 public:
  absl::Status GenerateCode(const GenerationContext& ctx,
                            GeneratedCode* generated_code) const final {
    const auto& attr = std::any_cast<const ReLUAttributes&>(ctx.op_attr);
    std::vector<Variable> params;
    // Build the lower-bound expression and register its shader parameter.
    std::string lower_bound;
    if (attr.alpha == 0) {
      params.push_back({"activation_min", attr.activation_min});
      lower_bound = "vec4($activation_min$)";
    } else {
      params.push_back({"alpha", attr.alpha});
      lower_bound = "min($alpha$ * value_0, 0.0)";
    }
    // Combine with the optional upper clip.
    std::string code;
    if (attr.activation_max == 0) {
      code = "value_0 = max(value_0, " + lower_bound + ");";
    } else {
      params.push_back({"activation_max", attr.activation_max});
      code = "value_0 = clamp(value_0, " + lower_bound +
             ", vec4($activation_max$));";
    }
    *generated_code = {
        std::move(params),
        {},
        {},
        uint3(),
        uint3(),
        std::move(code),
        IOStructure::AUTO,
        IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
};
}
// Factory for the GLSL ReLU shader generator.
std::unique_ptr<NodeShader> NewReLUNodeShader() {
  return std::make_unique<ReLU>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Fixture supplying float32 tensor references of shape 1x2x2x1 for the
// single-op model's input/output bindings.
class ReluTest : public ::testing::Test {
 public:
  ReluTest() = default;
  ~ReluTest() override = default;
  // Builds a FLOAT32 tensor reference with the given binding id.
  TensorRef<BHWC> GetTensorRef(int ref) {
    TensorRef<BHWC> tensor;
    tensor.type = DataType::FLOAT32;
    tensor.ref = ref;
    tensor.shape = BHWC(1, 2, 2, 1);
    return tensor;
  }
};
// Plain ReLU: negatives clamp to zero, positives pass through.
TEST_F(ReluTest, Smoke) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_max = 0;
  attr.alpha = 0;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {0.0, 0.0, 2.0, 8.0}));
}
// ReLU6-style: upper clip at activation_max, lower bound still zero.
TEST_F(ReluTest, ClipOnly) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_max = 6;
  attr.alpha = 0;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {0.0, 0.0, 2.0, 6.0}));
}
// Leaky ReLU: negatives scaled by alpha, no upper clip.
TEST_F(ReluTest, AlphaOnly) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_max = 0;
  attr.alpha = 0.5;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 8.0}));
}
// Leaky ReLU combined with an upper clip.
TEST_F(ReluTest, ClipAndAlpha) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_max = 6;
  attr.alpha = 0.5;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 6.0}));
}
// ReLUN1: custom lower bound (activation_min = -1), no upper clip.
TEST_F(ReluTest, ReLUN1Smoke) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_min = -1;
  attr.activation_max = 0;
  attr.alpha = 0;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-12.0f, -0.5f, 0.8f, 3.2f}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {-1.0f, -0.5f, 0.8f, 3.2f}));
}
// ReLUN1 with both bounds: clamp to [-1, 1].
TEST_F(ReluTest, ReLUN1ClipOnly) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_min = -1;
  attr.activation_max = 1;
  attr.alpha = 0;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-12.0f, -0.5f, 0.8f, 3.2f}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {-1.0f, -0.5f, 0.8f, 1.0f}));
}
// With alpha set, activation_min is ignored in favor of the leaky slope.
TEST_F(ReluTest, ReLUN1AlphaOnly) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_min = -1;
  attr.activation_max = 0;
  attr.alpha = 0.5;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 8.0}));
}
// Leaky slope plus upper clip, with activation_min set but ignored.
TEST_F(ReluTest, ReLUN1ClipAndAlpha) {
  OperationType op_type = OperationType::RELU;
  ReLUAttributes attr;
  attr.activation_min = -1;
  attr.activation_max = 6;
  attr.alpha = 0.5;
  SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)},
                      {GetTensorRef(1)});
  ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
  ASSERT_OK(model.Invoke(*NewReLUNodeShader()));
  EXPECT_THAT(model.GetOutput(0),
              Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 6.0}));
}
}
}
}
} |
282 | #ifndef XLA_SERVICE_SPMD_CANONICALIZE_ALL_GATHER_FOR_CSE_H_
#define XLA_SERVICE_SPMD_CANONICALIZE_ALL_GATHER_FOR_CSE_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
// Rewrites all-gather(reshape(x)) into reshape(all-gather(x)) when the
// reshape only inserts/deletes 1-sized dimensions, giving all-gathers a
// canonical form that common-subexpression elimination can merge.
class CanonicalizeAllGatherForCSE : public HloModulePass {
 public:
  CanonicalizeAllGatherForCSE() : next_channel_id_(0) {}
  ~CanonicalizeAllGatherForCSE() override = default;
  absl::string_view name() const override { return "canon-all-gather-for-cse"; }
  using HloPassInterface::Run;
  // Returns true iff any all-gather was rewritten.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
 private:
  absl::StatusOr<bool> RunOnComputation(HloComputation* comp);
  // Hands out fresh channel ids for rewritten channel-carrying all-gathers.
  int64_t NextChannelId() { return next_channel_id_++; }
  int64_t next_channel_id_;
};
}
#endif
#include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/utils/hlo_query.h"
namespace xla {
// Rewrites each single-operand all-gather whose operand is a chain of
// degenerate reshapes (only inserting/deleting 1-sized dims): the all-gather
// is moved to operate on the pre-reshape data, and one reshape restores the
// original result shape.
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::RunOnComputation(
    HloComputation* comp) {
  bool changed = false;
  std::vector<HloInstruction*> ordered_hlos = comp->MakeInstructionPostOrder();
  for (HloInstruction* hlo : ordered_hlos) {
    HloAllGatherInstruction* ag = DynCast<HloAllGatherInstruction>(hlo);
    // Only single-operand all-gathers are handled.
    if (!ag || ag->operand_count() > 1) {
      continue;
    }
    // Strip the chain of degenerate reshapes above the real data.
    HloInstruction* real_data = ag->mutable_operand(0);
    while (real_data->ReshapeMerelyInsertsOrDeletes1SizedDimensions()
               .has_value()) {
      real_data = real_data->mutable_operand(0);
    }
    if (real_data == ag->operand(0)) {
      continue;
    }
    const int64_t ag_dim = ag->all_gather_dimension();
    int64_t new_ag_dim;
    if (auto dims = ShapeUtil::ReshapeLeavesDimensionsUnmodified(
            ag->operand(0)->shape(), real_data->shape(), {ag_dim})) {
      // The gather dimension survives the reshape; map it directly.
      new_ag_dim = dims->at(0);
    } else {
      // Otherwise recover the matching dimension in |real_data| by matching
      // the element count major to the gather dimension.
      int64_t major_elements =
          Product(absl::MakeConstSpan(ag->operand(0)->shape().dimensions())
                      .subspan(0, ag_dim));
      new_ag_dim = 0;
      while (major_elements > 1) {
        major_elements /= real_data->shape().dimensions(new_ag_dim++);
      }
    }
    if (new_ag_dim == real_data->shape().rank()) {
      continue;
    }
    // Participant count inferred from the output/input element ratio.
    const int64_t all_gather_participants =
        ShapeUtil::ElementsIn(ag->shape()) /
        ShapeUtil::ElementsIn(ag->operand(0)->shape());
    Shape new_ag_shape = real_data->shape();
    new_ag_shape.set_dimensions(
        new_ag_dim,
        all_gather_participants * new_ag_shape.dimensions(new_ag_dim));
    // Channel-carrying collectives need a fresh, unique channel id.
    std::optional<int64_t> new_channel_id =
        ag->channel_id() ? std::make_optional(this->NextChannelId())
                         : std::nullopt;
    HloInstruction* new_ag =
        comp->AddInstruction(HloInstruction::CreateAllGather(
            new_ag_shape, {real_data}, new_ag_dim,
            ag->device_list(), ag->constrain_layout(), new_channel_id,
            ag->use_global_device_ids()));
    ag->SetupDerivedInstruction(new_ag);
    HloInstruction* new_formatting = comp->AddInstruction(
        HloInstruction::CreateReshape(ag->shape(), new_ag));
    TF_RETURN_IF_ERROR(comp->ReplaceInstruction(ag, new_formatting));
    changed = true;
  }
  return changed;
}
// Canonicalizes all-gathers in every selected computation of |module|.
// Fresh channel ids start past the module's current maximum so rewritten
// collectives never collide with existing ones.
absl::StatusOr<bool> CanonicalizeAllGatherForCSE::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  next_channel_id_ = hlo_query::NextChannelId(*module);
  bool any_changed = false;
  for (HloComputation* computation : module->computations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(const bool computation_changed,
                        RunOnComputation(computation));
    any_changed |= computation_changed;
  }
  return any_changed;
}
} | #include "xla/service/spmd/canonicalize_all_gather_for_cse.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace spmd {
namespace {
using ::testing::_;
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
// Fixture running the CanonicalizeAllGatherForCSE pass over parsed HLO.
class AllGatherCanonicalizeTest : public HloTestBase {
 public:
  // Parses |hlo_module|, runs the pass, and returns the rewritten module.
  absl::StatusOr<std::unique_ptr<HloModule>> RunPass(
      absl::string_view hlo_module) {
    TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(
                                         hlo_module, GetModuleConfigForTest()));
    HloPassPipeline pipeline("all-gather-cse");
    pipeline.AddPass<CanonicalizeAllGatherForCSE>();
    TF_RETURN_IF_ERROR(pipeline.Run(module.get()).status());
    return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module));
  }
  // Runs the pass in place on |module|. NOTE(review): |distance_threshold|
  // is not used by this pipeline — confirm whether it can be dropped.
  absl::Status RunPassOnModule(HloModule* module,
                               int64_t distance_threshold = 100) {
    HloPassPipeline pipeline("all-gather-cse");
    pipeline.AddPass<CanonicalizeAllGatherForCSE>();
    TF_RETURN_IF_ERROR(pipeline.Run(module).status());
    return absl::OkStatus();
  }
};
// A single degenerate reshape is hoisted past the all-gather; the root
// becomes reshape(all-gather(param)) with the original output shape.
TEST_F(AllGatherCanonicalizeTest, SimpleReshape) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  resh = s32[1,8]{1,0} reshape(param0)
  ROOT ag = s32[2,8]{1,0} all-gather(resh), replica_groups={{0,1}},
    dimensions={0}, channel_id=0, use_global_device_ids=true
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  const HloInstruction* const reshape =
      module->entry_computation()->root_instruction();
  EXPECT_THAT(reshape,
              AllOf(op::Reshape(op::AllGather(_)), op::Shape("s32[2,8]")));
}
// A chain of degenerate reshapes is stripped so the all-gather operates
// directly on the parameter.
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  resh = s32[1,8]{1,0} reshape(param0)
  resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
  ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
    dimensions={0}, channel_id=0, use_global_device_ids=true
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  const HloInstruction* const reshape =
      module->entry_computation()->root_instruction();
  EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
// Same, with 1-sized dims appearing at different positions along the chain.
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapes2) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  resh = s32[8,1,1]{2,1,0} reshape(param0)
  resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
  ROOT ag = s32[2,8,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
    dimensions={0}, channel_id=0, use_global_device_ids=true
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  const HloInstruction* const reshape =
      module->entry_computation()->root_instruction();
  EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
// The gather dimension is a degenerate (size-1) dimension rather than dim 0;
// the canonicalized all-gather still applies directly to the parameter.
TEST_F(AllGatherCanonicalizeTest, MultipleDegenerateReshapesNoDim0) {
  absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
  param0 = s32[8]{0} parameter(0)
  resh = s32[8,1,1]{2,1,0} reshape(param0)
  resh2 = s32[1,8,1,1]{3,2,1,0} reshape(resh)
  ROOT ag = s32[1,16,1,1]{3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
    dimensions={1}, channel_id=0, use_global_device_ids=true
})";
  auto module_status = RunPass(hlo_string);
  EXPECT_TRUE(module_status.status().ok());
  auto module = std::move(module_status).value();
  const HloInstruction* const reshape =
      module->entry_computation()->root_instruction();
  EXPECT_THAT(reshape, op::Reshape(op::AllGather(op::Parameter())));
}
TEST_F(AllGatherCanonicalizeTest, NonDegenerateReshape) {
absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
param0 = s32[8]{0} parameter(0)
resh = s32[8,1,1]{2,1,0} reshape(param0)
resh2 = s32[1,4,2,1,1]{4,3,2,1,0} reshape(resh)
ROOT ag = s32[2,4,2,1,1]{4,3,2,1,0} all-gather(resh2), replica_groups={{0,1}},
dimensions={0}, channel_id=0, use_global_device_ids=true
})";
auto module_status = RunPass(hlo_string);
EXPECT_TRUE(module_status.status().ok());
auto module = std::move(module_status).value();
const HloInstruction* const reshape =
module->entry_computation()->root_instruction();
EXPECT_THAT(reshape, AllOf(op::AllGather(op::Reshape(op::Reshape(_))),
op::Shape("s32[2,4,2,1,1]")));
}
}
}
} |
283 | #ifndef TENSORFLOW_TSL_PLATFORM_ABI_H_
#define TENSORFLOW_TSL_PLATFORM_ABI_H_
#include <string>
#include "tsl/platform/types.h"
namespace tsl {
namespace port {
std::string MaybeAbiDemangle(const char* name);
}
}
#endif
#include "tsl/platform/abi.h"
#include "tsl/platform/types.h"
#if defined(_MSC_VER)
#include <windows.h>
#include <cstring>
#else
#include <cxxabi.h>
#include <cstdlib>
#endif
#include <memory>
#include <string>
#if defined(_MSC_VER)
extern "C" char* __unDName(char* output_string, const char* name,
int max_string_length, void* (*p_alloc)(std::size_t),
void (*p_free)(void*), unsigned short disable_flags);
#endif
namespace tsl {
namespace port {
// Returns the demangled form of `name` (typically obtained from
// typeid(T).name()) if demangling succeeds; otherwise returns `name`
// unchanged. Never fails: an unparseable input is passed through as-is.
std::string MaybeAbiDemangle(const char* name) {
#if defined(_MSC_VER)
  // __unDName allocates its output buffer with the allocator we hand it
  // (std::malloc), so the buffer must be released with std::free. The
  // previous std::unique_ptr<char> used the default deleter (operator
  // delete) on malloc'd memory, which is undefined behavior.
  std::unique_ptr<char, void (*)(void*)> demangled{
      __unDName(nullptr, name, 0, std::malloc, std::free,
                static_cast<unsigned short>(0)),
      std::free};
  return std::string(demangled.get() != nullptr ? demangled.get() : name);
#else
  int status = 0;
  // __cxa_demangle returns a malloc()-allocated string; free with std::free.
  std::unique_ptr<char, void (*)(void*)> res{
      abi::__cxa_demangle(name, nullptr, nullptr, &status), std::free};
  // status == 0 means success; any other value leaves `name` untouched.
  return (status == 0) ? res.get() : name;
#endif
}
}
} | #include "tsl/platform/abi.h"
#include <typeinfo>
#include "tsl/platform/test.h"
namespace tsl {
// POD type used to exercise demangling of a namespace-qualified user type.
struct MyRandomPODType {};
TEST(AbiTest, AbiDemangleTest) {
  EXPECT_EQ(port::MaybeAbiDemangle(typeid(int).name()), "int");
#ifdef PLATFORM_WINDOWS
  // MSVC's demangler includes the class-key ("struct") in the result.
  const char pod_type_name[] = "struct tsl::MyRandomPODType";
#else
  const char pod_type_name[] = "tsl::MyRandomPODType";
#endif
  EXPECT_EQ(port::MaybeAbiDemangle(typeid(MyRandomPODType).name()),
            pod_type_name);
  // Inputs that are not valid mangled names must be returned unchanged.
  EXPECT_EQ(
      port::MaybeAbiDemangle("help! i'm caught in a C++ mangle factoryasdf"),
      "help! i'm caught in a C++ mangle factoryasdf");
}
} |
284 | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_AST_TRAVERSE_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_AST_TRAVERSE_H_
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "eval/public/ast_visitor.h"
namespace google::api::expr::runtime {
// Options controlling AstTraverse behavior.
struct TraversalOptions {
  // If true, comprehension sub-expressions are reported through the
  // dedicated Pre/PostVisitComprehensionSubexpression callbacks; otherwise
  // they are reported like ordinary call arguments via PostVisitArg.
  bool use_comprehension_callbacks;
  TraversalOptions() : use_comprehension_callbacks(false) {}
};
void AstTraverse(const google::api::expr::v1alpha1::Expr* expr,
const google::api::expr::v1alpha1::SourceInfo* source_info,
AstVisitor* visitor,
TraversalOptions options = TraversalOptions());
}
#endif
#include "eval/public/ast_traverse.h"
#include <stack>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/log/absl_log.h"
#include "absl/types/variant.h"
#include "eval/public/ast_visitor.h"
#include "eval/public/source_position.h"
namespace google::api::expr::runtime {
using google::api::expr::v1alpha1::Expr;
using google::api::expr::v1alpha1::SourceInfo;
using Ident = google::api::expr::v1alpha1::Expr::Ident;
using Select = google::api::expr::v1alpha1::Expr::Select;
using Call = google::api::expr::v1alpha1::Expr::Call;
using CreateList = google::api::expr::v1alpha1::Expr::CreateList;
using CreateStruct = google::api::expr::v1alpha1::Expr::CreateStruct;
using Comprehension = google::api::expr::v1alpha1::Expr::Comprehension;
namespace {
// Traversal frame for an expression visited as a call argument (also used
// for comprehension sub-expressions when comprehension callbacks are
// disabled). All pointers are unowned.
struct ArgRecord {
  // The argument expression itself.
  const Expr* expr;
  const SourceInfo* source_info;
  // The call (or comprehension) expression that owns this argument.
  const Expr* calling_expr;
  // Argument index, or StackRecord::kTarget for a call's receiver.
  int call_arg;
};
// Traversal frame for a comprehension sub-expression when comprehension
// callbacks are enabled. All pointers are unowned.
struct ComprehensionRecord {
  const Expr* expr;
  const SourceInfo* source_info;
  const Comprehension* comprehension;
  // The enclosing comprehension expression.
  const Expr* comprehension_expr;
  // Which slot of the comprehension this sub-expression fills.
  ComprehensionArg comprehension_arg;
  bool use_comprehension_callbacks;
};
// Traversal frame for a plain expression node.
struct ExprRecord {
  const Expr* expr;
  const SourceInfo* source_info;
};
using StackRecordKind =
    absl::variant<ExprRecord, ArgRecord, ComprehensionRecord>;
struct StackRecord {
public:
ABSL_ATTRIBUTE_UNUSED static constexpr int kNotCallArg = -1;
static constexpr int kTarget = -2;
StackRecord(const Expr* e, const SourceInfo* info) {
ExprRecord record;
record.expr = e;
record.source_info = info;
record_variant = record;
}
StackRecord(const Expr* e, const SourceInfo* info,
const Comprehension* comprehension,
const Expr* comprehension_expr,
ComprehensionArg comprehension_arg,
bool use_comprehension_callbacks) {
if (use_comprehension_callbacks) {
ComprehensionRecord record;
record.expr = e;
record.source_info = info;
record.comprehension = comprehension;
record.comprehension_expr = comprehension_expr;
record.comprehension_arg = comprehension_arg;
record.use_comprehension_callbacks = use_comprehension_callbacks;
record_variant = record;
return;
}
ArgRecord record;
record.expr = e;
record.source_info = info;
record.calling_expr = comprehension_expr;
record.call_arg = comprehension_arg;
record_variant = record;
}
StackRecord(const Expr* e, const SourceInfo* info, const Expr* call,
int argnum) {
ArgRecord record;
record.expr = e;
record.source_info = info;
record.calling_expr = call;
record.call_arg = argnum;
record_variant = record;
}
StackRecordKind record_variant;
bool visited = false;
};
// absl::visit functor dispatching pre-order callbacks for a stack frame.
struct PreVisitor {
  void operator()(const ExprRecord& record) {
    const Expr* expr = record.expr;
    const SourcePosition position(expr->id(), record.source_info);
    // Generic callback first, then the kind-specific one.
    visitor->PreVisitExpr(expr, &position);
    switch (expr->expr_kind_case()) {
      case Expr::kConstExpr:
        visitor->PreVisitConst(&expr->const_expr(), expr, &position);
        break;
      case Expr::kIdentExpr:
        visitor->PreVisitIdent(&expr->ident_expr(), expr, &position);
        break;
      case Expr::kSelectExpr:
        visitor->PreVisitSelect(&expr->select_expr(), expr, &position);
        break;
      case Expr::kCallExpr:
        visitor->PreVisitCall(&expr->call_expr(), expr, &position);
        break;
      case Expr::kListExpr:
        visitor->PreVisitCreateList(&expr->list_expr(), expr, &position);
        break;
      case Expr::kStructExpr:
        visitor->PreVisitCreateStruct(&expr->struct_expr(), expr, &position);
        break;
      case Expr::kComprehensionExpr:
        visitor->PreVisitComprehension(&expr->comprehension_expr(), expr,
                                       &position);
        break;
      default:
        // Unset/unknown expression kinds get only the generic callback.
        break;
    }
  }
  // Call arguments have no dedicated pre-order callback.
  void operator()(const ArgRecord&) {}
  void operator()(const ComprehensionRecord& record) {
    const Expr* expr = record.expr;
    const SourcePosition position(expr->id(), record.source_info);
    visitor->PreVisitComprehensionSubexpression(
        expr, record.comprehension, record.comprehension_arg, &position);
  }
  AstVisitor* visitor;
};
void PreVisit(const StackRecord& record, AstVisitor* visitor) {
absl::visit(PreVisitor{visitor}, record.record_variant);
}
// absl::visit functor dispatching post-order callbacks for a stack frame.
struct PostVisitor {
  void operator()(const ExprRecord& record) {
    const Expr* expr = record.expr;
    const SourcePosition position(expr->id(), record.source_info);
    // Kind-specific callback first, then the generic one (mirror of
    // PreVisitor's order).
    switch (expr->expr_kind_case()) {
      case Expr::kConstExpr:
        visitor->PostVisitConst(&expr->const_expr(), expr, &position);
        break;
      case Expr::kIdentExpr:
        visitor->PostVisitIdent(&expr->ident_expr(), expr, &position);
        break;
      case Expr::kSelectExpr:
        visitor->PostVisitSelect(&expr->select_expr(), expr, &position);
        break;
      case Expr::kCallExpr:
        visitor->PostVisitCall(&expr->call_expr(), expr, &position);
        break;
      case Expr::kListExpr:
        visitor->PostVisitCreateList(&expr->list_expr(), expr, &position);
        break;
      case Expr::kStructExpr:
        visitor->PostVisitCreateStruct(&expr->struct_expr(), expr, &position);
        break;
      case Expr::kComprehensionExpr:
        visitor->PostVisitComprehension(&expr->comprehension_expr(), expr,
                                        &position);
        break;
      default:
        ABSL_LOG(ERROR) << "Unsupported Expr kind: " << expr->expr_kind_case();
    }
    visitor->PostVisitExpr(expr, &position);
  }
  void operator()(const ArgRecord& record) {
    const Expr* expr = record.expr;
    const SourcePosition position(expr->id(), record.source_info);
    // A call's receiver is flagged with the kTarget sentinel.
    if (record.call_arg == StackRecord::kTarget) {
      visitor->PostVisitTarget(record.calling_expr, &position);
    } else {
      visitor->PostVisitArg(record.call_arg, record.calling_expr, &position);
    }
  }
  void operator()(const ComprehensionRecord& record) {
    const Expr* expr = record.expr;
    const SourcePosition position(expr->id(), record.source_info);
    visitor->PostVisitComprehensionSubexpression(
        expr, record.comprehension, record.comprehension_arg, &position);
  }
  AstVisitor* visitor;
};
void PostVisit(const StackRecord& record, AstVisitor* visitor) {
absl::visit(PostVisitor{visitor}, record.record_variant);
}
// Schedules the operand of a select expression, if one is present.
void PushSelectDeps(const Select* select_expr, const SourceInfo* source_info,
                    std::stack<StackRecord>* stack) {
  if (!select_expr->has_operand()) {
    return;
  }
  stack->push(StackRecord(&select_expr->operand(), source_info));
}
void PushCallDeps(const Call* call_expr, const Expr* expr,
const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
const int arg_size = call_expr->args_size();
for (int i = arg_size - 1; i >= 0; --i) {
stack->push(StackRecord(&call_expr->args(i), source_info, expr, i));
}
if (call_expr->has_target()) {
stack->push(StackRecord(&call_expr->target(), source_info, expr,
StackRecord::kTarget));
}
}
void PushListDeps(const CreateList* list_expr, const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
const auto& elements = list_expr->elements();
for (auto it = elements.rbegin(); it != elements.rend(); ++it) {
const auto& element = *it;
stack->push(StackRecord(&element, source_info));
}
}
void PushStructDeps(const CreateStruct* struct_expr,
const SourceInfo* source_info,
std::stack<StackRecord>* stack) {
const auto& entries = struct_expr->entries();
for (auto it = entries.rbegin(); it != entries.rend(); ++it) {
const auto& entry = *it;
if (entry.has_value()) {
stack->push(StackRecord(&entry.value(), source_info));
}
if (entry.has_map_key()) {
stack->push(StackRecord(&entry.map_key(), source_info));
}
}
}
void PushComprehensionDeps(const Comprehension* c, const Expr* expr,
const SourceInfo* source_info,
std::stack<StackRecord>* stack,
bool use_comprehension_callbacks) {
StackRecord iter_range(&c->iter_range(), source_info, c, expr, ITER_RANGE,
use_comprehension_callbacks);
StackRecord accu_init(&c->accu_init(), source_info, c, expr, ACCU_INIT,
use_comprehension_callbacks);
StackRecord loop_condition(&c->loop_condition(), source_info, c, expr,
LOOP_CONDITION, use_comprehension_callbacks);
StackRecord loop_step(&c->loop_step(), source_info, c, expr, LOOP_STEP,
use_comprehension_callbacks);
StackRecord result(&c->result(), source_info, c, expr, RESULT,
use_comprehension_callbacks);
stack->push(result);
stack->push(loop_step);
stack->push(loop_condition);
stack->push(accu_init);
stack->push(iter_range);
}
// absl::visit functor that schedules the children of the current frame.
struct PushDepsVisitor {
  void operator()(const ExprRecord& record) {
    const Expr* expr = record.expr;
    switch (expr->expr_kind_case()) {
      case Expr::kSelectExpr:
        PushSelectDeps(&expr->select_expr(), record.source_info, &stack);
        break;
      case Expr::kCallExpr:
        PushCallDeps(&expr->call_expr(), expr, record.source_info, &stack);
        break;
      case Expr::kListExpr:
        PushListDeps(&expr->list_expr(), record.source_info, &stack);
        break;
      case Expr::kStructExpr:
        PushStructDeps(&expr->struct_expr(), record.source_info, &stack);
        break;
      case Expr::kComprehensionExpr:
        PushComprehensionDeps(&expr->comprehension_expr(), expr,
                              record.source_info, &stack,
                              options.use_comprehension_callbacks);
        break;
      default:
        // Leaf nodes (const, ident, unset) have no children.
        break;
    }
  }
  // Arg / comprehension frames re-wrap the same node as a plain expression
  // frame so it also receives the ordinary expression callbacks.
  void operator()(const ArgRecord& record) {
    stack.push(StackRecord(record.expr, record.source_info));
  }
  void operator()(const ComprehensionRecord& record) {
    stack.push(StackRecord(record.expr, record.source_info));
  }
  std::stack<StackRecord>& stack;
  const TraversalOptions& options;
};
void PushDependencies(const StackRecord& record, std::stack<StackRecord>& stack,
const TraversalOptions& options) {
absl::visit(PushDepsVisitor{stack, options}, record.record_variant);
}
}
// Iterative pre/post-order traversal of `expr`, invoking the corresponding
// callbacks on `visitor`. Uses an explicit stack instead of recursion, so
// deeply nested expressions cannot overflow the call stack.
void AstTraverse(const Expr* expr, const SourceInfo* source_info,
                 AstVisitor* visitor, TraversalOptions options) {
  std::stack<StackRecord> stack;
  stack.push(StackRecord(expr, source_info));
  while (!stack.empty()) {
    StackRecord& record = stack.top();
    if (!record.visited) {
      // First encounter: run pre-order callbacks and schedule children. The
      // frame stays on the stack so it is post-visited after its children.
      // (The reference remains valid across the pushes: std::stack's deque
      // does not invalidate references on push_back.)
      PreVisit(record, visitor);
      PushDependencies(record, stack, options);
      record.visited = true;
    } else {
      // All children done: run post-order callbacks and discard the frame.
      PostVisit(record, visitor);
      stack.pop();
    }
  }
}
} | #include "eval/public/ast_traverse.h"
#include "eval/public/ast_visitor.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using google::api::expr::v1alpha1::Constant;
using google::api::expr::v1alpha1::Expr;
using google::api::expr::v1alpha1::SourceInfo;
using testing::_;
using Ident = google::api::expr::v1alpha1::Expr::Ident;
using Select = google::api::expr::v1alpha1::Expr::Select;
using Call = google::api::expr::v1alpha1::Expr::Call;
using CreateList = google::api::expr::v1alpha1::Expr::CreateList;
using CreateStruct = google::api::expr::v1alpha1::Expr::CreateStruct;
using Comprehension = google::api::expr::v1alpha1::Expr::Comprehension;
// gMock implementation of AstVisitor; tests set expectations on individual
// callbacks to verify traversal order and arguments.
class MockAstVisitor : public AstVisitor {
 public:
  MOCK_METHOD(void, PreVisitExpr,
              (const Expr* expr, const SourcePosition* position), (override));
  MOCK_METHOD(void, PostVisitExpr,
              (const Expr* expr, const SourcePosition* position), (override));
  MOCK_METHOD(void, PreVisitConst,
              (const Constant* const_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitConst,
              (const Constant* const_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitIdent,
              (const Ident* ident_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitIdent,
              (const Ident* ident_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitSelect,
              (const Select* select_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitSelect,
              (const Select* select_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitCall,
              (const Call* call_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitCall,
              (const Call* call_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitComprehension,
              (const Comprehension* comprehension_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitComprehension,
              (const Comprehension* comprehension_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitComprehensionSubexpression,
              (const Expr* expr, const Comprehension* comprehension_expr,
               ComprehensionArg comprehension_arg,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitComprehensionSubexpression,
              (const Expr* expr, const Comprehension* comprehension_expr,
               ComprehensionArg comprehension_arg,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitTarget,
              (const Expr* expr, const SourcePosition* position), (override));
  MOCK_METHOD(void, PostVisitArg,
              (int arg_num, const Expr* expr, const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitCreateList,
              (const CreateList* list_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitCreateList,
              (const CreateList* list_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PreVisitCreateStruct,
              (const CreateStruct* struct_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
  MOCK_METHOD(void, PostVisitCreateStruct,
              (const CreateStruct* struct_expr, const Expr* expr,
               const SourcePosition* position),
              (override));
};
// Constant leaf: exactly one pre/post pair on the const callbacks.
TEST(AstCrawlerTest, CheckCrawlConstant) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto const_expr = expr.mutable_const_expr();
  EXPECT_CALL(handler, PreVisitConst(const_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(const_expr, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// Ident leaf: exactly one pre/post pair on the ident callbacks.
TEST(AstCrawlerTest, CheckCrawlIdent) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto ident_expr = expr.mutable_ident_expr();
  EXPECT_CALL(handler, PreVisitIdent(ident_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(ident_expr, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// A select with no operand must still be post-visited without crashing.
TEST(AstCrawlerTest, CheckCrawlSelectNotCrashingPostVisitAbsentOperand) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto select_expr = expr.mutable_select_expr();
  EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// The operand is post-visited before its enclosing select.
TEST(AstCrawlerTest, CheckCrawlSelect) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto select_expr = expr.mutable_select_expr();
  auto operand = select_expr->mutable_operand();
  auto ident_expr = operand->mutable_ident_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PostVisitIdent(ident_expr, operand, _)).Times(1);
  EXPECT_CALL(handler, PostVisitSelect(select_expr, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// Call without receiver: args are visited left-to-right; PostVisitTarget
// must never fire.
TEST(AstCrawlerTest, CheckCrawlCallNoReceiver) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto* call_expr = expr.mutable_call_expr();
  Expr* arg0 = call_expr->add_args();
  auto* const_expr = arg0->mutable_const_expr();
  Expr* arg1 = call_expr->add_args();
  auto* ident_expr = arg1->mutable_ident_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitTarget(_, _)).Times(0);
  EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// Call with receiver: the target is visited (and PostVisitTarget fired)
// before any argument.
TEST(AstCrawlerTest, CheckCrawlCallReceiver) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto* call_expr = expr.mutable_call_expr();
  Expr* target = call_expr->mutable_target();
  auto* target_ident = target->mutable_ident_expr();
  Expr* arg0 = call_expr->add_args();
  auto* const_expr = arg0->mutable_const_expr();
  Expr* arg1 = call_expr->add_args();
  auto* ident_expr = arg1->mutable_ident_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PreVisitCall(call_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(target_ident, target, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(target, _)).Times(1);
  EXPECT_CALL(handler, PostVisitTarget(&expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(arg0, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(0, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(arg1, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(1, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitCall(call_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitExpr(&expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// With use_comprehension_callbacks=true, each of the five sub-expressions
// gets a Pre/PostVisitComprehensionSubexpression pair in evaluation order:
// iter_range, accu_init, loop_condition, loop_step, result.
TEST(AstCrawlerTest, CheckCrawlComprehension) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto c = expr.mutable_comprehension_expr();
  auto iter_range = c->mutable_iter_range();
  auto iter_range_expr = iter_range->mutable_const_expr();
  auto accu_init = c->mutable_accu_init();
  auto accu_init_expr = accu_init->mutable_ident_expr();
  auto loop_condition = c->mutable_loop_condition();
  auto loop_condition_expr = loop_condition->mutable_const_expr();
  auto loop_step = c->mutable_loop_step();
  auto loop_step_expr = loop_step->mutable_ident_expr();
  auto result = c->mutable_result();
  auto result_expr = result->mutable_const_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PreVisitComprehension(c, &expr, _)).Times(1);
  EXPECT_CALL(handler,
              PreVisitComprehensionSubexpression(iter_range, c, ITER_RANGE, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitConst(iter_range_expr, iter_range, _)).Times(1);
  EXPECT_CALL(handler,
              PostVisitComprehensionSubexpression(iter_range, c, ITER_RANGE, _))
      .Times(1);
  EXPECT_CALL(handler,
              PreVisitComprehensionSubexpression(accu_init, c, ACCU_INIT, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitIdent(accu_init_expr, accu_init, _)).Times(1);
  EXPECT_CALL(handler,
              PostVisitComprehensionSubexpression(accu_init, c, ACCU_INIT, _))
      .Times(1);
  EXPECT_CALL(handler, PreVisitComprehensionSubexpression(loop_condition, c,
                                                          LOOP_CONDITION, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitConst(loop_condition_expr, loop_condition, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitComprehensionSubexpression(loop_condition, c,
                                                           LOOP_CONDITION, _))
      .Times(1);
  EXPECT_CALL(handler,
              PreVisitComprehensionSubexpression(loop_step, c, LOOP_STEP, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitIdent(loop_step_expr, loop_step, _)).Times(1);
  EXPECT_CALL(handler,
              PostVisitComprehensionSubexpression(loop_step, c, LOOP_STEP, _))
      .Times(1);
  EXPECT_CALL(handler, PreVisitComprehensionSubexpression(result, c, RESULT, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitConst(result_expr, result, _)).Times(1);
  EXPECT_CALL(handler,
              PostVisitComprehensionSubexpression(result, c, RESULT, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitComprehension(c, &expr, _)).Times(1);
  TraversalOptions opts;
  opts.use_comprehension_callbacks = true;
  AstTraverse(&expr, &source_info, &handler, opts);
}
// With the default options, the same sub-expressions are reported via the
// legacy PostVisitArg callback using the ComprehensionArg value as index.
TEST(AstCrawlerTest, CheckCrawlComprehensionLegacyCallbacks) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto c = expr.mutable_comprehension_expr();
  auto iter_range = c->mutable_iter_range();
  auto iter_range_expr = iter_range->mutable_const_expr();
  auto accu_init = c->mutable_accu_init();
  auto accu_init_expr = accu_init->mutable_ident_expr();
  auto loop_condition = c->mutable_loop_condition();
  auto loop_condition_expr = loop_condition->mutable_const_expr();
  auto loop_step = c->mutable_loop_step();
  auto loop_step_expr = loop_step->mutable_ident_expr();
  auto result = c->mutable_result();
  auto result_expr = result->mutable_const_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PreVisitComprehension(c, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(iter_range_expr, iter_range, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(ITER_RANGE, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(accu_init_expr, accu_init, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(ACCU_INIT, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(loop_condition_expr, loop_condition, _))
      .Times(1);
  EXPECT_CALL(handler, PostVisitArg(LOOP_CONDITION, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(loop_step_expr, loop_step, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(LOOP_STEP, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(result_expr, result, _)).Times(1);
  EXPECT_CALL(handler, PostVisitArg(RESULT, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitComprehension(c, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// List elements are visited in declaration order, bracketed by the
// Pre/PostVisitCreateList pair.
TEST(AstCrawlerTest, CheckCreateList) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto list_expr = expr.mutable_list_expr();
  auto arg0 = list_expr->add_elements();
  auto const_expr = arg0->mutable_const_expr();
  auto arg1 = list_expr->add_elements();
  auto ident_expr = arg1->mutable_ident_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PreVisitCreateList(list_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(const_expr, arg0, _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(ident_expr, arg1, _)).Times(1);
  EXPECT_CALL(handler, PostVisitCreateList(list_expr, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// For struct entries the map key is visited before the value.
TEST(AstCrawlerTest, CheckCreateStruct) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto struct_expr = expr.mutable_struct_expr();
  auto entry0 = struct_expr->add_entries();
  auto key = entry0->mutable_map_key()->mutable_const_expr();
  auto value = entry0->mutable_value()->mutable_ident_expr();
  testing::InSequence seq;
  EXPECT_CALL(handler, PreVisitCreateStruct(struct_expr, &expr, _)).Times(1);
  EXPECT_CALL(handler, PostVisitConst(key, &entry0->map_key(), _)).Times(1);
  EXPECT_CALL(handler, PostVisitIdent(value, &entry0->value(), _)).Times(1);
  EXPECT_CALL(handler, PostVisitCreateStruct(struct_expr, &expr, _)).Times(1);
  AstTraverse(&expr, &source_info, &handler);
}
// Every node (struct + key + value = 3) gets a generic Pre/PostVisitExpr.
TEST(AstCrawlerTest, CheckExprHandlers) {
  SourceInfo source_info;
  MockAstVisitor handler;
  Expr expr;
  auto struct_expr = expr.mutable_struct_expr();
  auto entry0 = struct_expr->add_entries();
  entry0->mutable_map_key()->mutable_const_expr();
  entry0->mutable_value()->mutable_ident_expr();
  EXPECT_CALL(handler, PreVisitExpr(_, _)).Times(3);
  EXPECT_CALL(handler, PostVisitExpr(_, _)).Times(3);
  AstTraverse(&expr, &source_info, &handler);
}
}
} |
285 | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_DATA_OWNING_VECTOR_REF_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ML_ADJACENT_DATA_OWNING_VECTOR_REF_H_
#include <vector>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
// A MutableDataRef whose backing storage is an owned std::vector<char>.
// Non-copyable and non-movable.
class OwningVectorRef : public MutableDataRef {
 public:
  explicit OwningVectorRef(etype_t type) : MutableDataRef(type) {}
  OwningVectorRef(const OwningVectorRef&) = delete;
  OwningVectorRef(OwningVectorRef&&) = delete;
  OwningVectorRef& operator=(const OwningVectorRef&) = delete;
  OwningVectorRef& operator=(OwningVectorRef&&) = delete;
  // Resizes the buffer to the product of the leading dims; dims at and after
  // the first non-positive entry are excluded from the element count.
  void Resize(dims_t&& dims) override;
  // Read-only view of the raw byte buffer.
  const void* Data() const override;
  // Mutable view of the raw byte buffer.
  void* Data() override;
  ind_t NumElements() const override;
  // Buffer size in bytes: NumElements() * width of the element type.
  size_t Bytes() const override;
  ~OwningVectorRef() override = default;
 private:
  std::vector<char> raw_data_buffer_;
  ind_t num_elements_ = 0;
};
}
}
#endif
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <cstddef>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
// Records `dims` and reallocates the buffer for the implied element count.
// The count is the product of the leading dims, stopping at the first dim
// that is <= 0; an empty `dims` yields zero elements.
void OwningVectorRef::Resize(dims_t&& dims) {
  dims_ = dims;
  num_elements_ = 0;
  for (dim_t extent : dims_) {
    if (extent <= 0) {
      break;
    }
    num_elements_ = (num_elements_ == 0) ? extent : num_elements_ * extent;
  }
  raw_data_buffer_.resize(num_elements_ * TypeWidth(Type()));
}
// Read-only view of the owned byte buffer.
const void* OwningVectorRef::Data() const { return raw_data_buffer_.data(); }
// Mutable view of the owned byte buffer.
void* OwningVectorRef::Data() { return raw_data_buffer_.data(); }
// Element count computed by the most recent Resize (0 before any Resize).
ind_t OwningVectorRef::NumElements() const { return num_elements_; }
// Buffer size in bytes: element count times the element-type width.
size_t OwningVectorRef::Bytes() const {
  return NumElements() * TypeWidth(Type());
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
// A freshly constructed ref holds no elements and an empty dim list.
TEST(OwningVectorRefTest, ConstructFloat32) {
  OwningVectorRef t(etype_t::f32);
  EXPECT_EQ(t.Type(), etype_t::f32);
  EXPECT_EQ(t.NumElements(), 0);
  EXPECT_EQ(t.Bytes(), 0);
  EXPECT_THAT(t.Dims(), IsEmpty());
}
// Resize from empty allocates dim-product elements that are writable and
// readable back.
TEST(OwningVectorRefTest, ResizeFromEmptyFloat32) {
  OwningVectorRef t(etype_t::f32);
  t.Resize({2, 2});
  EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({2, 2}));
  EXPECT_EQ(t.NumElements(), 4);
  ASSERT_EQ(t.Bytes(), 4 * sizeof(float));
  float* write_f_start = reinterpret_cast<float*>(t.Data());
  float* write_f_end = write_f_start + t.NumElements();
  std::fill(write_f_start, write_f_end, 0.5f);
  const float* read_f_start = reinterpret_cast<const float*>(t.Data());
  for (int i = 0; i < t.NumElements(); ++i) {
    EXPECT_EQ(read_f_start[i], 0.5f);
  }
}
// Shrinking keeps the leading elements' contents intact.
TEST(OwningVectorRefTest, ResizeDownFloat32) {
  OwningVectorRef t(etype_t::f32);
  t.Resize({2, 2});
  float* write_f_start = reinterpret_cast<float*>(t.Data());
  float* write_f_end = write_f_start + t.NumElements();
  std::fill(write_f_start, write_f_end, 0.5f);
  t.Resize({3});
  ASSERT_THAT(t.Dims(), ElementsAreArray<dim_t>({3}));
  EXPECT_EQ(t.NumElements(), 3);
  ASSERT_EQ(t.Bytes(), 3 * sizeof(float));
  const float* read_f_start = reinterpret_cast<const float*>(t.Data());
  for (int i = 0; i < t.NumElements(); ++i) {
    EXPECT_EQ(read_f_start[i], 0.5f);
  }
}
// Dims at and after the first non-positive entry do not contribute to the
// element count, but the full dim list is still recorded.
TEST(OwningVectorRefTest, IgnoresDimsForNumElementsAfterFirstNonPositive) {
  OwningVectorRef t(etype_t::f32);
  t.Resize({3, 0, 0, 2});
  EXPECT_EQ(t.Type(), etype_t::f32);
  EXPECT_EQ(t.NumElements(), 3);
  EXPECT_EQ(t.Bytes(), 3 * sizeof(float));
  EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({3, 0, 0, 2}));
}
}
}
} |
286 | #ifndef QUICHE_HTTP2_DECODER_PAYLOAD_DECODERS_GOAWAY_PAYLOAD_DECODER_H_
#define QUICHE_HTTP2_DECODER_PAYLOAD_DECODERS_GOAWAY_PAYLOAD_DECODER_H_
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/decoder/frame_decoder_state.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_export.h"
namespace http2 {
namespace test {
class GoAwayPayloadDecoderPeer;
}
// Decodes the payload of an HTTP/2 GOAWAY frame: the fixed
// Http2GoAwayFields followed by optional opaque debug data.
class QUICHE_EXPORT GoAwayPayloadDecoder {
 public:
  // State machine positions for resumable decoding across buffer boundaries.
  enum class PayloadState {
    // Next: begin decoding the fixed Http2GoAwayFields.
    kStartDecodingFixedFields,
    // Next: act on the status produced while decoding the fixed fields.
    kHandleFixedFieldsStatus,
    // Next: deliver the remaining payload as opaque data.
    kReadOpaqueData,
    // Next: continue decoding fixed fields split across buffers.
    kResumeDecodingFixedFields,
  };
  DecodeStatus StartDecodingPayload(FrameDecoderState* state, DecodeBuffer* db);
  DecodeStatus ResumeDecodingPayload(FrameDecoderState* state,
                                     DecodeBuffer* db);
 private:
  friend class test::GoAwayPayloadDecoderPeer;
  Http2GoAwayFields goaway_fields_;
  PayloadState payload_state_;
};
}
#endif
#include "quiche/http2/decoder/payload_decoders/goaway_payload_decoder.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
// Logging/debug output for PayloadState; reports (and flags via QUICHE_BUG)
// any value outside the enumeration.
std::ostream& operator<<(std::ostream& out,
                         GoAwayPayloadDecoder::PayloadState v) {
  switch (v) {
    case GoAwayPayloadDecoder::PayloadState::kStartDecodingFixedFields:
      return out << "kStartDecodingFixedFields";
    case GoAwayPayloadDecoder::PayloadState::kHandleFixedFieldsStatus:
      return out << "kHandleFixedFieldsStatus";
    case GoAwayPayloadDecoder::PayloadState::kReadOpaqueData:
      return out << "kReadOpaqueData";
    case GoAwayPayloadDecoder::PayloadState::kResumeDecodingFixedFields:
      return out << "kResumeDecodingFixedFields";
  }
  // Reached only for values outside the enum (e.g. memory corruption or a
  // new enumerator missing above); report the raw integer.
  int unknown = static_cast<int>(v);
  QUICHE_BUG(http2_bug_167_1)
      << "Invalid GoAwayPayloadDecoder::PayloadState: " << unknown;
  return out << "GoAwayPayloadDecoder::PayloadState(" << unknown << ")";
}
// Begins decoding a GOAWAY payload. Resets the per-frame remainders, seeds
// the state machine, and immediately delegates to ResumeDecodingPayload,
// which does all of the real work.
DecodeStatus GoAwayPayloadDecoder::StartDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2) << "GoAwayPayloadDecoder::StartDecodingPayload: "
                  << state->frame_header();
  QUICHE_DCHECK_EQ(Http2FrameType::GOAWAY, state->frame_header().type);
  QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
  // GOAWAY defines no flags; the frame decoder clears them before dispatch.
  QUICHE_DCHECK_EQ(0, state->frame_header().flags);
  state->InitializeRemainders();
  payload_state_ = PayloadState::kStartDecodingFixedFields;
  return ResumeDecodingPayload(state, db);
}
// Drives the GOAWAY state machine. The switch deliberately falls through
// from kStartDecodingFixedFields -> kHandleFixedFieldsStatus ->
// kReadOpaqueData for the common case where everything fits in one buffer;
// kResumeDecodingFixedFields loops back via `continue` so its status is
// handled by the same kHandleFixedFieldsStatus logic.
DecodeStatus GoAwayPayloadDecoder::ResumeDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2)
      << "GoAwayPayloadDecoder::ResumeDecodingPayload: remaining_payload="
      << state->remaining_payload() << ", db->Remaining=" << db->Remaining();
  const Http2FrameHeader& frame_header = state->frame_header();
  QUICHE_DCHECK_EQ(Http2FrameType::GOAWAY, frame_header.type);
  QUICHE_DCHECK_LE(db->Remaining(), frame_header.payload_length);
  // kHandleFixedFieldsStatus is a transient state: it is only entered via
  // fallthrough/continue within this call, never saved between calls.
  QUICHE_DCHECK_NE(PayloadState::kHandleFixedFieldsStatus, payload_state_);
  DecodeStatus status = DecodeStatus::kDecodeError;
  size_t avail;
  while (true) {
    QUICHE_DVLOG(2)
        << "GoAwayPayloadDecoder::ResumeDecodingPayload payload_state_="
        << payload_state_;
    switch (payload_state_) {
      case PayloadState::kStartDecodingFixedFields:
        status = state->StartDecodingStructureInPayload(&goaway_fields_, db);
        ABSL_FALLTHROUGH_INTENDED;
      case PayloadState::kHandleFixedFieldsStatus:
        if (status == DecodeStatus::kDecodeDone) {
          // Fixed fields complete: announce the frame, then stream any
          // opaque data (fallthrough below).
          state->listener()->OnGoAwayStart(frame_header, goaway_fields_);
        } else {
          // Not done: either the buffer ran out mid-struct (InProgress with
          // payload left) or the frame is too short (Error with none left).
          QUICHE_DCHECK((status == DecodeStatus::kDecodeInProgress &&
                         state->remaining_payload() > 0) ||
                        (status == DecodeStatus::kDecodeError &&
                         state->remaining_payload() == 0))
              << "\n status=" << status
              << "; remaining_payload=" << state->remaining_payload();
          payload_state_ = PayloadState::kResumeDecodingFixedFields;
          return status;
        }
        ABSL_FALLTHROUGH_INTENDED;
      case PayloadState::kReadOpaqueData:
        // Forward whatever opaque data is currently buffered.
        avail = db->Remaining();
        if (avail > 0) {
          state->listener()->OnGoAwayOpaqueData(db->cursor(), avail);
          db->AdvanceCursor(avail);
          state->ConsumePayload(avail);
        }
        if (state->remaining_payload() > 0) {
          payload_state_ = PayloadState::kReadOpaqueData;
          return DecodeStatus::kDecodeInProgress;
        }
        state->listener()->OnGoAwayEnd();
        return DecodeStatus::kDecodeDone;
      case PayloadState::kResumeDecodingFixedFields:
        status = state->ResumeDecodingStructureInPayload(&goaway_fields_, db);
        payload_state_ = PayloadState::kHandleFixedFieldsStatus;
        continue;
    }
    // Unreachable for valid states; report corruption and retry the switch.
    QUICHE_BUG(http2_bug_167_2) << "PayloadState: " << payload_state_;
  }
}
} | #include "quiche/http2/decoder/payload_decoders/goaway_payload_decoder.h"
#include <stddef.h>
#include <string>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
// Test peer telling the AbstractPayloadDecoderTest harness which frame type
// this decoder handles and which flags influence decoding.
class GoAwayPayloadDecoderPeer {
 public:
  static constexpr Http2FrameType FrameType() { return Http2FrameType::GOAWAY; }
  // GOAWAY has no flags that alter payload decoding.
  static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
// Records the decoder's callbacks into FrameParts so tests can compare the
// observed event sequence against expectations.
struct Listener : public FramePartsCollector {
  void OnGoAwayStart(const Http2FrameHeader& header,
                     const Http2GoAwayFields& goaway) override {
    QUICHE_VLOG(1) << "OnGoAwayStart header: " << header
                   << "; goaway: " << goaway;
    StartFrame(header)->OnGoAwayStart(header, goaway);
  }
  void OnGoAwayOpaqueData(const char* data, size_t len) override {
    QUICHE_VLOG(1) << "OnGoAwayOpaqueData: len=" << len;
    CurrentFrame()->OnGoAwayOpaqueData(data, len);
  }
  void OnGoAwayEnd() override {
    QUICHE_VLOG(1) << "OnGoAwayEnd";
    EndFrame()->OnGoAwayEnd();
  }
  void OnFrameSizeError(const Http2FrameHeader& header) override {
    QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
    FrameError(header)->OnFrameSizeError(header);
  }
};
class GoAwayPayloadDecoderTest
    : public AbstractPayloadDecoderTest<GoAwayPayloadDecoder,
                                        GoAwayPayloadDecoderPeer, Listener> {};
// A GOAWAY payload must be at least as large as the fixed fields; every
// prefix of a valid payload whose size differs from the exact encoded size
// must produce a frame size error.
TEST_F(GoAwayPayloadDecoderTest, Truncated) {
  auto approve_size = [](size_t size) {
    return size != Http2GoAwayFields::EncodedSize();
  };
  Http2FrameBuilder fb;
  fb.Append(Http2GoAwayFields(123, Http2ErrorCode::ENHANCE_YOUR_CALM));
  EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
// Parameterized over the number of opaque (debug) data bytes that follow
// the fixed GOAWAY fields.
class GoAwayOpaqueDataLengthTests
    : public GoAwayPayloadDecoderTest,
      public ::testing::WithParamInterface<uint32_t> {
 protected:
  GoAwayOpaqueDataLengthTests() : length_(GetParam()) {
    QUICHE_VLOG(1) << "################ length_=" << length_
                   << " ################";
  }
  const uint32_t length_;  // Opaque data length for this instantiation.
};
INSTANTIATE_TEST_SUITE_P(VariousLengths, GoAwayOpaqueDataLengthTests,
                         ::testing::Values(0, 1, 2, 3, 4, 5, 6));
// Builds a GOAWAY frame with randomized fixed fields plus `length_` random
// opaque bytes and verifies the decoder reproduces exactly those parts.
TEST_P(GoAwayOpaqueDataLengthTests, ValidLength) {
  Http2GoAwayFields goaway;
  Randomize(&goaway, RandomPtr());
  std::string opaque_data = Random().RandString(length_);
  Http2FrameBuilder fb;
  fb.Append(goaway);
  fb.Append(opaque_data);
  // Flags and stream id are randomized: neither affects GOAWAY decoding.
  Http2FrameHeader header(fb.size(), Http2FrameType::GOAWAY, RandFlags(),
                          RandStreamId());
  set_frame_header(header);
  FrameParts expected(header, opaque_data);
  expected.SetOptGoaway(goaway);
  ASSERT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
} |
287 | #ifndef XLA_SERVICE_GPU_FUSION_WRAPPER_H_
#define XLA_SERVICE_GPU_FUSION_WRAPPER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
// HLO pass that wraps each eligible (fusible) instruction of the entry
// computation in its own single-instruction fusion, giving later GPU passes
// and emitters a uniform fused representation.
class FusionWrapper : public HloModulePass {
 public:
  absl::string_view name() const override { return "fusion-wrapper"; }
  using HloPassInterface::Run;
  // Returns true iff at least one instruction was wrapped.
  absl::StatusOr<bool> Run(
      HloModule* module,
      const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/fusion_wrapper.h"
#include <functional>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> FusionWrapper::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Only the entry computation is walked directly; while/conditional bodies
  // are reached recursively below. NOTE(review): execution_threads is
  // currently unused -- confirm this is intentional.
  auto instructions = module->entry_computation()->MakeInstructionPostOrder();
  bool changed = false;
  std::function<absl::Status(HloInstruction*)> handle_instruction;
  handle_instruction = [&](HloInstruction* instruction) -> absl::Status {
    switch (instruction->opcode()) {
      // Control-flow instructions are not wrapped themselves; recurse into
      // the computations they call.
      case HloOpcode::kConditional:
      case HloOpcode::kWhile:
        for (auto* computation : instruction->called_computations()) {
          for (auto* inner_instruction :
               computation->MakeInstructionPostOrder()) {
            TF_RETURN_IF_ERROR(handle_instruction(inner_instruction));
          }
        }
        break;
      // Opcodes eligible for wrapping in a single-instruction fusion.
      case HloOpcode::kAbs:
      case HloOpcode::kAdd:
      case HloOpcode::kAnd:
      case HloOpcode::kAtan2:
      case HloOpcode::kBitcastConvert:
      case HloOpcode::kBroadcast:
      case HloOpcode::kCeil:
      case HloOpcode::kCbrt:
      case HloOpcode::kClamp:
      case HloOpcode::kClz:
      case HloOpcode::kCompare:
      case HloOpcode::kComplex:
      case HloOpcode::kConcatenate:
      case HloOpcode::kConvert:
      case HloOpcode::kCopy:
      case HloOpcode::kCos:
      case HloOpcode::kDivide:
      case HloOpcode::kDot:
      case HloOpcode::kDynamicSlice:
      case HloOpcode::kDynamicUpdateSlice:
      case HloOpcode::kErf:
      case HloOpcode::kExp:
      case HloOpcode::kExpm1:
      case HloOpcode::kFloor:
      case HloOpcode::kGather:
      case HloOpcode::kImag:
      case HloOpcode::kIota:
      case HloOpcode::kIsFinite:
      case HloOpcode::kLog:
      case HloOpcode::kLog1p:
      case HloOpcode::kMap:
      case HloOpcode::kMaximum:
      case HloOpcode::kMinimum:
      case HloOpcode::kMultiply:
      case HloOpcode::kNegate:
      case HloOpcode::kNot:
      case HloOpcode::kOr:
      case HloOpcode::kPad:
      case HloOpcode::kPopulationCount:
      case HloOpcode::kPower:
      case HloOpcode::kReal:
      case HloOpcode::kReshape:
      case HloOpcode::kReduce:
      case HloOpcode::kReducePrecision:
      case HloOpcode::kReduceWindow:
      case HloOpcode::kRemainder:
      case HloOpcode::kReverse:
      case HloOpcode::kRoundNearestAfz:
      case HloOpcode::kRoundNearestEven:
      case HloOpcode::kRsqrt:
      case HloOpcode::kScatter:
      case HloOpcode::kSelect:
      case HloOpcode::kShiftLeft:
      case HloOpcode::kShiftRightLogical:
      case HloOpcode::kShiftRightArithmetic:
      case HloOpcode::kSign:
      case HloOpcode::kSin:
      case HloOpcode::kSlice:
      case HloOpcode::kSqrt:
      case HloOpcode::kSubtract:
      case HloOpcode::kStochasticConvert:
      case HloOpcode::kTan:
      case HloOpcode::kTanh:
      case HloOpcode::kTranspose:
      case HloOpcode::kXor: {
        auto* computation = instruction->parent();
        // Create a fusion containing just this instruction; the fusion kind
        // is chosen as if producer and consumer were both this instruction.
        auto* fusion_instruction =
            computation->AddInstruction(HloInstruction::CreateFusion(
                instruction->shape(),
                ChooseFusionKind(*instruction, *instruction), instruction));
        // Name the fusion and its computation after the wrapped opcode,
        // e.g. "wrapped_add" / "wrapped_add_computation".
        const absl::string_view wrapped_opcode =
            HloOpcodeString(instruction->opcode());
        module->SetAndUniquifyInstrName(
            fusion_instruction, absl::StrCat("wrapped_", wrapped_opcode));
        module->SetAndUniquifyComputationName(
            fusion_instruction->fused_instructions_computation(),
            absl::StrCat("wrapped_", wrapped_opcode, "_computation"));
        // Keep schedule, control deps, and uses consistent, then remove the
        // original instruction (now duplicated inside the fusion body).
        if (module->has_schedule()) {
          module->schedule().replace_instruction(computation, instruction,
                                                 fusion_instruction);
        }
        TF_RETURN_IF_ERROR(
            fusion_instruction->CopyAllControlDepsFrom(instruction));
        TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
        TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction));
        TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
        changed = true;
        break;
      }
      default:
        break;
    }
    return absl::OkStatus();
  };
  for (auto* instruction : instructions) {
    TF_RETURN_IF_ERROR(handle_instruction(instruction));
  }
  return changed;
}
}
} | #include "xla/service/gpu/fusion_wrapper.h"
#include <optional>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
class FusionWrapperTest : public HloTestBase {};
// NOTE(review): the second raw-string argument (the FileCheck expectation,
// opened with R"( on the last call line) is unterminated -- its CHECK lines
// and the closing )"); appear to have been lost. Restore from history.
TEST_F(FusionWrapperTest, SimpleOp) {
  RunAndFilecheckHloRewrite(R"(
      HloModule TestModule
      ENTRY TestComputation {
        p0 = f16[30,41] parameter(0)
        p1 = f16[30,41] parameter(1)
        ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0}
      })",
                            FusionWrapper(), R"(
}
// NOTE(review): FileCheck expectation raw string is unterminated (see
// SimpleOp); the expected pattern needs to be restored from history.
TEST_F(FusionWrapperTest, Scatter) {
  RunAndFilecheckHloRewrite(R"(
      HloModule ScatterIntoScalar
      update_s32 {
        lhs = s32[] parameter(0)
        ROOT rhs = s32[] parameter(1)
      }
      ENTRY main {
        parameter.1 = s32[] parameter(0)
        parameter.2 = s32[0]{0} parameter(1)
        parameter.3 = s32[] parameter(2)
        ROOT scatter_ScatterIntoScalar = s32[] scatter(parameter.1, parameter.2, parameter.3),
            update_window_dims={},
            inserted_window_dims={},
            scatter_dims_to_operand_dims={},
            index_vector_dim=0,
            to_apply=update_s32
      })",
                            FusionWrapper(), R"(
}
// Control dependencies on a wrapped instruction must be transferred to the
// wrapping fusion. NOTE(review): FileCheck expectation raw string is
// unterminated (see SimpleOp); restore the pattern from history.
TEST_F(FusionWrapperTest, ControlDependency) {
  RunAndFilecheckHloRewrite(R"(
      HloModule TestModule
      fusion {
        ROOT param = f32[] parameter(0)
      }
      ENTRY main {
        param = f32[] parameter(0)
        fusion = f32[] fusion(param), kind=kLoop, calls=fusion
        constant_one = f32[] constant(1)
        ROOT add = f32[] add(param, constant_one), control-predecessors={fusion}
      })",
                            FusionWrapper(), R"(
}
// Instructions inside while body/condition computations must be wrapped
// too. NOTE(review): FileCheck expectation raw string is unterminated (see
// SimpleOp); restore the pattern from history.
TEST_F(FusionWrapperTest, While) {
  RunAndFilecheckHloRewrite(R"(
      HloModule While
      %body {
        %parameter.5 = (f32[5]{0}) parameter(0)
        %constant_8 = f32[] constant(0)
        %broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={}
        ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9)
      }
      %cond {
        %parameter.12 = (f32[5]{0}) parameter(0)
        ROOT %constant_1 = pred[] constant(false)
      }
      ENTRY %main (parameter.1: f32[5]) -> (f32[5]) {
        %parameter.1 = f32[5]{0} parameter(0)
        %copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1)
        %tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3)
        ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body
      })",
                            FusionWrapper(), R"(
}
// A while loop already inside a fusion leaves nothing for the pass to wrap:
// std::nullopt as the expected pattern asserts the module is unchanged.
TEST_F(FusionWrapperTest, WhileInFusion) {
  RunAndFilecheckHloRewrite(R"(
      HloModule While
      %body {
        %parameter.5 = (f32[5]{0}) parameter(0)
        %constant_8 = f32[] constant(0)
        %broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={}
        ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9)
      }
      %cond {
        %parameter.12 = (f32[5]{0}) parameter(0)
        ROOT %constant_1 = pred[] constant(false)
      }
      %fusion {
        %parameter.1 = f32[5]{0} parameter(0)
        %copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1)
        %tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3)
        ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body
      }
      ENTRY %main (parameter.1: f32[5]) -> (f32[5]) {
        %parameter.1 = f32[5]{0} parameter(0)
        ROOT %fusion = (f32[5]{0}) fusion(f32[5]{0} %parameter.1), kind=kLoop, calls=%fusion
      })",
                            FusionWrapper(),
                            std::nullopt);
}
}
}
} |
288 | #ifndef ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_
#define ABSL_STRINGS_INTERNAL_CORDZ_FUNCTIONS_H_
#include <stdint.h>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
// Returns the mean number of Cord events between samples. Values <= 0
// disable sampling; 1 samples every event.
int32_t get_cordz_mean_interval();
// Sets the mean sampling interval (see above).
void set_cordz_mean_interval(int32_t mean_interval);
// ABSL_INTERNAL_CORDZ_ENABLED is derived here and must not be set by users:
// cordz requires thread_local support and is currently Linux-only.
#if defined(ABSL_INTERNAL_CORDZ_ENABLED)
#error ABSL_INTERNAL_CORDZ_ENABLED cannot be set directly
#elif defined(__linux__) && defined(ABSL_HAVE_THREAD_LOCAL)
#define ABSL_INTERNAL_CORDZ_ENABLED 1
#endif
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
// Per-thread sampling state for the cordz profiler.
struct SamplingState {
  // Events remaining until the next sample is taken.
  int64_t next_sample;
  // The stride (interval length) from which next_sample was drawn.
  int64_t sample_stride;
};
ABSL_CONST_INIT extern thread_local SamplingState cordz_next_sample;
// Slow path: draws a fresh stride; returns the stride to attribute to a
// sample if this event should be sampled, otherwise 0.
int64_t cordz_should_profile_slow(SamplingState& state);
// Fast path: decrements the per-thread countdown and returns 0 while more
// than one event remains; otherwise defers to the slow path. A positive
// return value means "sample this event" (and is the sample's stride).
inline int64_t cordz_should_profile() {
  if (ABSL_PREDICT_TRUE(cordz_next_sample.next_sample > 1)) {
    cordz_next_sample.next_sample--;
    return 0;
  }
  return cordz_should_profile_slow(cordz_next_sample);
}
// Test hook: forces the current thread's countdown and stride.
void cordz_set_next_sample_for_testing(int64_t next_sample);
#else
// Cordz compiled out: never sample, and the test hook is a no-op.
inline int64_t cordz_should_profile() { return 0; }
inline void cordz_set_next_sample_for_testing(int64_t) {}
#endif
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/strings/internal/cordz_functions.h"
#include <atomic>
#include <cmath>
#include <limits>
#include <random>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/profiling/internal/exponential_biased.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
// Global mean sampling interval; accessed with acquire/release ordering via
// get/set_cordz_mean_interval. Default: roughly one sample per 50k events.
std::atomic<int> g_cordz_mean_interval(50000);
}
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
// Sentinel countdown marking a thread whose state was never initialized.
static constexpr int64_t kInitCordzNextSample = -1;
ABSL_CONST_INIT thread_local SamplingState cordz_next_sample = {
    kInitCordzNextSample, 1};
// Countdown installed while sampling is disabled: large enough to keep the
// fast path cheap, small enough that re-enablement is noticed soon.
constexpr int64_t kIntervalIfDisabled = 1 << 16;
ABSL_ATTRIBUTE_NOINLINE int64_t
cordz_should_profile_slow(SamplingState& state) {
  thread_local absl::profiling_internal::ExponentialBiased
      exponential_biased_generator;
  int32_t mean_interval = get_cordz_mean_interval();
  // Sampling disabled: park the countdown far in the future and don't sample.
  if (mean_interval <= 0) {
    state = {kIntervalIfDisabled, kIntervalIfDisabled};
    return 0;
  }
  // Interval of 1: sample every event.
  if (mean_interval == 1) {
    state = {1, 1};
    return 1;
  }
  if (cordz_next_sample.next_sample <= 0) {
    // On the very first check for this thread (state still the init
    // sentinel), don't sample immediately: install a freshly drawn stride
    // and re-run the sampling decision against it. Otherwise the countdown
    // legitimately expired, so sample with the stride that produced it.
    const bool initialized =
        cordz_next_sample.next_sample != kInitCordzNextSample;
    auto old_stride = state.sample_stride;
    auto stride = exponential_biased_generator.GetStride(mean_interval);
    state = {stride, stride};
    bool should_sample = initialized || cordz_should_profile() > 0;
    return should_sample ? old_stride : 0;
  }
  --state.next_sample;
  return 0;
}
// Test hook: sets both countdown and attributed stride for this thread.
void cordz_set_next_sample_for_testing(int64_t next_sample) {
  cordz_next_sample = {next_sample, next_sample};
}
#endif
// Reads the sampling interval (acquire pairs with the release store below).
int32_t get_cordz_mean_interval() {
  return g_cordz_mean_interval.load(std::memory_order_acquire);
}
// Publishes a new sampling interval; each thread picks it up the next time
// it recomputes its stride on the slow path.
void set_cordz_mean_interval(int32_t mean_interval) {
  g_cordz_mean_interval.store(mean_interval, std::memory_order_release);
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cordz_functions.h"
#include <thread>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::Eq;
using ::testing::Ge;
using ::testing::Le;
// The mean interval setter and getter must round-trip.
TEST(CordzFunctionsTest, SampleRate) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  int32_t expected_sample_rate = 123;
  set_cordz_mean_interval(expected_sample_rate);
  EXPECT_THAT(get_cordz_mean_interval(), Eq(expected_sample_rate));
  set_cordz_mean_interval(orig_sample_rate);
}
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
// With a non-positive interval nothing is sampled and the countdown is
// parked at the "disabled" interval (1 << 16).
TEST(CordzFunctionsTest, ShouldProfileDisable) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(0);
  cordz_set_next_sample_for_testing(0);
  EXPECT_EQ(cordz_should_profile(), 0);
  EXPECT_THAT(cordz_next_sample.next_sample, Eq(1 << 16));
  set_cordz_mean_interval(orig_sample_rate);
}
// An interval of 1 samples every event.
TEST(CordzFunctionsTest, ShouldProfileAlways) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(1);
  cordz_set_next_sample_for_testing(1);
  EXPECT_GT(cordz_should_profile(), 0);
  EXPECT_THAT(cordz_next_sample.next_sample, Le(1));
  set_cordz_mean_interval(orig_sample_rate);
}
// A brand-new thread must not deterministically sample its first cord; the
// loop retries (bounded) until a fresh thread declines to sample.
TEST(CordzFunctionsTest, DoesNotAlwaysSampleFirstCord) {
  set_cordz_mean_interval(10000);
  int tries = 0;
  bool sampled = false;
  do {
    ++tries;
    ASSERT_THAT(tries, Le(1000));
    std::thread thread([&sampled] { sampled = cordz_should_profile() > 0; });
    thread.join();
  } while (sampled);
}
// Drawing 10k strides at mean 1000 should total close to 10^7; the bounds
// are loose statistical limits around that expectation.
TEST(CordzFunctionsTest, ShouldProfileRate) {
  static constexpr int kDesiredMeanInterval = 1000;
  static constexpr int kSamples = 10000;
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(kDesiredMeanInterval);
  int64_t sum_of_intervals = 0;
  for (int i = 0; i < kSamples; i++) {
    // A zero countdown forces the slow path to draw a fresh stride.
    cordz_set_next_sample_for_testing(0);
    cordz_should_profile();
    sum_of_intervals += cordz_next_sample.next_sample;
  }
  EXPECT_THAT(sum_of_intervals, Ge(9396115));
  EXPECT_THAT(sum_of_intervals, Le(10618100));
  set_cordz_mean_interval(orig_sample_rate);
}
#else
// With cordz compiled out, cordz_should_profile() is always 0.
TEST(CordzFunctionsTest, ShouldProfileDisabled) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(1);
  cordz_set_next_sample_for_testing(0);
  EXPECT_FALSE(cordz_should_profile());
  set_cordz_mean_interval(orig_sample_rate);
}
#endif
}
}
ABSL_NAMESPACE_END
} |
289 | #ifndef TENSORFLOW_TSL_PLATFORM_ENV_H_
#define TENSORFLOW_TSL_PLATFORM_ENV_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "tsl/platform/env_time.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
#ifdef PLATFORM_WINDOWS
#undef CopyFile
#undef DeleteFile
#endif
namespace tsl {
class Thread;
struct ThreadOptions;
// Abstraction over operating-system services (file systems, clock, threads,
// dynamic libraries) used throughout TSL/TensorFlow. Obtain the standard
// implementation via Env::Default(); subclass (or wrap with EnvWrapper) to
// customize behavior, e.g. virtual time in tests.
class Env {
 public:
  Env();
  virtual ~Env() = default;
  // Returns the process-wide default Env.
  static Env* Default();
  // Returns the FileSystem registered for `fname`'s URI scheme.
  virtual absl::Status GetFileSystemForFile(const std::string& fname,
                                            FileSystem** result);
  // Lists all registered URI schemes.
  virtual absl::Status GetRegisteredFileSystemSchemes(
      std::vector<std::string>* schemes);
  // Registers a FileSystem factory (or instance) to serve `scheme`.
  virtual absl::Status RegisterFileSystem(const std::string& scheme,
                                          FileSystemRegistry::Factory factory);
  virtual absl::Status RegisterFileSystem(
      const std::string& scheme, std::unique_ptr<FileSystem> filesystem);
  // Forwards a configuration option to the FileSystem for `scheme`.
  absl::Status SetOption(const std::string& scheme, const std::string& key,
                         const std::string& value);
  absl::Status SetOption(const std::string& scheme, const std::string& key,
                         const std::vector<string>& values);
  absl::Status SetOption(const std::string& scheme, const std::string& key,
                         const std::vector<int64_t>& values);
  absl::Status SetOption(const std::string& scheme, const std::string& key,
                         const std::vector<double>& values);
  absl::Status FlushFileSystemCaches();
  // File creation.
  // NOTE(review): every TransactionToken-taking overload in this class is a
  // placeholder -- it ignores its arguments and returns OkStatus() without
  // doing any work; confirm before relying on the transactional API.
  absl::Status NewRandomAccessFile(const std::string& fname,
                                   std::unique_ptr<RandomAccessFile>* result);
  absl::Status NewRandomAccessFile(const std::string& fname,
                                   TransactionToken* token,
                                   std::unique_ptr<RandomAccessFile>* result) {
    return absl::OkStatus();
  }
  absl::Status NewWritableFile(const std::string& fname,
                               std::unique_ptr<WritableFile>* result);
  absl::Status NewWritableFile(const std::string& fname,
                               TransactionToken* token,
                               std::unique_ptr<WritableFile>* result) {
    return absl::OkStatus();
  }
  absl::Status NewAppendableFile(const std::string& fname,
                                 std::unique_ptr<WritableFile>* result);
  absl::Status NewAppendableFile(const std::string& fname,
                                 TransactionToken* token,
                                 std::unique_ptr<WritableFile>* result) {
    return absl::OkStatus();
  }
  absl::Status NewReadOnlyMemoryRegionFromFile(
      const std::string& fname, std::unique_ptr<ReadOnlyMemoryRegion>* result);
  absl::Status NewReadOnlyMemoryRegionFromFile(
      const std::string& fname, TransactionToken* token,
      std::unique_ptr<ReadOnlyMemoryRegion>* result) {
    return absl::OkStatus();
  }
  // Existence, listing, and pattern matching.
  absl::Status FileExists(const std::string& fname);
  absl::Status FileExists(const std::string& fname, TransactionToken* token) {
    return absl::OkStatus();
  }
  bool FilesExist(const std::vector<string>& files,
                  std::vector<absl::Status>* status);
  bool FilesExist(const std::vector<string>& files, TransactionToken* token,
                  std::vector<absl::Status>* status) {
    return true;
  }
  absl::Status GetChildren(const std::string& dir, std::vector<string>* result);
  absl::Status GetChildren(const std::string& dir, TransactionToken* token,
                           std::vector<string>* result) {
    return absl::OkStatus();
  }
  virtual bool MatchPath(const std::string& path,
                         const std::string& pattern) = 0;
  virtual absl::Status GetMatchingPaths(const std::string& pattern,
                                        std::vector<string>* results);
  absl::Status GetMatchingPaths(const std::string& pattern,
                                TransactionToken* token,
                                std::vector<string>* results) {
    return absl::OkStatus();
  }
  // Deletion and directory management.
  absl::Status DeleteFile(const std::string& fname);
  absl::Status DeleteFile(const std::string& fname, TransactionToken* token) {
    return absl::OkStatus();
  }
  absl::Status DeleteRecursively(const std::string& dirname,
                                 int64_t* undeleted_files,
                                 int64_t* undeleted_dirs);
  absl::Status DeleteRecursively(const std::string& dirname,
                                 TransactionToken* token,
                                 int64_t* undeleted_files,
                                 int64_t* undeleted_dirs) {
    return absl::OkStatus();
  }
  absl::Status RecursivelyCreateDir(const std::string& dirname);
  absl::Status RecursivelyCreateDir(const std::string& dirname,
                                    TransactionToken* token) {
    return absl::OkStatus();
  }
  absl::Status CreateDir(const std::string& dirname);
  absl::Status CreateDir(const std::string& dirname, TransactionToken* token) {
    return absl::OkStatus();
  }
  absl::Status DeleteDir(const std::string& dirname);
  absl::Status DeleteDir(const std::string& dirname, TransactionToken* token) {
    return absl::OkStatus();
  }
  // Metadata queries.
  absl::Status Stat(const std::string& fname, FileStatistics* stat);
  absl::Status Stat(const std::string& fname, TransactionToken* token,
                    FileStatistics* stat) {
    return absl::OkStatus();
  }
  absl::Status IsDirectory(const std::string& fname);
  absl::Status HasAtomicMove(const std::string& path, bool* has_atomic_move);
  absl::Status CanCreateTempFile(const std::string& fname,
                                 bool* can_create_temp_file);
  absl::Status GetFileSize(const std::string& fname, uint64* file_size);
  absl::Status GetFileSize(const std::string& fname, TransactionToken* token,
                           uint64* file_size) {
    return absl::OkStatus();
  }
  // Rename / copy.
  absl::Status RenameFile(const std::string& src, const std::string& target);
  absl::Status RenameFile(const std::string& src, const std::string& target,
                          TransactionToken* token) {
    return absl::OkStatus();
  }
  absl::Status CopyFile(const std::string& src, const std::string& target);
  absl::Status CopyFile(const std::string& src, const std::string& target,
                        TransactionToken* token) {
    return absl::OkStatus();
  }
  // Filesystem transactions: currently stubbed out -- every call succeeds
  // and all tokens produced are null.
  absl::Status StartTransaction(const std::string& filename,
                                TransactionToken** token) {
    *token = nullptr;
    return absl::OkStatus();
  }
  absl::Status AddToTransaction(const std::string& path,
                                TransactionToken* token) {
    return absl::OkStatus();
  }
  absl::Status GetTokenOrStartTransaction(const std::string& path,
                                          TransactionToken** token) {
    *token = nullptr;
    return absl::OkStatus();
  }
  absl::Status GetTransactionForPath(const std::string& path,
                                     TransactionToken** token) {
    *token = nullptr;
    return absl::OkStatus();
  }
  absl::Status EndTransaction(TransactionToken* token) {
    return absl::OkStatus();
  }
  // Process and temp-file helpers.
  std::string GetExecutablePath();
  bool LocalTempFilename(std::string* filename);
  bool CreateUniqueFileName(std::string* prefix, const std::string& suffix);
  virtual std::string GetRunfilesDir() = 0;
  // Clock: defaults delegate to EnvTime; override for virtual time in tests.
  virtual uint64 NowNanos() const { return EnvTime::NowNanos(); }
  virtual uint64 NowMicros() const { return EnvTime::NowMicros(); }
  virtual uint64 NowSeconds() const { return EnvTime::NowSeconds(); }
  virtual void SleepForMicroseconds(int64_t micros) = 0;
  int32 GetProcessId();
  // Threading and closure scheduling.
  virtual Thread* StartThread(
      const ThreadOptions& thread_options, const std::string& name,
      absl::AnyInvocable<void()> fn) TF_MUST_USE_RESULT = 0;
  virtual int32 GetCurrentThreadId() = 0;
  virtual bool GetCurrentThreadName(std::string* name) = 0;
  virtual void SchedClosure(absl::AnyInvocable<void()> closure) = 0;
  virtual void SchedClosureAfter(int64_t micros,
                                 absl::AnyInvocable<void()> closure) = 0;
  // Dynamic library loading.
  virtual absl::Status LoadDynamicLibrary(const char* library_filename,
                                          void** handle) = 0;
  virtual absl::Status GetSymbolFromLibrary(void* handle,
                                            const char* symbol_name,
                                            void** symbol) = 0;
  virtual std::string FormatLibraryFileName(const std::string& name,
                                            const std::string& version) = 0;
  virtual void GetLocalTempDirectories(std::vector<string>* list) = 0;
 private:
  std::unique_ptr<FileSystemRegistry> file_system_registry_;
  Env(const Env&) = delete;
  void operator=(const Env&) = delete;
};
// An Env that forwards every virtual call to another Env instance. Subclass
// and override selected methods to change one behavior at a time.
class EnvWrapper : public Env {
 public:
  // Stores `t` as a raw pointer. NOTE(review): no visible ownership
  // transfer here and ~EnvWrapper is out-of-line -- `t` should outlive the
  // wrapper; confirm in the implementation.
  explicit EnvWrapper(Env* t) : target_(t) {}
  ~EnvWrapper() override;
  Env* target() const { return target_; }
  absl::Status GetFileSystemForFile(const std::string& fname,
                                    FileSystem** result) override {
    return target_->GetFileSystemForFile(fname, result);
  }
  absl::Status GetRegisteredFileSystemSchemes(
      std::vector<string>* schemes) override {
    return target_->GetRegisteredFileSystemSchemes(schemes);
  }
  absl::Status RegisterFileSystem(
      const std::string& scheme, FileSystemRegistry::Factory factory) override {
    return target_->RegisterFileSystem(scheme, factory);
  }
  bool MatchPath(const std::string& path, const std::string& pattern) override {
    return target_->MatchPath(path, pattern);
  }
  uint64 NowMicros() const override { return target_->NowMicros(); }
  void SleepForMicroseconds(int64_t micros) override {
    target_->SleepForMicroseconds(micros);
  }
  Thread* StartThread(const ThreadOptions& thread_options,
                      const std::string& name,
                      absl::AnyInvocable<void()> fn) override {
    return target_->StartThread(thread_options, name, std::move(fn));
  }
  int32 GetCurrentThreadId() override { return target_->GetCurrentThreadId(); }
  bool GetCurrentThreadName(std::string* name) override {
    return target_->GetCurrentThreadName(name);
  }
  void SchedClosure(absl::AnyInvocable<void()> closure) override {
    target_->SchedClosure(std::move(closure));
  }
  void SchedClosureAfter(int64_t micros,
                         absl::AnyInvocable<void()> closure) override {
    target_->SchedClosureAfter(micros, std::move(closure));
  }
  absl::Status LoadDynamicLibrary(const char* library_filename,
                                  void** handle) override {
    return target_->LoadDynamicLibrary(library_filename, handle);
  }
  absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name,
                                    void** symbol) override {
    return target_->GetSymbolFromLibrary(handle, symbol_name, symbol);
  }
  std::string FormatLibraryFileName(const std::string& name,
                                    const std::string& version) override {
    return target_->FormatLibraryFileName(name, version);
  }
  std::string GetRunfilesDir() override { return target_->GetRunfilesDir(); }
 private:
  void GetLocalTempDirectories(std::vector<string>* list) override {
    target_->GetLocalTempDirectories(list);
  }
  Env* target_;  // The wrapped Env; see ownership note on the constructor.
};
// Handle for a thread started via Env::StartThread; non-copyable.
class Thread {
 public:
  Thread() {}
  // NOTE(review): destructor is defined out-of-line; presumably it joins
  // the underlying thread -- confirm in the platform implementation.
  virtual ~Thread();
 private:
  Thread(const Thread&) = delete;
  void operator=(const Thread&) = delete;
};
// Portable wrappers over the C-library setenv/unsetenv.
int setenv(const char* name, const char* value, int overwrite);
int unsetenv(const char* name);
// Options for Env::StartThread.
struct ThreadOptions {
  // Thread stack size in bytes; 0 means use the system default (the POSIX
  // implementation only calls pthread_attr_setstacksize when non-zero).
  size_t stack_size = 0;
  // Guard area size in bytes; 0 presumably means system default -- confirm.
  size_t guard_size = 0;
  // NUMA node to run on, or kNUMANoAffinity for no pinning.
  int numa_node = port::kNUMANoAffinity;
};
// Copies `src` on `src_fs` to `target` on `target_fs` (possibly different
// file systems).
absl::Status FileSystemCopyFile(FileSystem* src_fs, const std::string& src,
                                FileSystem* target_fs,
                                const std::string& target);
// Reads the entire file into `data`.
absl::Status ReadFileToString(Env* env, const std::string& fname,
                              std::string* data);
// Writes `data` to the file, replacing any existing contents.
absl::Status WriteStringToFile(Env* env, const std::string& fname,
                               const StringPiece& data);
// Serializes `proto` in binary wire format to the file.
absl::Status WriteBinaryProto(Env* env, const std::string& fname,
                              const protobuf::MessageLite& proto);
// Parses `proto` from the file's binary wire format.
absl::Status ReadBinaryProto(Env* env, const std::string& fname,
                             protobuf::MessageLite* proto);
// Text-format I/O requires full protobuf reflection; the MessageLite
// overloads below always fail with Unimplemented.
inline absl::Status WriteTextProto(Env* ,
                                   const std::string& ,
                                   const protobuf::MessageLite& ) {
  return errors::Unimplemented("Can't write text protos with protolite.");
}
absl::Status WriteTextProto(Env* env, const std::string& fname,
                            const protobuf::Message& proto);
inline absl::Status ReadTextProto(Env* ,
                                  const std::string& ,
                                  protobuf::MessageLite* ) {
  return errors::Unimplemented("Can't parse text protos with protolite.");
}
absl::Status ReadTextProto(Env* env, const std::string& fname,
                           protobuf::Message* proto);
// Reads the proto, accepting either text or binary serialization.
absl::Status ReadTextOrBinaryProto(Env* env, const std::string& fname,
                                   protobuf::Message* proto);
absl::Status ReadTextOrBinaryProto(Env* env, const std::string& fname,
                                   protobuf::MessageLite* proto);
namespace register_file_system {
// Static-registration helper used by the REGISTER_FILE_SYSTEM* macros:
// constructing a Register<Factory> registers Factory's FileSystem under
// `scheme` on `env` at static-initialization time.
template <typename Factory>
struct Register {
  // When `try_modular_filesystems` is true and the TF_USE_MODULAR_FILESYSTEM
  // environment variable is "true"/"1", legacy registration is skipped so a
  // modular file system plugin can provide the scheme instead.
  Register(Env* env, const std::string& scheme, bool try_modular_filesystems) {
    if (try_modular_filesystems) {
      const char* env_value = getenv("TF_USE_MODULAR_FILESYSTEM");
      string load_plugin = env_value ? absl::AsciiStrToLower(env_value) : "";
      if (load_plugin == "true" || load_plugin == "1") {
        // Fix: the tail of this string literal (the URL and closing quote)
        // had been lost, leaving an unterminated literal; restored here.
        LOG(WARNING) << "Using modular file system for '" << scheme << "'."
                     << " Please switch to tensorflow-io"
                     << " (https://github.com/tensorflow/io) for continued"
                     << " support of '" << scheme << "'.";
        return;
      }
    }
    // Registration errors (e.g. a duplicate scheme) are deliberately
    // ignored: static initialization must not crash program startup.
    env->RegisterFileSystem(scheme, []() -> FileSystem* { return new Factory; })
        .IgnoreError();
  }
};
}
}
// Registers `factory` for `scheme` on a specific Env. `modular` selects
// whether TF_USE_MODULAR_FILESYSTEM may divert registration to a plugin.
#define REGISTER_FILE_SYSTEM_ENV(env, scheme, factory, modular) \
  REGISTER_FILE_SYSTEM_UNIQ_HELPER(__COUNTER__, env, scheme, factory, modular)
// Extra indirection so __COUNTER__ expands before token pasting.
#define REGISTER_FILE_SYSTEM_UNIQ_HELPER(ctr, env, scheme, factory, modular) \
  REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory, modular)
// Defines a uniquely named static Register<factory>, whose constructor
// performs the registration during static initialization.
#define REGISTER_FILE_SYSTEM_UNIQ(ctr, env, scheme, factory, modular)    \
  static ::tsl::register_file_system::Register<factory> register_ff##ctr \
      TF_ATTRIBUTE_UNUSED =                                              \
          ::tsl::register_file_system::Register<factory>(env, scheme, modular)
// Registers on the default Env, without the modular-filesystem opt-out.
#define REGISTER_FILE_SYSTEM(scheme, factory) \
  REGISTER_FILE_SYSTEM_ENV(::tsl::Env::Default(), scheme, factory, false);
// Like REGISTER_FILE_SYSTEM, but honors TF_USE_MODULAR_FILESYSTEM.
#define REGISTER_LEGACY_FILE_SYSTEM(scheme, factory) \
  REGISTER_FILE_SYSTEM_ENV(::tsl::Env::Default(), scheme, factory, true);
#endif
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#ifdef __FreeBSD__
#include <pthread_np.h>
#endif
#include <map>
#include <thread>
#include <vector>
#include "tsl/platform/default/posix_file_system.h"
#include "tsl/platform/env.h"
#include "tsl/platform/load_library.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/ram_file_system.h"
#include "tsl/platform/strcat.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
// Guards GetThreadNameRegistry().
mutex name_mutex(tsl::LINKER_INITIALIZED);
// Maps std::thread ids to human-readable names (presumably populated when
// threads are started via PThread -- confirm). Lazily constructed and
// intentionally leaked so it survives static destruction order.
std::map<std::thread::id, string>& GetThreadNameRegistry()
    TF_EXCLUSIVE_LOCKS_REQUIRED(name_mutex) {
  static auto* thread_name_registry = new std::map<std::thread::id, string>();
  return *thread_name_registry;
}
class PThread : public Thread {
public:
PThread(const ThreadOptions& thread_options, const std::string& name,
absl::AnyInvocable<void()> fn) {
ThreadParams* params = new ThreadParams;
params->name = name;
params->fn = std::move(fn);
pthread_attr_t attributes;
pthread_attr_init(&attributes);
if (thread_options.stack_size != 0) {
pthread_attr_setstacksize(&attributes, thread_options.stack_size);
}
int ret = pthread_create(&thread_, &attributes, &ThreadFn, para | #include "tensorflow/core/platform/env.h"
#include <sys/stat.h>
#include <memory>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/platform/cord.h"
#include "tensorflow/core/platform/null_file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
namespace tsl {
namespace {
// Writes a file of `length` bytes at `filename` where byte i holds the value
// i (mod 256), and returns the exact contents written.
string CreateTestFile(Env* env, const string& filename, int length) {
  string contents(length, 0);
  for (int i = 0; i < length; ++i) {
    contents[i] = static_cast<char>(i);
  }
  TF_EXPECT_OK(WriteStringToFile(env, filename, contents));
  return contents;
}
// Builds a small two-node GraphDef fixture: {name1/op1, name2/op2}.
tensorflow::GraphDef CreateTestProto() {
  tensorflow::GraphDef graph;
  tensorflow::NodeDef* first = graph.add_node();
  first->set_name("name1");
  first->set_op("op1");
  tensorflow::NodeDef* second = graph.add_node();
  second->set_name("name2");
  second->set_op("op2");
  return graph;
}
// Asserts that `s` contains `expected`, with a readable failure message.
static void ExpectHasSubstr(StringPiece s, StringPiece expected) {
  EXPECT_TRUE(absl::StrContains(s, expected))
      << "'" << s << "' does not contain '" << expected << "'";
}
}  // namespace
// Scratch directory shared by all DefaultEnvTest cases below.
string BaseDir() { return io::JoinPath(testing::TmpDir(), "base_dir"); }
// Fixture that creates BaseDir() before each test and recursively deletes it
// afterwards, CHECK-failing if anything could not be removed.
class DefaultEnvTest : public ::testing::Test {
 protected:
  void SetUp() override { TF_CHECK_OK(env_->CreateDir(BaseDir())); }
  void TearDown() override {
    int64_t undeleted_files, undeleted_dirs;
    TF_CHECK_OK(
        env_->DeleteRecursively(BaseDir(), &undeleted_files, &undeleted_dirs));
  }
  Env* env_ = Env::Default();
};
// Reading past EOF must return OUT_OF_RANGE while still yielding the bytes
// that were available; reading exactly the file size must succeed.
TEST_F(DefaultEnvTest, IncompleteReadOutOfRange) {
  const string filename = io::JoinPath(BaseDir(), "out_of_range");
  const string input = CreateTestFile(env_, filename, 2);
  std::unique_ptr<RandomAccessFile> f;
  TF_EXPECT_OK(env_->NewRandomAccessFile(filename, &f));
  StringPiece result;
  char scratch[3];
  // Request 3 bytes of a 2-byte file: partial data plus OUT_OF_RANGE.
  EXPECT_EQ(error::OUT_OF_RANGE, f->Read(0, 3, &result, scratch).code());
  EXPECT_EQ(input, result);
  // Request exactly the file size: OK.
  TF_EXPECT_OK(f->Read(0, 2, &result, scratch));
  EXPECT_EQ(input, result);
}
// Round-trips files of many sizes (zero, power-of-two boundaries, and a
// >256MB case) through WriteStringToFile/ReadFileToString, and checks Stat.
TEST_F(DefaultEnvTest, ReadFileToString) {
  for (const int length : {0, 1, 1212, 2553, 4928, 8196, 9000, (1 << 20) - 1,
                           1 << 20, (1 << 20) + 1, (256 << 20) + 100}) {
    // "bar/.." exercises path normalization in the file system.
    const string filename =
        io::JoinPath(BaseDir(), "bar", "..", strings::StrCat("file", length));
    const string input = CreateTestFile(env_, filename, length);
    string output;
    TF_EXPECT_OK(ReadFileToString(env_, filename, &output));
    EXPECT_EQ(length, output.size());
    EXPECT_EQ(input, output);
    FileStatistics stat;
    TF_EXPECT_OK(env_->Stat(filename, &stat));
    EXPECT_EQ(length, stat.length);
    EXPECT_FALSE(stat.is_directory);
  }
}
// Round-trips a GraphDef through WriteBinaryProto/ReadBinaryProto and the
// text-or-binary fallback reader.
TEST_F(DefaultEnvTest, ReadWriteBinaryProto) {
  const tensorflow::GraphDef proto = CreateTestProto();
  // Use JoinPath (not StrCat) so the file lives *inside* BaseDir() and is
  // removed by TearDown()'s DeleteRecursively; StrCat produced the sibling
  // path "<tmp>/base_dirbinary_proto" that leaked after the test.
  const string filename = io::JoinPath(BaseDir(), "binary_proto");
  TF_EXPECT_OK(WriteBinaryProto(env_, filename, proto));
  tensorflow::GraphDef result;
  TF_EXPECT_OK(ReadBinaryProto(env_, filename, &result));
  EXPECT_EQ(result.DebugString(), proto.DebugString());
  tensorflow::GraphDef result2;
  TF_EXPECT_OK(ReadTextOrBinaryProto(env_, filename, &result2));
  EXPECT_EQ(result2.DebugString(), proto.DebugString());
}
// Writes a GraphDef in text format by hand, then reads it back via both the
// text reader and the text-or-binary fallback reader.
TEST_F(DefaultEnvTest, ReadWriteTextProto) {
  const tensorflow::GraphDef proto = CreateTestProto();
  // Use JoinPath (not StrCat) so the file is created inside BaseDir() and is
  // cleaned up by TearDown(); StrCat produced the sibling path
  // "<tmp>/base_dirtext_proto" that outlived the test.
  const string filename = io::JoinPath(BaseDir(), "text_proto");
  string as_text;
  EXPECT_TRUE(protobuf::TextFormat::PrintToString(proto, &as_text));
  TF_EXPECT_OK(WriteStringToFile(env_, filename, as_text));
  tensorflow::GraphDef result;
  TF_EXPECT_OK(ReadTextProto(env_, filename, &result));
  EXPECT_EQ(result.DebugString(), proto.DebugString());
  tensorflow::GraphDef result2;
  TF_EXPECT_OK(ReadTextOrBinaryProto(env_, filename, &result2));
  EXPECT_EQ(result2.DebugString(), proto.DebugString());
}
// Maps files of various sizes as read-only memory regions and verifies the
// mapped bytes match what was written.
TEST_F(DefaultEnvTest, FileToReadonlyMemoryRegion) {
  for (const int length : {1, 1212, 2553, 4928, 8196, 9000, (1 << 20) - 1,
                           1 << 20, (1 << 20) + 1}) {
    const string filename =
        io::JoinPath(BaseDir(), strings::StrCat("file", length));
    const string input = CreateTestFile(env_, filename, length);
    std::unique_ptr<ReadOnlyMemoryRegion> region;
    // Fixed: "&region" had been mangled into the "®" character by a bad
    // HTML-entity round trip in this copy of the file.
    TF_EXPECT_OK(env_->NewReadOnlyMemoryRegionFromFile(filename, &region));
    ASSERT_NE(region, nullptr);
    EXPECT_EQ(length, region->length());
    EXPECT_EQ(input, string(reinterpret_cast<const char*>(region->data()),
                            region->length()));
    FileStatistics stat;
    TF_EXPECT_OK(env_->Stat(filename, &stat));
    EXPECT_EQ(length, stat.length);
    EXPECT_FALSE(stat.is_directory);
  }
}
// Builds a small tree (two child dirs, regular and dot-prefixed files) and
// verifies DeleteRecursively removes everything with zero failures.
TEST_F(DefaultEnvTest, DeleteRecursively) {
  const string parent_dir = io::JoinPath(BaseDir(), "root_dir");
  const string child_dir1 = io::JoinPath(parent_dir, "child_dir1");
  const string child_dir2 = io::JoinPath(parent_dir, "child_dir2");
  TF_EXPECT_OK(env_->CreateDir(parent_dir));
  const string root_file1 = io::JoinPath(parent_dir, "root_file1");
  const string root_file2 = io::JoinPath(parent_dir, "root_file2");
  // Hidden (dot-prefixed) files must be deleted too.
  const string root_file3 = io::JoinPath(parent_dir, ".root_file3");
  CreateTestFile(env_, root_file1, 100);
  CreateTestFile(env_, root_file2, 100);
  CreateTestFile(env_, root_file3, 100);
  TF_EXPECT_OK(env_->CreateDir(child_dir1));
  const string child1_file1 = io::JoinPath(child_dir1, "child1_file1");
  CreateTestFile(env_, child1_file1, 100);
  TF_EXPECT_OK(env_->CreateDir(child_dir2));
  int64_t undeleted_files, undeleted_dirs;
  TF_EXPECT_OK(
      env_->DeleteRecursively(parent_dir, &undeleted_files, &undeleted_dirs));
  EXPECT_EQ(0, undeleted_files);
  EXPECT_EQ(0, undeleted_dirs);
  EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(root_file1).code());
  EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(root_file2).code());
  EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(root_file3).code());
  EXPECT_EQ(error::Code::NOT_FOUND, env_->FileExists(child1_file1).code());
}
// Deleting a non-existent directory reports NOT_FOUND and counts the missing
// root as one undeleted directory.
TEST_F(DefaultEnvTest, DeleteRecursivelyFail) {
  const string parent_dir = io::JoinPath(BaseDir(), "root_dir");
  int64_t undeleted_files, undeleted_dirs;
  absl::Status s =
      env_->DeleteRecursively(parent_dir, &undeleted_files, &undeleted_dirs);
  EXPECT_EQ(error::Code::NOT_FOUND, s.code());
  EXPECT_EQ(0, undeleted_files);
  EXPECT_EQ(1, undeleted_dirs);
}
// RecursivelyCreateDir must create missing parents and be idempotent.
TEST_F(DefaultEnvTest, RecursivelyCreateDir) {
  const string create_path = io::JoinPath(BaseDir(), "a", "b", "c", "d");
  TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
  TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
  TF_EXPECT_OK(env_->FileExists(create_path));
}
// An empty path is accepted as a no-op.
TEST_F(DefaultEnvTest, RecursivelyCreateDirEmpty) {
  TF_CHECK_OK(env_->RecursivelyCreateDir(""));
}
// Pre-existing leading components must not cause failures, and the
// intermediate components must exist afterwards.
TEST_F(DefaultEnvTest, RecursivelyCreateDirSubdirsExist) {
  const string subdir_path = io::JoinPath(BaseDir(), "a", "b");
  TF_CHECK_OK(env_->CreateDir(io::JoinPath(BaseDir(), "a")));
  TF_CHECK_OK(env_->CreateDir(subdir_path));
  TF_EXPECT_OK(env_->FileExists(subdir_path));
  const string create_path = io::JoinPath(BaseDir(), "a", "b", "c", "d");
  TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
  TF_CHECK_OK(env_->RecursivelyCreateDir(create_path));
  TF_EXPECT_OK(env_->FileExists(create_path));
  TF_EXPECT_OK(env_->FileExists(io::JoinPath(BaseDir(), "a", "b", "c")));
}
// Exercises the local file system both with and without the "file://" URI
// scheme, including glob matching via GetMatchingPaths.
// NOTE(review): the scheme literals were truncated at "file:" in this copy of
// the file; restored to "file://", the local file system's URI scheme.
TEST_F(DefaultEnvTest, LocalFileSystem) {
  int expected_num_files = 0;
  std::vector<string> matching_paths;
  for (const int length : {0, 1, 1212, 2553, 4928, 8196, 9000, (1 << 20) - 1,
                           1 << 20, (1 << 20) + 1}) {
    string filename = io::JoinPath(BaseDir(), strings::StrCat("len", length));
    filename = strings::StrCat("file://", filename);
    const string input = CreateTestFile(env_, filename, length);
    ++expected_num_files;
    // Globbing with the URI scheme must see every file written so far.
    TF_EXPECT_OK(env_->GetMatchingPaths(
        strings::StrCat("file://", io::JoinPath(BaseDir(), "l*")),
        &matching_paths));
    EXPECT_EQ(expected_num_files, matching_paths.size());
    // Globbing without a scheme must behave identically.
    TF_EXPECT_OK(env_->GetMatchingPaths(
        io::JoinPath(BaseDir(), "l*"), &matching_paths));
    EXPECT_EQ(expected_num_files, matching_paths.size());
    string output;
    TF_EXPECT_OK(ReadFileToString(env_, filename, &output));
    EXPECT_EQ(length, output.size());
    EXPECT_EQ(input, output);
    FileStatistics stat;
    TF_EXPECT_OK(env_->Stat(filename, &stat));
    EXPECT_EQ(length, stat.length);
    EXPECT_FALSE(stat.is_directory);
  }
}
// SleepForMicroseconds(1.5s) must elapse at least that long; the 200us slack
// absorbs clock quantization.
TEST_F(DefaultEnvTest, SleepForMicroseconds) {
  const int64_t start = env_->NowMicros();
  const int64_t sleep_time = 1e6 + 5e5;
  env_->SleepForMicroseconds(sleep_time);
  const int64_t delta = env_->NowMicros() - start;
  EXPECT_GE(delta, sleep_time - 200);
}
// Test-only file system registered under the "tmpdirfs" scheme. It mirrors
// paths into BaseDir() on the default local file system, records directories
// it creates, and exposes a sentinel path "/flushed" that only exists after
// FlushCaches() has been invoked.
class TmpDirFileSystem : public NullFileSystem {
 public:
  TF_USE_FILESYSTEM_METHODS_WITH_NO_TRANSACTION_SUPPORT;
  absl::Status FileExists(const string& dir, TransactionToken* token) override {
    StringPiece scheme, host, path;
    io::ParseURI(dir, &scheme, &host, &path);
    if (path.empty()) return errors::NotFound(dir, " not found");
    // Sentinel used by the FlushFileSystemCaches test.
    if (path == "/flushed") {
      if (flushed_) {
        return absl::OkStatus();
      } else {
        return errors::NotFound("FlushCaches() not called yet");
      }
    }
    // All other paths are resolved against BaseDir() on the local FS.
    return Env::Default()->FileExists(io::JoinPath(BaseDir(), path));
  }
  absl::Status CreateDir(const string& dir, TransactionToken* token) override {
    StringPiece scheme, host, path;
    io::ParseURI(dir, &scheme, &host, &path);
    // Enforce the exact URI form "tmpdirfs://testhost/..." expected by tests.
    if (scheme != "tmpdirfs") {
      return errors::FailedPrecondition("scheme must be tmpdirfs");
    }
    if (host != "testhost") {
      return errors::FailedPrecondition("host must be testhost");
    }
    absl::Status status =
        Env::Default()->CreateDir(io::JoinPath(BaseDir(), path));
    if (status.ok()) {
      // Remember successful creations so IsDirectory can answer from memory.
      created_directories_.push_back(std::string(path));
    }
    return status;
  }
  absl::Status IsDirectory(const string& dir,
                           TransactionToken* token) override {
    StringPiece scheme, host, path;
    io::ParseURI(dir, &scheme, &host, &path);
    for (const auto& existing_dir : created_directories_)
      if (existing_dir == path) return absl::OkStatus();
    return errors::NotFound(dir, " not found");
  }
  void FlushCaches(TransactionToken* token) override { flushed_ = true; }
 private:
  bool flushed_ = false;
  // "/" is pre-seeded so the root is always considered an existing directory.
  std::vector<std::string> created_directories_ = {"/"};
};
REGISTER_FILE_SYSTEM("tmpdirfs", TmpDirFileSystem);
// The tmpdirfs sentinel "/flushed" only exists after FlushFileSystemCaches()
// has invoked TmpDirFileSystem::FlushCaches().
// NOTE(review): the URI literal was truncated at "tmpdirfs:" in this copy;
// restored to the scheme/host form TmpDirFileSystem expects.
TEST_F(DefaultEnvTest, FlushFileSystemCaches) {
  Env* env = Env::Default();
  const string flushed =
      strings::StrCat("tmpdirfs://", "testhost", "/flushed");
  EXPECT_EQ(error::Code::NOT_FOUND, env->FileExists(flushed).code());
  TF_EXPECT_OK(env->FlushFileSystemCaches());
  TF_EXPECT_OK(env->FileExists(flushed));
}
// RecursivelyCreateDir must work through a URI-addressed custom file system
// (tmpdirfs) and remain idempotent.
// NOTE(review): the URI literal was truncated at "tmpdirfs:" in this copy;
// restored to the "tmpdirfs://testhost/..." form CreateDir validates.
TEST_F(DefaultEnvTest, RecursivelyCreateDirWithUri) {
  Env* env = Env::Default();
  const string create_path = strings::StrCat(
      "tmpdirfs://", "testhost", "/a/b/c/d");
  EXPECT_EQ(error::Code::NOT_FOUND, env->FileExists(create_path).code());
  TF_CHECK_OK(env->RecursivelyCreateDir(create_path));
  TF_CHECK_OK(env->RecursivelyCreateDir(create_path));
  TF_EXPECT_OK(env->FileExists(create_path));
}
// The path reported for the running executable must exist.
TEST_F(DefaultEnvTest, GetExecutablePath) {
  Env* env = Env::Default();
  TF_EXPECT_OK(env->FileExists(env->GetExecutablePath()));
}
// Full lifecycle of a local temp file: unique name, write, append-position
// query, random-access read, delete.
TEST_F(DefaultEnvTest, LocalTempFilename) {
  Env* env = Env::Default();
  string filename;
  EXPECT_TRUE(env->LocalTempFilename(&filename));
  EXPECT_FALSE(env->FileExists(filename).ok());
  std::unique_ptr<WritableFile> file_to_write;
  TF_CHECK_OK(env->NewWritableFile(filename, &file_to_write));
#if defined(PLATFORM_GOOGLE)
  // Exercise the Cord-taking Append overload where it is available.
  TF_CHECK_OK(file_to_write->Append("Nu"));
  TF_CHECK_OK(file_to_write->Append(absl::Cord("ll")));
#else
  TF_CHECK_OK(file_to_write->Append("Null"));
#endif
  TF_CHECK_OK(file_to_write->Close());
  TF_CHECK_OK(env->FileExists(filename));
  // An appendable file opened on existing content reports its size via Tell.
  std::unique_ptr<WritableFile> file_to_append;
  TF_CHECK_OK(env->NewAppendableFile(filename, &file_to_append));
  int64_t pos;
  TF_CHECK_OK(file_to_append->Tell(&pos));
  ASSERT_EQ(4, pos);
  // Over-long read returns OUT_OF_RANGE but still yields the 4 bytes.
  std::unique_ptr<RandomAccessFile> file_to_read;
  TF_CHECK_OK(env->NewRandomAccessFile(filename, &file_to_read));
  StringPiece content;
  char scratch[1024];
  CHECK_EQ(
      error::OUT_OF_RANGE,
      file_to_read->Read(0, 1024, &content, scratch).code());
  EXPECT_EQ("Null", content);
  TF_CHECK_OK(env->DeleteFile(filename));
  EXPECT_FALSE(env->FileExists(filename).ok());
}
// CreateUniqueFileName must keep the caller's prefix and append the suffix.
TEST_F(DefaultEnvTest, CreateUniqueFileName) {
  Env* env = Env::Default();
  string prefix = "tempfile-prefix-";
  string suffix = ".tmp";
  string filename = prefix;
  EXPECT_TRUE(env->CreateUniqueFileName(&filename, suffix));
  EXPECT_TRUE(absl::StartsWith(filename, prefix));
  EXPECT_TRUE(str_util::EndsWith(filename, suffix));
}
TEST_F(DefaultEnvTest, GetProcessId) {
  Env* env = Env::Default();
  EXPECT_NE(env->GetProcessId(), 0);
}
// Thread id/name queries on the main thread; availability differs per OS,
// hence the platform guards.
TEST_F(DefaultEnvTest, GetThreadInformation) {
  Env* env = Env::Default();
#if !defined(__APPLE__)
  EXPECT_NE(env->GetCurrentThreadId(), 0);
#endif
  string thread_name;
  bool res = env->GetCurrentThreadName(&thread_name);
#if defined(PLATFORM_WINDOWS) || defined(__ANDROID__)
  EXPECT_FALSE(res);
#elif !defined(__APPLE__)
  EXPECT_TRUE(res);
  EXPECT_GT(thread_name.size(), 0);
#endif
}
// Threads created via StartThread must see the name they were given.
TEST_F(DefaultEnvTest, GetChildThreadInformation) {
  Env* env = Env::Default();
  Thread* child_thread = env->StartThread({}, "tf_child_thread", [env]() {
#if !defined(__APPLE__)
    EXPECT_NE(env->GetCurrentThreadId(), 0);
#endif
    string thread_name;
    bool res = env->GetCurrentThreadName(&thread_name);
    EXPECT_TRUE(res);
    ExpectHasSubstr(thread_name, "tf_child_thread");
  });
  // Deleting the Thread joins it, so the lambda's checks have run by now.
  delete child_thread;
}
} |
290 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_OPTIMIZATION_REGISTRY_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_FUNCTION_OPTIMIZATION_REGISTRY_H_
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
// Interface for a pass that rewrites a function's graph during function
// optimization. Exactly one implementation may be registered in the
// FunctionOptimizationPassRegistry below.
class FunctionOptimizationPass {
 public:
  // Knobs forwarded to the pass alongside the session ConfigProto.
  struct FunctionOptions {
    // Device type string for XLA compilation; empty disables it.
    // NOTE(review): semantics inferred from the field name — confirm against
    // the registered pass implementation.
    std::string xla_compile_device_type = "";
    bool allow_soft_placement = false;
  };
  virtual ~FunctionOptimizationPass() {}
  // Rewrites `graph` (and possibly `flib_def` / `control_ret_node_names`) in
  // place; implementations set `*control_rets_updated` when they change the
  // control return names.
  virtual Status Run(const std::string& function_name,
                     const DeviceSet& device_set,
                     const ConfigProto& config_proto,
                     const FunctionOptions& function_options,
                     std::unique_ptr<Graph>* graph,
                     FunctionLibraryDefinition* flib_def,
                     std::vector<std::string>* control_ret_node_names,
                     bool* control_rets_updated) = 0;
};
// Process-wide holder for the single registered FunctionOptimizationPass.
class FunctionOptimizationPassRegistry {
 public:
  // Installs the pass; may be called at most once (see Init's DCHECK).
  void Init(std::unique_ptr<FunctionOptimizationPass> pass);
  // Runs the registered pass, or returns OK when none is registered.
  Status Run(const std::string& function_name, const DeviceSet& device_set,
             const ConfigProto& config_proto,
             const FunctionOptimizationPass::FunctionOptions& function_options,
             std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
             std::vector<std::string>* control_ret_node_names,
             bool* control_rets_updated);
  // Returns the process-wide singleton registry.
  static FunctionOptimizationPassRegistry& Global();
 private:
  std::unique_ptr<FunctionOptimizationPass> pass_;
};
namespace function_optimization_registration {
// RAII-style helper: constructing a static instance registers `pass` with the
// global registry during static initialization.
class FunctionOptimizationPassRegistration {
 public:
  explicit FunctionOptimizationPassRegistration(
      std::unique_ptr<FunctionOptimizationPass> pass) {
    FunctionOptimizationPassRegistry::Global().Init(std::move(pass));
  }
};
}
}
#endif
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include <string>
#include "tensorflow/core/framework/metrics.h"
namespace tensorflow {
void FunctionOptimizationPassRegistry::Init(
    std::unique_ptr<FunctionOptimizationPass> pass) {
  // Only one pass may ever be installed (debug-build check only).
  DCHECK(!pass_) << "Only one pass should be set.";
  pass_ = std::move(pass);
}
// Delegates to the registered pass, recording wall time in the graph
// optimization metrics counter. A missing pass is a successful no-op.
Status FunctionOptimizationPassRegistry::Run(
    const std::string& function_name, const DeviceSet& device_set,
    const ConfigProto& config_proto,
    const FunctionOptimizationPass::FunctionOptions& function_options,
    std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
    std::vector<std::string>* control_ret_node_names,
    bool* control_rets_updated) {
  if (!pass_) return absl::OkStatus();
  tensorflow::metrics::ScopedCounter<2> timings(
      tensorflow::metrics::GetGraphOptimizationCounter(),
      {"GraphOptimizationPass", "FunctionOptimizationPassRegistry"});
  return pass_->Run(function_name, device_set, config_proto, function_options,
                    graph, flib_def, control_ret_node_names,
                    control_rets_updated);
}
// Returns the process-wide registry. Heap-allocated and intentionally never
// destroyed so it remains usable during static destruction.
FunctionOptimizationPassRegistry& FunctionOptimizationPassRegistry::Global() {
  static auto* const global_registry = new FunctionOptimizationPassRegistry;
  return *global_registry;
}
} | #include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include <memory>
#include <string>
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
// Trivial pass that records it ran and returns OK; used to verify the
// registry forwards Run() to the registered pass.
class PassingFunctionPass : public FunctionOptimizationPass {
 public:
  static bool ran_;
  Status Run(const std::string& function_name, const DeviceSet& device_set,
             const ConfigProto& config_proto,
             const FunctionOptions& function_options,
             std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def,
             std::vector<std::string>* control_ret_node_names,
             bool* control_rets_updated) override {
    ran_ = true;
    return absl::OkStatus();
  }
};
bool PassingFunctionPass::ran_ = false;
TEST(FunctionOptimizationPassRegistry, PassNoError) {
  EXPECT_FALSE(PassingFunctionPass::ran_);
  FunctionOptimizationPassRegistry::Global().Init(
      std::make_unique<PassingFunctionPass>());
  DeviceSet device_set;
  ConfigProto config_proto;
  FunctionOptimizationPass::FunctionOptions function_options;
  // Null out-params are fine here because PassingFunctionPass ignores them.
  Status status = FunctionOptimizationPassRegistry::Global().Run(
      "test_func", device_set, config_proto, function_options,
      nullptr,
      nullptr,
      nullptr, nullptr);
  EXPECT_EQ(status, absl::OkStatus());
  EXPECT_TRUE(PassingFunctionPass::ran_);
}
} |
291 | #ifndef TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_ASSERT_NEXT_DATASET_OP_H_
#define TENSORFLOW_CORE_KERNELS_DATA_EXPERIMENTAL_ASSERT_NEXT_DATASET_OP_H_
#include "tensorflow/core/framework/dataset.h"
namespace tensorflow {
namespace data {
namespace experimental {
// Op kernel for tf.data's AssertNext: forwards its input dataset unchanged
// while asserting, at iterator-initialization time, which transformations
// appear next in the pipeline (used to test rewrites/optimizations).
class AssertNextDatasetOp : public UnaryDatasetOpKernel {
 public:
  static constexpr const char* const kDatasetType = "AssertNext";
  static constexpr const char* const kInputDataset = "input_dataset";
  static constexpr const char* const kTransformations = "transformations";
  static constexpr const char* const kOutputTypes = "output_types";
  static constexpr const char* const kOutputShapes = "output_shapes";
  explicit AssertNextDatasetOp(OpKernelConstruction* ctx);
 protected:
  // Wraps `input` in the Dataset defined in the .cc file.
  void MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                   DatasetBase** output) override;
 private:
  class Dataset;
  DataTypeVector output_types_;
  std::vector<PartialTensorShape> output_shapes_;
};
}
}
}
#endif
#include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h"
#include <map>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace data {
namespace experimental {
// Out-of-class definitions for the static constexpr members (required for
// ODR-use prior to C++17 inline variables).
constexpr const char* const AssertNextDatasetOp::kInputDataset;
constexpr const char* const AssertNextDatasetOp::kDatasetType;
constexpr const char* const AssertNextDatasetOp::kTransformations;
constexpr const char* const AssertNextDatasetOp::kOutputTypes;
constexpr const char* const AssertNextDatasetOp::kOutputShapes;
// Pass-through dataset: cardinality, outputs, and GetNext all delegate to
// `input_`; the only added behavior is the transformation check performed in
// Iterator::Initialize.
class AssertNextDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, const DatasetBase* input,
          const std::vector<tstring>& transformations,
          const DataTypeVector& output_types,
          const std::vector<PartialTensorShape>& output_shapes)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        transformations_(transformations),
        output_types_(output_types),
        output_shapes_(output_shapes) {
    input_->Ref();  // Keep the upstream dataset alive for our lifetime.
  }
  ~Dataset() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  const DataTypeVector& output_dtypes() const override { return output_types_; }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return output_shapes_;
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    return input_->Cardinality(options);
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }
 protected:
  // Serializes as AssertNextDataset(input, transformations_vector).
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* transformations_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_graph_node, transformations_node}, output));
    return absl::OkStatus();
  }
 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params) {}
    // Validates that the asserted transformations appear, innermost first,
    // directly above this iterator in the ":"-separated iterator prefix.
    Status Initialize(IteratorContext* ctx) override {
      std::vector<string> tokens =
          absl::StrSplit(prefix(), ':', absl::SkipEmpty());
      // NOTE(review): `tokens.size() - 2` underflows (size_t) if fewer than
      // two tokens are present; the prefix format presumably always yields
      // >= 2 tokens — verify against name_utils::IteratorPrefix.
      if (dataset()->transformations_.size() > tokens.size() - 2) {
        return errors::InvalidArgument(
            "Asserted next ", dataset()->transformations_.size(),
            " transformations but encountered only ", tokens.size() - 2, ".");
      }
      int n = tokens.size();
      // Walk outward from this iterator: transformation i must match the
      // token two-from-the-end minus i (any registered op version counts).
      for (size_t i = 0; i < dataset()->transformations_.size(); ++i) {
        if (!MatchesAnyVersion(dataset()->transformations_[i],
                               tokens[n - 2 - i])) {
          return errors::InvalidArgument("Asserted transformation matching ",
                                         dataset()->transformations_[i],
                                         " at offset ", i, " but encountered ",
                                         tokens[n - 2 - i],
                                         " transformation instead.");
        }
      }
      return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
    }
    // Pure pass-through once initialized.
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      // Ratio 1: emits exactly one element per input element.
      return model::MakeKnownRatioNode(std::move(args),
                                       1);
    }
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
      return absl::OkStatus();
    }
   private:
    std::unique_ptr<IteratorBase> input_impl_;
  };
  const DatasetBase* input_;
  const std::vector<tstring> transformations_;
  const DataTypeVector output_types_;
  const std::vector<PartialTensorShape> output_shapes_;
};
AssertNextDatasetOp::AssertNextDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {
  // Attrs are fixed at kernel-construction time; the transformations list is
  // a runtime input read in MakeDataset below.
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
void AssertNextDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                      DatasetBase** output) {
  std::vector<tstring> transformations;
  OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations,
                                                   &transformations));
  *output =
      new Dataset(ctx, input, transformations, output_types_, output_shapes_);
}
namespace {
// Registered under both the current and the legacy ("Experimental") op names.
REGISTER_KERNEL_BUILDER(Name("AssertNextDataset").Device(DEVICE_CPU),
                        AssertNextDatasetOp);
REGISTER_KERNEL_BUILDER(
    Name("ExperimentalAssertNextDataset").Device(DEVICE_CPU),
    AssertNextDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/assert_next_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "assert_next_dataset";
// Test-parameter bundle describing one AssertNextDataset node: its (single)
// input dataset plus the list of asserted transformation names.
class AssertNextDatasetParams : public DatasetParams {
 public:
  template <typename T>
  AssertNextDatasetParams(T input_dataset_params,
                          const std::vector<tstring>& transformations,
                          DataTypeVector output_dtypes,
                          std::vector<PartialTensorShape> output_shapes,
                          string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        transformations_(transformations) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The op's only non-dataset input: a 1-D string tensor of transformations.
  std::vector<Tensor> GetInputTensors() const override {
    int num_transformations = transformations_.size();
    return {CreateTensor<tstring>(TensorShape({num_transformations}),
                                  transformations_)};
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->reserve(input_dataset_params_.size() + 1);
    input_names->emplace_back(AssertNextDatasetOp::kInputDataset);
    input_names->emplace_back(AssertNextDatasetOp::kTransformations);
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{AssertNextDatasetOp::kOutputShapes, output_shapes_},
                    {AssertNextDatasetOp::kOutputTypes, output_dtypes_}};
    return absl::OkStatus();
  }
  string dataset_type() const override {
    return AssertNextDatasetOp::kDatasetType;
  }
 private:
  std::vector<tstring> transformations_;
};
class AssertNextDatasetOpTest : public DatasetOpsTestBase {};
// range(0,10).take(3) asserting only the innermost transformation ("Take").
AssertNextDatasetParams AssertNextDatasetParams1() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        3,
                        {DT_INT64},
                        {PartialTensorShape({})},
                        "take_dataset");
  return AssertNextDatasetParams(
      std::move(take_dataset_params),
      {TakeDatasetOp::kDatasetType},
      {DT_INT64},
      {PartialTensorShape({})},
      kNodeName);
}
// Same pipeline, asserting both "Take" and "Range" (innermost first).
AssertNextDatasetParams AssertNextDatasetParams2() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        3,
                        {DT_INT64},
                        {PartialTensorShape({})},
                        "take_dataset");
  return AssertNextDatasetParams(
      std::move(take_dataset_params),
      {TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType},
      {DT_INT64},
      {PartialTensorShape({})},
      kNodeName);
}
// Asserts a transformation name that never appears -> InvalidArgument.
AssertNextDatasetParams InvalidAssertNextDatasetParams() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        3,
                        {DT_INT64},
                        {PartialTensorShape({})},
                        "take_dataset");
  return AssertNextDatasetParams(std::move(take_dataset_params),
                                 {"Whoops"},
                                 {DT_INT64},
                                 {PartialTensorShape({})},
                                 kNodeName);
}
// Asserts more transformations than the pipeline contains -> InvalidArgument.
AssertNextDatasetParams ShortAssertNextDatasetParams() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        3,
                        {DT_INT64},
                        {PartialTensorShape({})},
                        "take_dataset");
  return AssertNextDatasetParams(
      std::move(take_dataset_params),
      {TakeDatasetOp::kDatasetType, RangeDatasetOp::kDatasetType, "Whoops"},
      {DT_INT64},
      {PartialTensorShape({})},
      kNodeName);
}
// Both valid configurations must pass elements {0,1,2} through unchanged.
std::vector<GetNextTestCase<AssertNextDatasetParams>> GetNextTestCases() {
  return {{AssertNextDatasetParams1(),
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
          {AssertNextDatasetParams2(),
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_GET_NEXT_TEST_P(AssertNextDatasetOpTest, AssertNextDatasetParams,
                         GetNextTestCases())
// The following cases verify that the pass-through dataset faithfully
// mirrors its input's metadata (name, type string, dtypes, shapes,
// cardinality, and iterator prefix).
TEST_F(AssertNextDatasetOpTest, DatasetNodeName) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertNextDatasetOpTest, DatasetTypeString) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(AssertNextDatasetOp::kDatasetType)));
}
TEST_F(AssertNextDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(AssertNextDatasetOpTest, DatasetOutputShapes) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertNextDatasetOpTest, Cardinality) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  // take(3) bounds the pipeline, so cardinality is 3.
  TF_ASSERT_OK(CheckDatasetCardinality(3));
}
TEST_F(AssertNextDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(AssertNextDatasetOpTest, IteratorOutputShapes) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertNextDatasetOpTest, IteratorPrefix) {
  auto dataset_params = AssertNextDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      AssertNextDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
// Save/restore at breakpoints 0, 2, and 5 must reproduce the same elements.
std::vector<IteratorSaveAndRestoreTestCase<AssertNextDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{AssertNextDatasetParams1(),
           {0, 2, 5},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
          {AssertNextDatasetParams2(),
           {0, 2, 5},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertNextDatasetOpTest,
                                 AssertNextDatasetParams,
                                 IteratorSaveAndRestoreTestCases())
// A transformation name that never appears must fail at Initialize.
TEST_F(AssertNextDatasetOpTest, InvalidArguments) {
  auto dataset_params = InvalidAssertNextDatasetParams();
  EXPECT_EQ(Initialize(dataset_params).code(),
            absl::StatusCode::kInvalidArgument);
}
// Asserting more transformations than the pipeline has must also fail.
TEST_F(AssertNextDatasetOpTest, ShortAssertNext) {
  auto dataset_params = ShortAssertNextDatasetParams();
  EXPECT_EQ(Initialize(dataset_params).code(),
            absl::StatusCode::kInvalidArgument);
}
}
}
}
} |
292 | #ifndef AROLLA_EXPR_OPTIMIZATION_PEEPHOLE_OPTIMIZATIONS_BOOL_H_
#define AROLLA_EXPR_OPTIMIZATION_PEEPHOLE_OPTIMIZATIONS_BOOL_H_
#include "absl/status/statusor.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
namespace arolla::expr {
absl::StatusOr<PeepholeOptimizationPack> BoolOptimizations();
}
#endif
#include "arolla/expr/optimization/peephole_optimizations/bool.h"
#include <array>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
auto Matches(const std::vector<ExprNodePtr>& patterns) {
absl::flat_hash_set<Fingerprint> pattern_prints;
pattern_prints.reserve(patterns.size());
for (const auto& p : patterns) {
pattern_prints.insert(p->fingerprint());
}
return [pattern_prints(std::move(pattern_prints))](const ExprNodePtr& node) {
return pattern_prints.contains(node->fingerprint());
};
}
// Returns `value` both as a plain bool literal and as an optional-bool
// literal, so pattern matchers accept either representation.
std::vector<ExprNodePtr> BoolLiterals(bool value) {
  return {Literal(value), Literal(MakeOptionalValue(value))};
}
// Pairs (op, negation) such that bool.logical_not(op(a, b)) == negation(a, b).
constexpr std::array kComparisonOppositeOps = {
    std::pair{"bool.equal", "bool.not_equal"},
    std::pair{"bool.not_equal", "bool.equal"},
    std::pair{"bool.less", "bool.greater_equal"},
    std::pair{"bool.less_equal", "bool.greater"}};
// Adds rewrites eliminating bool.logical_not:
//   logical_not(logical_not(a)) -> a
//   logical_not(cmp(a, b))      -> opposite_cmp(a, b)
absl::Status LogicalNotComparisonOptimizations(
    PeepholeOptimizationPack& optimizations) {
  ExprNodePtr a = Placeholder("a");
  ExprNodePtr b = Placeholder("b");
  {
    // Double negation elimination.
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference("bool.logical_not",
                        {CallOpReference("bool.logical_not", {a})}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(from, a));
  }
  for (auto [cmp1, cmp2] : kComparisonOppositeOps) {
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference("bool.logical_not", {CallOpReference(cmp1, {a, b})}));
    ASSIGN_OR_RETURN(ExprNodePtr to, CallOpReference(cmp2, {a, b}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(from, to));
  }
  return absl::OkStatus();
}
// Operator-name suffixes shared between the bool.* and core.* namespaces.
constexpr std::array kComparisonOps = {"equal", "not_equal", "less",
                                       "less_equal"};
constexpr std::array kLogicalOps = {"and", "or"};
// Registers rewrites of `bool.*` comparisons/logic compared against a
// true-literal into the equivalent `core.*` operators.
// Note: the previously declared placeholders `c` and `d` were never used and
// have been removed.
absl::Status CoreBoolComparisonOptimizations(
    PeepholeOptimizationPack& optimizations) {
  ExprNodePtr a = Placeholder("a");
  ExprNodePtr b = Placeholder("b");
  ExprNodePtr true_ = Placeholder("true");
  std::vector<ExprNodePtr> true_literals = BoolLiterals(true);
  auto is_true = Matches(true_literals);
  {
    // Canonicalize core.equal(true, a) -> core.equal(a, true) so the rewrites
    // below only need to match one argument order. `a` must not itself be a
    // true-literal, otherwise the rewrite could fire on its own output.
    ASSIGN_OR_RETURN(ExprNodePtr from,
                     CallOpReference("core.equal", {true_, a}));
    ASSIGN_OR_RETURN(ExprNodePtr to, CallOpReference("core.equal", {a, true_}));
    ASSIGN_OR_RETURN(
        optimizations.emplace_back(),
        PeepholeOptimization::CreatePatternOptimization(
            from, to, {{"true", is_true}, {"a", std::not_fn(is_true)}}));
  }
  for (absl::string_view comparison_op : kComparisonOps) {
    ASSIGN_OR_RETURN(
        ExprNodePtr bool_cmp,
        CallOpReference(absl::StrCat("bool.", comparison_op), {a, b}));
    ASSIGN_OR_RETURN(
        ExprNodePtr core_cmp,
        CallOpReference(absl::StrCat("core.", comparison_op), {a, b}));
    {
      // core.equal(bool.cmp(a, b), true) -> core.cmp(a, b).
      ASSIGN_OR_RETURN(ExprNodePtr from,
                       CallOpReference("core.equal", {bool_cmp, true_}));
      ASSIGN_OR_RETURN(optimizations.emplace_back(),
                       PeepholeOptimization::CreatePatternOptimization(
                           from, core_cmp, {{"true", is_true}}));
    }
    {
      // Same pattern with the comparison wrapped in core.to_optional._scalar.
      ASSIGN_OR_RETURN(
          ExprNodePtr from,
          CallOpReference(
              "core.equal",
              {CallOpReference("core.to_optional._scalar", {bool_cmp}),
               true_}));
      ASSIGN_OR_RETURN(optimizations.emplace_back(),
                       PeepholeOptimization::CreatePatternOptimization(
                           from, core_cmp, {{"true", is_true}}));
    }
  }
  absl::flat_hash_set<std::string> bool_comparison_ops;
  for (absl::string_view comparison_op : kComparisonOps) {
    bool_comparison_ops.insert(absl::StrCat("bool.", comparison_op));
  }
  // Accepts subexpressions for which `x == true` is expected to be simplified
  // further by the comparison rewrites above: literals and bool.* comparisons.
  auto eq_true_will_be_optimized_further =
      [bool_comparison_ops](const ExprNodePtr& node) {
        if (node->is_literal()) return true;
        if (!node->is_op()) return false;
        return IsRegisteredOperator(node->op()) &&
               bool_comparison_ops.contains(node->op()->display_name());
      };
  for (absl::string_view logical_op : kLogicalOps) {
    ASSIGN_OR_RETURN(
        ExprNodePtr bool_logic,
        CallOpReference(absl::StrCat("bool.logical_", logical_op), {a, b}));
    ASSIGN_OR_RETURN(
        ExprNodePtr core_logic,
        CallOpReference(absl::StrCat("core.presence_", logical_op),
                        {CallOpReference("core.equal", {a, true_}),
                         CallOpReference("core.equal", {b, true_})}));
    {
      // core.equal(bool.logical_op(a, b), true)
      //   -> core.presence_op(a == true, b == true),
      // registered twice: once requiring `a`, once requiring `b`, to be
      // further optimizable, so the rewrite only fires when it helps.
      ASSIGN_OR_RETURN(ExprNodePtr from,
                       CallOpReference("core.equal", {bool_logic, true_}));
      ASSIGN_OR_RETURN(
          optimizations.emplace_back(),
          PeepholeOptimization::CreatePatternOptimization(
              from, core_logic,
              {{"true", is_true}, {"a", eq_true_will_be_optimized_further}}));
      ASSIGN_OR_RETURN(
          optimizations.emplace_back(),
          PeepholeOptimization::CreatePatternOptimization(
              from, core_logic,
              {{"true", is_true}, {"b", eq_true_will_be_optimized_further}}));
    }
  }
  return absl::OkStatus();
}
// Registers rewrites of bool.logical_if into core.where / core.presence_or.
absl::Status LogicalIfOptimizations(PeepholeOptimizationPack& optimizations) {
  ExprNodePtr condition = Placeholder("condition");
  ExprNodePtr a = Placeholder("a");
  ExprNodePtr b = Placeholder("b");
  ExprNodePtr c = Placeholder("c");
  // Matches only conditions statically known to be a full (non-optional)
  // scalar bool.
  auto is_scalar_bool = [](const ExprNodePtr& expr) {
    return expr->qtype() == GetQType<bool>();
  };
  ExprNodePtr true_ = Placeholder("true");
  std::vector<ExprNodePtr> true_literals = BoolLiterals(true);
  auto is_true = Matches(true_literals);
  ExprNodePtr false_ = Placeholder("false");
  std::vector<ExprNodePtr> false_literals = BoolLiterals(false);
  auto is_false = Matches(false_literals);
  {
    // logical_if(to_optional(cond), a, b, c) -> where(cond == true, a, b)
    // when cond is a full scalar bool (the missing-branch `c` is dropped).
    ASSIGN_OR_RETURN(
        ExprNodePtr from1,
        CallOpReference(
            "bool.logical_if",
            {CallOpReference("core.to_optional._scalar", {condition}), a, b,
             c}));
    ASSIGN_OR_RETURN(
        ExprNodePtr to,
        CallOpReference(
            "core.where",
            {CallOpReference("core.equal", {condition, Literal(true)}), a, b}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(
                         from1, to, {{"condition", is_scalar_bool}}));
    // logical_if(presence_or(cond, false), a, b, c) rewrites to the same
    // target expression.
    ASSIGN_OR_RETURN(ExprNodePtr from2,
                     CallOpReference("bool.logical_if",
                                     {CallOpReference("core.presence_or",
                                                      {condition, false_}),
                                      a, b, c}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(
                         from2, to, {{"false", is_false}}));
  }
  {
    // When the false-branch and the missing-branch coincide:
    // logical_if(cond, a, b, b) -> where(cond == true, a, b).
    ASSIGN_OR_RETURN(ExprNodePtr from,
                     CallOpReference("bool.logical_if", {condition, a, b, b}));
    ASSIGN_OR_RETURN(
        ExprNodePtr to,
        CallOpReference(
            "core.where",
            {CallOpReference("core.equal", {condition, Literal(true)}), a, b}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(from, to));
  }
  {
    // logical_if(presence_or(cond, true), a, b, c)
    //   -> where(cond == false, b, a).
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference("bool.logical_if", {CallOpReference("core.presence_or",
                                                            {condition, true_}),
                                            a, b, c}));
    ASSIGN_OR_RETURN(
        ExprNodePtr to,
        CallOpReference(
            "core.where",
            {CallOpReference("core.equal", {condition, Literal(false)}), b,
             a}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(
                         from, to, {{"true", is_true}}));
  }
  {
    // logical_if(cond, true, false, a) -> presence_or(cond, a).
    ASSIGN_OR_RETURN(
        ExprNodePtr from,
        CallOpReference("bool.logical_if", {condition, true_, false_, a}));
    ASSIGN_OR_RETURN(ExprNodePtr to,
                     CallOpReference("core.presence_or", {condition, a}));
    ASSIGN_OR_RETURN(optimizations.emplace_back(),
                     PeepholeOptimization::CreatePatternOptimization(
                         from, to, {{"true", is_true}, {"false", is_false}}));
  }
  return absl::OkStatus();
}
}
// Returns the full pack of bool.* peephole optimizations defined above.
absl::StatusOr<PeepholeOptimizationPack> BoolOptimizations() {
  PeepholeOptimizationPack optimizations;
  RETURN_IF_ERROR(LogicalNotComparisonOptimizations(optimizations));
  RETURN_IF_ERROR(CoreBoolComparisonOptimizations(optimizations));
  RETURN_IF_ERROR(LogicalIfOptimizations(optimizations));
  return optimizations;
}
} | #include "arolla/expr/optimization/peephole_optimizations/bool.h"
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::WithQTypeAnnotation;
// Fixture that builds a peephole optimizer containing only BoolOptimizations.
class BoolOptimizationsTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ASSERT_OK(InitArolla());
    ASSERT_OK_AND_ASSIGN(optimizer_,
                         CreatePeepholeOptimizer({BoolOptimizations}));
  }
  // Lowers the expression, applies the optimizer once, and lowers the result.
  absl::StatusOr<ExprNodePtr> ApplyOptimizer(
      absl::StatusOr<ExprNodePtr> status_or_expr) const {
    ASSIGN_OR_RETURN(auto expr, ToLowest(status_or_expr));
    return ToLowest(optimizer_->ApplyToNode(expr));
  }
  // Status-propagating wrapper around ::arolla::expr::ToLowest.
  absl::StatusOr<ExprNodePtr> ToLowest(
      const absl::StatusOr<ExprNodePtr>& status_or_expr) const {
    if (!status_or_expr.ok()) {
      return std::move(status_or_expr).status();
    }
    return ::arolla::expr::ToLowest(*status_or_expr);
  }
  std::unique_ptr<PeepholeOptimizer> optimizer_;
};
// Verifies that bool.logical_not is removed (double negation) or folded into
// the opposite comparison operator.
TEST_F(BoolOptimizationsTest, LogicalNotRemoval) {
  ExprNodePtr x = Leaf("x");
  ExprNodePtr y = Leaf("y");
  {
    // logical_not(logical_not(x)) -> x.
    ASSERT_OK_AND_ASSIGN(
        auto actual_expr,
        ApplyOptimizer(
            CallOp("bool.logical_not", {CallOp("bool.logical_not", {x})})));
    EXPECT_THAT(actual_expr, EqualsExpr(x));
  }
  {
    // logical_not flips equal <-> not_equal in both directions.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_eq, CallOp("bool.equal", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_not_eq,
                         CallOp("bool.not_equal", {x, y}));
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("bool.logical_not", {bool_eq})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(bool_not_eq));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("bool.logical_not", {bool_not_eq})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(bool_eq));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // logical_not(less) -> greater_equal.
    ASSERT_OK_AND_ASSIGN(auto actual_expr,
                         ApplyOptimizer(CallOp("bool.logical_not",
                                               {CallOp("bool.less", {x, y})})));
    ASSERT_OK_AND_ASSIGN(auto expected_expr,
                         ToLowest(CallOp("bool.greater_equal", {x, y})));
    EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
  }
  {
    // logical_not(less_equal) -> greater.
    ASSERT_OK_AND_ASSIGN(
        auto actual_expr,
        ApplyOptimizer(
            CallOp("bool.logical_not", {CallOp("bool.less_equal", {x, y})})));
    ASSERT_OK_AND_ASSIGN(auto expected_expr,
                         ToLowest(CallOp("bool.greater", {x, y})));
    EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
  }
}
// Verifies that `bool.*` comparisons/logic compared against a true-literal
// are rewritten into the corresponding `core.*` operators.
// Fix: the final scenario computed `actual_expr`/`expected_expr` but left
// them unused and re-ran ApplyOptimizer inside EXPECT_THAT; it now asserts
// on the precomputed values.
TEST_F(BoolOptimizationsTest, BoolToCore) {
  ASSERT_OK_AND_ASSIGN(auto x, WithQTypeAnnotation(Leaf("x"), GetQType<int>()));
  ASSERT_OK_AND_ASSIGN(auto y, WithQTypeAnnotation(Leaf("y"), GetQType<int>()));
  ExprNodePtr w = Leaf("w");
  ExprNodePtr q = Leaf("q");
  ExprNodePtr true_opt = Literal(MakeOptionalValue(true));
  {
    // bool.equal(x, y) == True -> core.equal(x, y), either argument order.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp, CallOp("bool.equal", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp, CallOp("core.equal", {x, y}));
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("core.equal", {bool_cmp, true_opt})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("core.equal", {true_opt, bool_cmp})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // core.equal(True, True): both arguments are true-literals, so the
    // argument-swap canonicalization must not fire.
    ASSERT_OK_AND_ASSIGN(auto core_cmp,
                         CallOp("core.equal", {Literal(true), true_opt}));
    ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(core_cmp));
    ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
    EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
  }
  {
    // Same as above with bool.less.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp, CallOp("bool.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp, CallOp("core.less", {x, y}));
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("core.equal", {bool_cmp, true_opt})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("core.equal", {true_opt, bool_cmp})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // The comparison wrapped in core.to_optional is also rewritten.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp, CallOp("bool.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp, CallOp("core.less", {x, y}));
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(
              CallOp("core.equal",
                     {CallOp("core.to_optional", {bool_cmp}), true_opt})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ApplyOptimizer(CallOp("core.equal", {true_opt, bool_cmp})));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_cmp));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // logical_and of two comparisons == True -> presence_and of core
    // comparisons; needs two optimizer passes to converge.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp1, CallOp("bool.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp2,
                         CallOp("bool.less_equal", {w, q}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_and,
                         CallOp("bool.logical_and", {bool_cmp1, bool_cmp2}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp1, CallOp("core.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp2,
                         CallOp("core.less_equal", {w, q}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_and,
                         CallOp("core.presence_and", {core_cmp1, core_cmp2}));
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ToLowest(CallOp("core.equal", {bool_and, true_opt})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_and));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(
          auto actual_expr,
          ToLowest(CallOp("core.equal", {true_opt, bool_and})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_and));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // Same for logical_or -> presence_or.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp1, CallOp("bool.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp2,
                         CallOp("bool.less_equal", {w, q}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_or,
                         CallOp("bool.logical_or", {bool_cmp1, bool_cmp2}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp1, CallOp("core.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp2,
                         CallOp("core.less_equal", {w, q}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_or,
                         CallOp("core.presence_or", {core_cmp1, core_cmp2}));
    {
      ASSERT_OK_AND_ASSIGN(auto actual_expr,
                           ToLowest(CallOp("core.equal", {bool_or, true_opt})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_or));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(auto actual_expr,
                           ToLowest(CallOp("core.equal", {true_opt, bool_or})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_or));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // One side is a plain leaf: it stays as `q == True` in the result.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_cmp1, CallOp("bool.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_or,
                         CallOp("bool.logical_or", {bool_cmp1, q}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr core_cmp1, CallOp("core.less", {x, y}));
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr core_or,
        CallOp("core.presence_or",
               {core_cmp1, CallOp("core.equal", {q, true_opt})}));
    {
      ASSERT_OK_AND_ASSIGN(auto actual_expr,
                           ToLowest(CallOp("core.equal", {bool_or, true_opt})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_or));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(auto actual_expr,
                           ToLowest(CallOp("core.equal", {true_opt, bool_or})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_or));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // A literal operand also triggers the rewrite.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_or,
                         CallOp("bool.logical_or", {true_opt, q}));
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr core_or,
        CallOp("core.presence_or", {CallOp("core.equal", {true_opt, true_opt}),
                                    CallOp("core.equal", {q, true_opt})}));
    {
      ASSERT_OK_AND_ASSIGN(auto actual_expr,
                           ToLowest(CallOp("core.equal", {bool_or, true_opt})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_or));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
    {
      ASSERT_OK_AND_ASSIGN(auto actual_expr,
                           ToLowest(CallOp("core.equal", {true_opt, bool_or})));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(actual_expr,
                           ToLowest(optimizer_->Apply(actual_expr)));
      ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(core_or));
      EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
    }
  }
  {
    // Neither operand is a literal or a bool comparison, so no rewrite fires.
    ASSERT_OK_AND_ASSIGN(ExprNodePtr bool_or,
                         CallOp("bool.logical_or", {w, q}));
    ASSERT_OK_AND_ASSIGN(auto expr, CallOp("core.equal", {bool_or, true_opt}));
    ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(expr));
    ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(expr));
    EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
  }
}
// Verifies the bool.logical_if -> core.where / core.presence_or rewrites.
// NOTE(review): `cond_optional` is declared but never used below — presumably
// intended for an additional case; confirm or remove upstream.
TEST_F(BoolOptimizationsTest, LogicalIf) {
  ExprNodePtr a = Leaf("a");
  ExprNodePtr b = Leaf("b");
  ExprNodePtr c = Leaf("c");
  ExprNodePtr d = Leaf("d");
  ASSERT_OK_AND_ASSIGN(ExprNodePtr cond_full,
                       WithQTypeAnnotation(Leaf("cond"), GetQType<bool>()));
  ASSERT_OK_AND_ASSIGN(
      ExprNodePtr cond_optional,
      WithQTypeAnnotation(Leaf("cond"), GetOptionalQType<bool>()));
  ExprNodePtr cond_unknown = Leaf("cond");
  {
    // to_optional(cond) form: rewritten only for a known full scalar bool.
    for (const auto& [cond, do_optimize] :
         {std::pair{cond_full, true}, std::pair{cond_unknown, false}}) {
      ASSERT_OK_AND_ASSIGN(
          ExprNodePtr from,
          CallOp("bool.logical_if",
                 {CallOp("core.to_optional", {cond}), a, b, c}));
      ASSERT_OK_AND_ASSIGN(
          ExprNodePtr to,
          CallOp("core.where",
                 {CallOp("core.equal", {cond, Literal(true)}), a, b}));
      auto result = do_optimize ? to : from;
      EXPECT_THAT(ApplyOptimizer(from),
                  IsOkAndHolds(EqualsExpr(ToLowest(result))));
    }
    {
      // presence_or(cond, false) form; both literal spellings of false
      // trigger the rewrite, a non-literal fallback does not.
      ASSERT_OK_AND_ASSIGN(
          ExprNodePtr from1,
          CallOp("bool.logical_if",
                 {CallOp("core.presence_or", {cond_unknown, Literal(false)}), a,
                  b, c}));
      ASSERT_OK_AND_ASSIGN(
          ExprNodePtr from2,
          CallOp("bool.logical_if",
                 {CallOp("core.presence_or",
                         {cond_unknown, Literal(MakeOptionalValue(false))}),
                  a, b, c}));
      ASSERT_OK_AND_ASSIGN(
          ExprNodePtr to,
          CallOp("core.where",
                 {CallOp("core.equal", {cond_unknown, Literal(true)}), a, b}));
      EXPECT_THAT(ApplyOptimizer(from1),
                  IsOkAndHolds(EqualsExpr(ToLowest(to))));
      EXPECT_THAT(ApplyOptimizer(from2),
                  IsOkAndHolds(EqualsExpr(ToLowest(to))));
      ASSERT_OK_AND_ASSIGN(
          ExprNodePtr no_optimization,
          CallOp("bool.logical_if",
                 {CallOp("core.presence_or", {cond_unknown, d}), a, b, c}));
      EXPECT_THAT(ApplyOptimizer(no_optimization),
                  IsOkAndHolds(EqualsExpr(ToLowest(no_optimization))));
    }
  }
  {
    // logical_if(cond, b, c, c): false/missing branches coincide; two passes
    // needed (bool.equal is rewritten first).
    ASSERT_OK_AND_ASSIGN(
        auto actual_expr,
        ToLowest(CallOp("bool.logical_if",
                        {CallOp("bool.equal", {a, Literal(1)}), b, c, c})));
    ASSERT_OK_AND_ASSIGN(actual_expr,
                         ToLowest(optimizer_->Apply(actual_expr)));
    ASSERT_OK_AND_ASSIGN(actual_expr, ToLowest(optimizer_->Apply(actual_expr)));
    ASSERT_OK_AND_ASSIGN(
        auto expected_expr,
        ToLowest(CallOp("core.where",
                        {CallOp("core.equal", {a, Literal(1)}), b, c})));
    EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
  }
  {
    // logical_if(a, true, false, b) -> presence_or(a, b).
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr from,
        CallOp("bool.logical_if", {a, Literal(true), Literal(false), b}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr to, CallOp("core.presence_or", {a, b}));
    EXPECT_THAT(ApplyOptimizer(from), IsOkAndHolds(EqualsExpr(to)));
  }
  {
    // presence_or(cond, true) form -> where(cond == false, b, a).
    ASSERT_OK_AND_ASSIGN(
        auto actual_expr1,
        ApplyOptimizer(
            CallOp("bool.logical_if",
                   {CallOp("core.presence_or", {cond_unknown, Literal(true)}),
                    a, b, c})));
    ASSERT_OK_AND_ASSIGN(
        auto actual_expr2,
        ApplyOptimizer(
            CallOp("bool.logical_if",
                   {CallOp("core.presence_or",
                           {cond_unknown, Literal(MakeOptionalValue(true))}),
                    a, b, c})));
    ASSERT_OK_AND_ASSIGN(
        auto expected_expr,
        ToLowest(CallOp(
            "core.where",
            {CallOp("core.equal", {cond_unknown, Literal(false)}), b, a})));
    EXPECT_THAT(actual_expr1, EqualsExpr(expected_expr));
    EXPECT_THAT(actual_expr2, EqualsExpr(expected_expr));
  }
  {
    // The literal must be the fallback (second) argument of presence_or.
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr no_optimization,
        CallOp("bool.logical_if",
               {CallOp("core.presence_or", {Literal(false), cond_unknown}), a,
                b, c}));
    EXPECT_THAT(ApplyOptimizer(no_optimization),
                IsOkAndHolds(EqualsExpr(no_optimization)));
  }
}
}
} |
293 | #ifndef TENSORFLOW_CORE_TRANSFORMS_UTILS_EVAL_UTILS_H_
#define TENSORFLOW_CORE_TRANSFORMS_UTILS_EVAL_UTILS_H_
#include <memory>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/ir/tf_op_wrapper.h"
namespace Eigen {
class ThreadPoolDevice;
}
namespace mlir {
namespace tfg {
namespace util {
// Minimal CPU DeviceBase used to run individual op kernels on the host
// (e.g. for folding). Owns a small Eigen thread pool and exposes it via the
// DeviceBase accessors.
class SimpleDevice : public tensorflow::DeviceBase {
 public:
  SimpleDevice();
  ~SimpleDevice() override;
  // Parses `tensor_proto` into `*tensor` using the process CPU allocator.
  tensorflow::Status MakeTensorFromProto(
      const tensorflow::TensorProto& tensor_proto,
      const tensorflow::AllocatorAttributes alloc_attrs,
      tensorflow::Tensor* tensor) override;
  tensorflow::Allocator* GetAllocator(
      tensorflow::AllocatorAttributes attr) override;
  const std::string& device_type() const override { return device_type_; }
 private:
  std::unique_ptr<tensorflow::thread::ThreadPool> eigen_worker_;
  tensorflow::DeviceBase::CpuWorkerThreads eigen_worker_threads_;
  std::unique_ptr<Eigen::ThreadPoolDevice> eigen_device_;
  const std::string device_type_ = tensorflow::DEVICE_CPU;
};
// Builds the CPU kernel for `op`, feeds it `operands` converted to host
// tensors, and appends the kernel outputs to `results` as attributes.
// Outputs the kernel did not produce are appended as null. Fails if any
// operand is null, a conversion fails, or the kernel errors.
LogicalResult EvaluateOperation(tensorflow::DeviceBase* cpu_device,
                                tensorflow::ResourceMgr* resource_mgr, TFOp op,
                                ArrayRef<ElementsAttr> operands,
                                SmallVectorImpl<TypedAttr>& results);
}
}
}
#endif
#define EIGEN_USE_THREADS
#include "tensorflow/core/transforms/utils/eval_utils.h"
#include <cassert>
#include <utility>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Builders.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/control_flow.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/public/version.h"
namespace mlir {
namespace tfg {
namespace util {
// Number of worker threads backing the Eigen device.
static constexpr int kThreads = 2;
SimpleDevice::SimpleDevice() : DeviceBase(tensorflow::Env::Default()) {
  eigen_worker_ = std::make_unique<tensorflow::thread::ThreadPool>(
      tensorflow::Env::Default(), "eval_utils", kThreads);
  eigen_worker_threads_.num_threads = kThreads;
  eigen_worker_threads_.workers = eigen_worker_.get();
  eigen_device_ = std::make_unique<Eigen::ThreadPoolDevice>(
      eigen_worker_threads_.workers->AsEigenThreadPool(),
      eigen_worker_threads_.num_threads);
  // Expose the pool and Eigen device to kernels that query the DeviceBase.
  set_tensorflow_cpu_worker_threads(&eigen_worker_threads_);
  set_eigen_cpu_device(eigen_device_.get());
}
SimpleDevice::~SimpleDevice() {}
// All allocations come from the process-wide CPU allocator; `attr` is
// ignored since this device is CPU-only.
tensorflow::Allocator *SimpleDevice::GetAllocator(
    tensorflow::AllocatorAttributes attr) {
  return tensorflow::cpu_allocator();
}
// Parses `tensor_proto` into `*tensor`; `alloc_attrs` is ignored because
// parsing always uses the CPU allocator.
tensorflow::Status SimpleDevice::MakeTensorFromProto(
    const tensorflow::TensorProto &tensor_proto,
    const tensorflow::AllocatorAttributes alloc_attrs,
    tensorflow::Tensor *tensor) {
  tensorflow::Tensor parsed(tensor_proto.dtype());
  if (!parsed.FromProto(tensorflow::cpu_allocator(), tensor_proto)) {
    return tensorflow::errors::InvalidArgument(
        "Cannot parse tensor from tensor_proto.");
  }
  *tensor = std::move(parsed);
  return ::tensorflow::OkStatus();
}
// Instantiates and runs the CPU kernel for `op` on host tensors built from
// `operands`; appends the outputs to `results` (null for outputs the kernel
// did not set). See the header comment for the full contract.
LogicalResult EvaluateOperation(tensorflow::DeviceBase *cpu_device,
                                tensorflow::ResourceMgr *resource_mgr, TFOp op,
                                ArrayRef<ElementsAttr> operands,
                                SmallVectorImpl<TypedAttr> &results) {
  assert(cpu_device && "cpu device can't be null");
  assert(resource_mgr && "ResourceMgr can't be null");
  if (llvm::any_of(operands, [](Attribute operand) { return !operand; })) {
    VLOG(3) << "cannot be evaluated with null operands";
    return failure();
  }
  // Round-trip the op through a NodeDef so the TF kernel registry applies.
  tensorflow::NodeDef node_def;
  if (!ConvertToNodeDef(&*op, &node_def, op.getDialect(), [&](Value value) {
        return GetValueName(value, op.getDialect());
      }).ok()) {
    VLOG(3) << "failed to convert operation to NodeDef";
    return failure();
  }
  // Materialize the operand attributes as host tensors. The TensorValue
  // entries point into `input_tensors`, which must outlive the kernel call.
  absl::InlinedVector<tensorflow::Tensor, 4> input_tensors(operands.size());
  absl::InlinedVector<tensorflow::TensorValue, 4> input_tensor_values(
      operands.size());
  for (auto it : llvm::zip(operands, input_tensors, input_tensor_values)) {
    auto &[operand, input_tensor, input_tensor_value] = it;
    if (!ConvertToTensor(operand, &input_tensor).ok()) return failure();
    input_tensor_value.tensor = &input_tensor;
  }
  tensorflow::Status status;
  std::unique_ptr<tensorflow::OpKernel> op_kernel = tensorflow::CreateOpKernel(
      tensorflow::DEVICE_CPU, cpu_device, cpu_device->GetAllocator({}),
      node_def, TF_GRAPH_DEF_VERSION, &status);
  if (!status.ok()) {
    VLOG(3) << status.message();
    return failure();
  }
  tensorflow::OpKernelContext::Params params;
  params.device = cpu_device;
  params.frame_iter = tensorflow::FrameAndIter(0, 0);
  params.inputs = input_tensor_values;
  params.op_kernel = op_kernel.get();
  params.resource_manager = resource_mgr;
  // Request all outputs on the host so they can be converted to attributes.
  absl::InlinedVector<tensorflow::AllocatorAttributes, 4> output_attrs(
      op_kernel->num_outputs());
  for (auto &attr : output_attrs) attr.set_on_host(true);
  params.output_attr_array = output_attrs.data();
  tensorflow::OpKernelContext op_context(&params);
  op_kernel->Compute(&op_context);
  if (!op_context.status().ok()) {
    VLOG(3) << op_context.status().message();
    return failure();
  }
  // Convert outputs back to attributes; unset outputs become nullptr.
  Builder builder(op->getContext());
  for (int i = 0; i < op_kernel->num_outputs(); ++i) {
    if (op_context.mutable_output(i) == nullptr) {
      results.push_back(nullptr);
      continue;
    }
    absl::StatusOr<ElementsAttr> attr_or =
        ConvertTensor(*(op_context.mutable_output(i)), builder);
    if (!attr_or.status().ok()) {
      VLOG(3) << attr_or.status().message();
      return failure();
    }
    results.push_back(attr_or.value());
  }
  return success();
}
}
}
} | #include "tensorflow/core/transforms/utils/eval_utils.h"
#include <memory>
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
// Evaluating Switch with operands supplied in the wrong order (predicate
// first, data second) must fail rather than crash.
TEST(EvalUtilsTest, InvalidInputs) {
  const char *const code = R"mlir(
    tfg.func @test() -> (tensor<2x2xi32>) {
      %Const_0, %ctl_0 = Const name("c0") {dtype = i1, value = dense<1> : tensor<i1>} : () -> (tensor<i1>)
      %Const_1, %ctl_2 = Const name("c1") {dtype = i32, value = dense<2> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
      %Switch:2, %ctl_3 = Switch(%Const_1, %Const_0) name("switch") {T = i1} : (tensor<2x2xi32>, tensor<i1>) -> (tensor<*xi32>, tensor<*xi32>)
      return (%Const_1) : tensor<2x2xi32>
    }
  )mlir";
  MLIRContext context;
  auto tfg_dialect = context.getOrLoadDialect<tfg::TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto iter = func.getBody().begin()->begin();
  Operation *const_0 = &*iter++;
  ASSERT_TRUE(tfg_dialect->IsConstant(const_0));
  Operation *const_1 = &*iter++;
  ASSERT_TRUE(tfg_dialect->IsConstant(const_1));
  Operation *switch_op = &*iter++;
  auto cpu_device = std::make_unique<util::SimpleDevice>();
  auto resource_mgr = std::make_unique<tensorflow::ResourceMgr>();
  llvm::SmallVector<TypedAttr> result;
  // Operands passed as {pred, data} instead of {data, pred}.
  EXPECT_TRUE(failed(
      util::EvaluateOperation(cpu_device.get(), resource_mgr.get(), switch_op,
                              {const_0->getAttrOfType<ElementsAttr>("value"),
                               const_1->getAttrOfType<ElementsAttr>("value")},
                              result)));
}
// Evaluates Const and Add kernels and checks the numeric results.
TEST(EvalUtilsTest, EvaluateOperation) {
  const char *const code = R"mlir(
    tfg.func @test() -> (tensor<2x2xi32>) {
      %Const_0, %ctl_0 = Const name("c0") {dtype = i32, value = dense<1> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
      %Const_1, %ctl_2 = Const name("c1") {dtype = i32, value = dense<2> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
      %Add, %ctl_7 = Add(%Const_0, %Const_1) name("add") {T = i32} : (tensor<2x2xi32>, tensor<2x2xi32>) -> (tensor<2x2xi32>)
      return (%Const_1) : tensor<2x2xi32>
    }
  )mlir";
  MLIRContext context;
  context.getOrLoadDialect<tfg::TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto iter = func.getBody().begin()->begin();
  Operation *const_0 = &*iter++;
  Operation *const_1 = &*iter++;
  Operation *add = &*iter++;
  auto cpu_device = std::make_unique<util::SimpleDevice>();
  auto resource_mgr = std::make_unique<tensorflow::ResourceMgr>();
  llvm::SmallVector<TypedAttr> result;
  // Const ops reproduce their value attribute.
  ASSERT_TRUE(succeeded(util::EvaluateOperation(
      cpu_device.get(), resource_mgr.get(), const_0,
      {const_0->getAttrOfType<ElementsAttr>("value")}, result)));
  ASSERT_EQ(result.size(), 1);
  ASSERT_TRUE(mlir::isa<ElementsAttr>(result[0]));
  EXPECT_EQ(mlir::cast<ElementsAttr>(result[0]).getValues<int>()[0], 1);
  result.clear();
  ASSERT_TRUE(succeeded(util::EvaluateOperation(
      cpu_device.get(), resource_mgr.get(), const_1,
      {const_1->getAttrOfType<ElementsAttr>("value")}, result)));
  ASSERT_EQ(result.size(), 1);
  ASSERT_TRUE(mlir::isa<ElementsAttr>(result[0]));
  EXPECT_EQ(mlir::cast<ElementsAttr>(result[0]).getValues<int>()[0], 2);
  result.clear();
  // Add computes 1 + 2 elementwise.
  ASSERT_TRUE(succeeded(
      util::EvaluateOperation(cpu_device.get(), resource_mgr.get(), add,
                              {const_0->getAttrOfType<ElementsAttr>("value"),
                               const_1->getAttrOfType<ElementsAttr>("value")},
                              result)));
  ASSERT_EQ(result.size(), 1);
  ASSERT_TRUE(mlir::isa<ElementsAttr>(result[0]));
  EXPECT_EQ(mlir::cast<ElementsAttr>(result[0]).getValues<int>()[0], 3);
}
// Switch produces only the taken branch; the untaken output must come back
// as a null attribute.
TEST(EvalUtilsTest, OutputInvalidation) {
  const char *const code = R"mlir(
    tfg.func @test() -> (tensor<2x2xi32>) {
      %Const_0, %ctl_0 = Const name("c0") {dtype = i1, value = dense<1> : tensor<i1>} : () -> (tensor<i1>)
      %Const_1, %ctl_2 = Const name("c1") {dtype = i32, value = dense<2> : tensor<2x2xi32>} : () -> (tensor<2x2xi32>)
      %Switch:2, %ctl_3 = Switch(%Const_1, %Const_0) name("switch") {T = i1} : (tensor<2x2xi32>, tensor<i1>) -> (tensor<*xi32>, tensor<*xi32>)
      %Identity_0, %ctl_4 = Identity(%Switch#0) name("id1") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
      %Identity_1, %ctl_5 = Identity(%Switch#1) name("id2") {T = i32} : (tensor<*xi32>) -> (tensor<*xi32>)
      return (%Const_1) : tensor<2x2xi32>
    }
  )mlir";
  MLIRContext context;
  auto tfg_dialect = context.getOrLoadDialect<tfg::TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto iter = func.getBody().begin()->begin();
  Operation *const_0 = &*iter++;
  ASSERT_TRUE(tfg_dialect->IsConstant(const_0));
  Operation *const_1 = &*iter++;
  ASSERT_TRUE(tfg_dialect->IsConstant(const_1));
  Operation *switch_op = &*iter++;
  auto cpu_device = std::make_unique<util::SimpleDevice>();
  auto resource_mgr = std::make_unique<tensorflow::ResourceMgr>();
  llvm::SmallVector<TypedAttr> result;
  // Predicate is true, so only output #1 is produced.
  ASSERT_TRUE(succeeded(
      util::EvaluateOperation(cpu_device.get(), resource_mgr.get(), switch_op,
                              {const_1->getAttrOfType<ElementsAttr>("value"),
                               const_0->getAttrOfType<ElementsAttr>("value")},
                              result)));
  ASSERT_EQ(result.size(), 2);
  EXPECT_EQ(result[0], nullptr);
  EXPECT_EQ(mlir::cast<ElementsAttr>(result[1]).getValues<int>()[0], 2);
}
}
} |
294 | #ifndef TENSORFLOW_CORE_KERNELS_TENSOR_MAP_H_
#define TENSORFLOW_CORE_KERNELS_TENSOR_MAP_H_
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_key.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_tensor_data.h"
#include "tensorflow/core/lib/core/refcount.h"
namespace tensorflow {
class TensorMap {
public:
TensorMap() : tensors_(new Tensors) {}
~TensorMap();
TensorMap(const TensorMap& other) : tensors_(other.tensors_) {
tensors_->Ref();
}
TensorMap(TensorMap&& rhs) : tensors_(rhs.tensors_) {
rhs.tensors_ = nullptr;
}
TensorMap& operator=(const TensorMap& rhs) {
if (this == &rhs) return *this;
tensors_->Unref();
tensors_ = rhs.tensors_;
tensors_->Ref();
return *this;
}
TensorMap& operator=(TensorMap&& rhs) {
if (this == &rhs) return *this;
std::swap(tensors_, rhs.tensors_);
return *this;
}
static const char kTypeName[];
string TypeName() const { return kTypeName; }
void Encode(VariantTensorData* data) const;
bool Decode(const VariantTensorData& data);
string DebugString() const { return "TensorMap"; }
absl::flat_hash_map<TensorKey, Tensor>& tensors() {
return tensors_->values_;
}
const absl::flat_hash_map<TensorKey, Tensor>& tensors() const {
return tensors_->values_;
}
TensorMap Copy() const {
TensorMap out;
out.tensors_->values_ = tensors_->values_;
return out;
}
bool insert(const TensorKey& key, const Tensor& value) {
auto r = tensors_->values_.try_emplace(key, value);
return r.second;
}
absl::flat_hash_map<TensorKey, Tensor>::iterator find(TensorKey key) {
return tensors_->values_.find(key);
}
Tensor& lookup(TensorKey key) { return tensors_->values_.find(key)->second; }
Tensor& operator[](TensorKey& k) { return tensors_->values_[k]; }
bool replace(const TensorKey& k, const Tensor& v) {
tensors_->values_[k] = v;
return true;
}
size_t erase(TensorKey key) { return tensors_->values_.erase(key); }
size_t size() const { return tensors_->values_.size(); }
std::vector<Tensor> keys() const {
std::vector<Tensor> keys;
keys.reserve(tensors_->values_.size());
absl::flat_hash_map<TensorKey, Tensor>::iterator it =
tensors_->values_.begin();
while (it != tensors_->values_.end()) {
keys.push_back(it->first);
it++;
}
return keys;
}
bool RefCountIsOne() const { return tensors_->RefCountIsOne(); }
private:
class Tensors : public core::RefCounted {
public:
absl::flat_hash_map<TensorKey, Tensor> values_;
};
Tensors* tensors_;
};
#if defined(PLATFORM_GOOGLE)
static_assert(Variant::CanInlineType<TensorMap>() || sizeof(void*) < 8,
"Must be able to inline TensorMap into a Variant");
#endif
}
#endif
#include "tensorflow/core/kernels/tensor_map.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/lib/core/coding.h"
namespace tensorflow {
// tensors_ is null when this map was moved from; otherwise drop our reference
// (the Tensors object deletes itself when the count reaches zero).
TensorMap::~TensorMap() {
  if (tensors_) tensors_->Unref();
}
// Serializes the map into `data` as a flat list of alternating tensors:
// key0, value0, key1, value1, ...  Decode() reverses this layout.
void TensorMap::Encode(VariantTensorData* data) const {
  data->set_type_name(TypeName());
  // Iterate by const reference; the previous iterator loop copied every
  // key and value Tensor before appending it.
  for (const auto& entry : tensors()) {
    const Tensor& k = entry.first;
    const Tensor& v = entry.second;
    CHECK_NE(k.dtype(), DT_INVALID);
    CHECK_NE(v.dtype(), DT_INVALID);
    *data->add_tensors() = k;
    *data->add_tensors() = v;
  }
}
// Copies every (key, value) pair of `from` into `to` through the supplied
// async device-copy function, propagating the first error encountered.
static Status TensorMapDeviceCopy(
    const TensorMap& from, TensorMap* to,
    const UnaryVariantOpRegistry::AsyncTensorDeviceCopyFn& copy) {
  for (const auto& [key, value] : from.tensors()) {
    TensorKey to_key(key.dtype());
    Tensor to_val(value.dtype());
    TF_RETURN_IF_ERROR(copy(key, &to_key));
    TF_RETURN_IF_ERROR(copy(value, &to_val));
    to->tensors().emplace(to_key, to_val);
  }
  return absl::OkStatus();
}
// Register TensorMapDeviceCopy for all three copy directions, plus the
// Variant decode function keyed by TensorMap::kTypeName.
#define REGISTER_LIST_COPY(DIRECTION)                                         \
  INTERNAL_REGISTER_UNARY_VARIANT_DEVICE_COPY_FUNCTION(TensorMap, DIRECTION, \
                                                       TensorMapDeviceCopy)
REGISTER_LIST_COPY(VariantDeviceCopyDirection::HOST_TO_DEVICE);
REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_HOST);
REGISTER_LIST_COPY(VariantDeviceCopyDirection::DEVICE_TO_DEVICE);
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(TensorMap, TensorMap::kTypeName);
// Rebuilds the map from `data`, which Encode() wrote as alternating
// key/value entries. Returns false if a trailing key has no value
// (malformed input); existing entries in the map are kept.
bool TensorMap::Decode(const VariantTensorData& data) {
  auto it = data.tensors().begin();
  const auto end = data.tensors().end();
  while (it != end) {
    auto value_it = std::next(it);
    if (value_it == end) {
      return false;
    }
    tensors().emplace(*it, *value_it);
    it += 2;
  }
  return true;
}
// Fully-qualified type name used for Variant registration and Encode().
const char TensorMap::kTypeName[] = "tensorflow::TensorMap";
} | #include "tensorflow/core/kernels/tensor_map.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
// A default-constructed map is empty.
TEST(TensorMapTest, Empty) {
  TensorMap tm;
  EXPECT_EQ(tm.tensors().size(), 0);
  EXPECT_EQ(tm.tensors().begin(), tm.tensors().end());
}
// TensorKeys compare by value/shape/dtype, not by identity.
TEST(TensorKeyTest, Equal) {
  TensorKey k1 = Tensor(15);
  TensorKey k2 = Tensor(15);
  EXPECT_EQ(k1, k2);
  EXPECT_EQ(k1.shape(), k2.shape());
  EXPECT_EQ(k1.dtype(), k2.dtype());
  TensorKey k3 = Tensor(37.0);
  EXPECT_NE(k1, k3);
  EXPECT_NE(k1.dtype(), k3.dtype());
}
// insert() stores exactly one entry.
TEST(TensorMapTest, Insert) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v = Tensor(22);
  tm.insert(k, v);
  // NOTE(review): `am` is built but never asserted against — looks like a
  // leftover comparison fixture.
  absl::flat_hash_map<TensorKey, Tensor> am;
  am.try_emplace(k, v);
  absl::flat_hash_map<TensorKey, Tensor>::iterator map_it =
      tm.tensors().begin();
  EXPECT_EQ(map_it->first, k);
  test::ExpectTensorEqual<int32>(map_it->second, v);
  map_it++;
  EXPECT_EQ(map_it, tm.tensors().end());
}
// find() returns an iterator to the stored pair.
TEST(TensorMapTest, Lookup) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v = Tensor(22);
  tm.insert(k, v);
  absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
  Tensor f = map_it->second;
  EXPECT_EQ(map_it->first, k);
  test::ExpectTensorEqual<int32>(f, v);
}
// erase() removes the entry so a later find() misses.
TEST(TensorMapTest, Erase) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v = Tensor(22);
  tm.insert(k, v);
  tm.erase(k);
  EXPECT_EQ(tm.find(k), tm.tensors().end());
}
// A second insert() with the same key fails and keeps the first value.
TEST(TensorMapTest, SameKeyInsert) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v1 = Tensor(22);
  Tensor v2 = Tensor(23);
  bool b1 = tm.insert(k, v1);
  bool b2 = tm.insert(k, v2);
  EXPECT_EQ(b1, true);
  EXPECT_EQ(b2, false);
  absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
  EXPECT_EQ(map_it->first, k);
  test::ExpectTensorEqual<int32>(map_it->second, v1);
}
// replace() must overwrite an existing value. The original test only
// exercised operator[] (and left v1 unused), so replace() itself was never
// covered; seed with v1 and replace with v2 instead.
TEST(TensorMapTest, Replace) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v1 = Tensor(22);
  Tensor v2 = Tensor(23);
  tm.insert(k, v1);
  EXPECT_TRUE(tm.replace(k, v2));
  absl::flat_hash_map<TensorKey, Tensor>::iterator map_it = tm.find(k);
  EXPECT_EQ(map_it->first, k);
  test::ExpectTensorEqual<int32>(map_it->second, v2);
}
// keys() returns every key; order is unspecified, so sort by value before
// asserting.
TEST(TensorMapTest, ListKeys) {
  TensorMap tm;
  TensorKey k = Tensor(11.0);
  TensorKey k2 = Tensor(12.0);
  Tensor v = Tensor(22);
  Tensor v2 = Tensor(23);
  tm.insert(k, v);
  tm.insert(k2, v2);
  std::vector<Tensor> keys = tm.keys();
  // Pair each key's scalar with its original index so we can map back after
  // sorting.
  std::vector<std::pair<double, int>> key_doubles;
  for (int i = 0; i < keys.size(); i++) {
    double x = keys[i].scalar<double>()();
    std::pair<double, int> p = std::pair<double, int>(x, i);
    key_doubles.push_back(p);
  }
  sort(key_doubles.begin(), key_doubles.end());
  EXPECT_EQ(keys.size(), 2);
  EXPECT_EQ(key_doubles[0].first, 11.0);
  EXPECT_EQ(key_doubles[1].first, 12.0);
  int ind1 = key_doubles[0].second;
  int ind2 = key_doubles[1].second;
  EXPECT_EQ(keys[ind1].shape(), k.shape());
  EXPECT_EQ(keys[ind2].shape(), k2.shape());
}
// size() reflects the number of entries.
TEST(TensorMapTest, Size) {
  TensorMap tm;
  EXPECT_EQ(tm.size(), 0);
  TensorKey k = Tensor(11);
  Tensor v = Tensor(22);
  tm.insert(k, v);
  EXPECT_EQ(tm.size(), 1);
}
// Copy() yields an independent map holding equal entries.
TEST(TensorMapTest, Copy) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v = Tensor(22);
  tm.insert(k, v);
  TensorMap tmc = tm.Copy();
  EXPECT_EQ(tm.size(), tmc.size());
  EXPECT_NE(tm.find(k), tm.tensors().end());
  EXPECT_NE(tmc.find(k), tmc.tensors().end());
  EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
  test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
}
// Encode followed by Decode round-trips the map contents.
TEST(TensorMapTest, EncodeDecode) {
  TensorMap tm;
  TensorKey k = Tensor(11);
  Tensor v = Tensor(22);
  tm.insert(k, v);
  VariantTensorData data;
  tm.Encode(&data);
  TensorMap tmc;
  tmc.Decode(data);
  EXPECT_EQ(tm.size(), tmc.size());
  EXPECT_NE(tm.find(k), tm.tensors().end());
  EXPECT_NE(tmc.find(k), tmc.tensors().end());
  EXPECT_EQ(tm.find(k)->first, tmc.find(k)->first);
  test::ExpectTensorEqual<int32>(tm.find(k)->second, tmc.find(k)->second);
}
}
} |
295 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_PLUGIN_COORDINATION_SERVICE_AGENT_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_PLUGIN_COORDINATION_SERVICE_AGENT_H_
#include <cstdint>
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include "tensorflow/c/experimental/next_pluggable_device/c_api.h"
#include "tensorflow/c/kernels_experimental.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/plugin_coordination_service_agent.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
// Adapter that exposes the plugin-facing coordination-service C API
// (TF_CoordinationService*) behind the C++ PluginCoordinationServiceAgent
// interface. Does not own the underlying agent.
class CPluginCoordinationServiceAgent : public PluginCoordinationServiceAgent {
 public:
  // `agent` is an opaque TF_CoordinationServiceAgent* passed through from the
  // plugin boundary; may be null, in which case IsInitialized() is false.
  explicit CPluginCoordinationServiceAgent(void* agent)
      : agent_(reinterpret_cast<TF_CoordinationServiceAgent*>(agent)) {}

  bool IsInitialized() const override {
    if (agent_ == nullptr) return false;
    return TF_CoordinationServiceIsInitialized(agent_);
  }

  Status InsertKeyValue(std::string_view key, std::string_view value) override;

  absl::StatusOr<std::string> GetKeyValue(std::string_view key) override;
  absl::StatusOr<std::string> GetKeyValue(std::string_view key,
                                          absl::Duration timeout) override;
  absl::StatusOr<std::string> TryGetKeyValue(std::string_view key) override;

  Status DeleteKeyValue(std::string_view key) override;

 private:
  TF_CoordinationServiceAgent* agent_;  // Not owned.
};
}
#endif
#include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <string>
#include <string_view>
#include "absl/time/time.h"
#include "tensorflow/c/experimental/next_pluggable_device/c_api.h"
#include "tensorflow/c/tf_buffer.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace {
// Converts the (buffer, status) pair returned by the C GetKeyValue-style APIs
// into a StatusOr<string>, releasing the buffer in all cases.
absl::StatusOr<std::string> ProcessGetKeyValueResult(TF_Buffer* result_buf,
                                                     TF_Status* status) {
  if (TF_GetCode(status) != TF_OK) {
    // Previously the buffer was only freed on the success path, leaking it
    // whenever the C call failed. TF_DeleteBuffer tolerates nullptr.
    TF_DeleteBuffer(result_buf);
    return StatusFromTF_Status(status);
  }
  std::string result{static_cast<const char*>(result_buf->data),
                     result_buf->length};
  TF_DeleteBuffer(result_buf);
  return result;
}
}
// Inserts (key, value) into the coordination service; returns the C API's
// status.
Status CPluginCoordinationServiceAgent::InsertKeyValue(std::string_view key,
                                                       std::string_view value) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_CoordinationServiceInsertKeyValue(key.data(), key.size(), value.data(),
                                       value.size(), agent_, status);
  return StatusFromTF_Status(status);
}
// Blocking lookup of `key` (no explicit timeout).
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
    std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf =
      TF_CoordinationServiceGetKeyValue(key.data(), key.size(), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}
// Lookup of `key` bounded by `timeout`.
// NOTE(review): ToInt64Seconds truncates toward zero, so sub-second timeouts
// become 0 — confirm whether the C API treats 0 as "no wait" or "forever".
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::GetKeyValue(
    std::string_view key, absl::Duration timeout) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf = TF_CoordinationServiceGetKeyValueWithTimeout(
      key.data(), key.size(), absl::ToInt64Seconds(timeout), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}
// Non-blocking lookup: returns immediately whether or not `key` exists.
absl::StatusOr<std::string> CPluginCoordinationServiceAgent::TryGetKeyValue(
    std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_Buffer* result_buf = TF_CoordinationServiceTryGetKeyValue(
      key.data(), key.size(), agent_, status);
  return ProcessGetKeyValueResult(result_buf, status);
}
// Deletes `key`; returns the C API's status.
Status CPluginCoordinationServiceAgent::DeleteKeyValue(std::string_view key) {
  TF_StatusPtr c_status_ptr(TF_NewStatus());
  TF_Status* status = c_status_ptr.get();
  TF_CoordinationServiceDeleteKeyValue(key.data(), key.size(), agent_, status);
  return StatusFromTF_Status(status);
}
} | #include "tensorflow/core/common_runtime/next_pluggable_device/c_plugin_coordination_service_agent.h"
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/coordination_config.pb.h"
#include "tsl/protobuf/coordination_service.pb.h"
namespace tensorflow {
namespace {
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
using tsl::CallOptions;
using tsl::DeleteKeyValueRequest;
using tsl::DeleteKeyValueResponse;
using tsl::GetKeyValueRequest;
using tsl::GetKeyValueResponse;
using tsl::InsertKeyValueRequest;
using tsl::InsertKeyValueResponse;
using ::testing::_;
using ::testing::DoAll;
using ::testing::InvokeArgument;
using ::testing::Pointee;
using ::testing::SetArgPointee;
using ::testing::WithArgs;
// gMock polymorphic matcher comparing protos by their DebugString text.
class ProtoStringMatcher {
 public:
  explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
      : expected_(expected.DebugString()) {}
  template <typename Message>
  bool MatchAndExplain(const Message& p,
                       ::testing::MatchResultListener*) const {
    return p.DebugString() == expected_;
  }
  void DescribeTo(std::ostream* os) const { *os << expected_; }
  void DescribeNegationTo(std::ostream* os) const {
    *os << "not equal to expected message: " << expected_;
  }

 private:
  const std::string expected_;
};
// Convenience factory for ProtoStringMatcher.
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
    const tsl::protobuf::Message& x) {
  return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
// Tuple matcher: two KeyValueEntry protos with equal key and value.
MATCHER(KvEq, "simple KeyValueEntry matcher") {
  const KeyValueEntry& kv0 = std::get<0>(arg);
  const KeyValueEntry& kv1 = std::get<1>(arg);
  return kv0.key() == kv1.key() && kv0.value() == kv1.value();
}
// Mock coordination client: the four key-value RPCs are gMock-mocked so tests
// can script responses; every other RPC immediately completes with
// Unimplemented.
class TestCoordinationClient : public CoordinationClient {
 public:
  TestCoordinationClient() = default;
  MOCK_METHOD(void, GetKeyValueAsync,
              (CallOptions * call_opts, const GetKeyValueRequest*,
               GetKeyValueResponse*, StatusCallback),
              (override));
  MOCK_METHOD(void, TryGetKeyValueAsync,
              (const TryGetKeyValueRequest*, TryGetKeyValueResponse*,
               StatusCallback),
              (override));
  MOCK_METHOD(void, InsertKeyValueAsync,
              (const InsertKeyValueRequest*, InsertKeyValueResponse*,
               StatusCallback),
              (override));
  MOCK_METHOD(void, DeleteKeyValueAsync,
              (const DeleteKeyValueRequest*, DeleteKeyValueResponse*,
               StatusCallback),
              (override));
  // All remaining RPCs are stubbed out with Unimplemented errors.
  void GetKeyValueDirAsync(const tsl::GetKeyValueDirRequest* request,
                           tsl::GetKeyValueDirResponse* response,
                           StatusCallback done) override {
    done(absl::UnimplementedError("GetKeyValueDirAsync"));
  }
  void ResetTaskAsync(const tsl::ResetTaskRequest* request,
                      tsl::ResetTaskResponse* response,
                      StatusCallback done) override {
    done(absl::UnimplementedError("ResetTaskAsync"));
  }
  void ReportErrorToServiceAsync(
      const tsl::ReportErrorToServiceRequest* request,
      tsl::ReportErrorToServiceResponse* response,
      StatusCallback done) override {
    done(absl::UnimplementedError("ReportErrorToServiceAsync"));
  }
  void BarrierAsync(const tsl::BarrierRequest* request,
                    tsl::BarrierResponse* response,
                    StatusCallback done) override {
    done(absl::UnimplementedError("BarrierAsync"));
  }
  void GetTaskStateAsync(const tsl::GetTaskStateRequest* request,
                         tsl::GetTaskStateResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("GetTaskStateAsync"));
  }
  void WaitForAllTasksAsync(const tsl::WaitForAllTasksRequest* request,
                            tsl::WaitForAllTasksResponse* response,
                            StatusCallback done) override {
    done(absl::UnimplementedError("WaitForAllTasksAsync"));
  }
  void CancelBarrierAsync(const tsl::CancelBarrierRequest* request,
                          tsl::CancelBarrierResponse* response,
                          StatusCallback done) override {
    done(absl::UnimplementedError("CancelBarrierAsync"));
  }
  void RegisterTaskAsync(tsl::CallOptions*,
                         const tsl::RegisterTaskRequest* request,
                         tsl::RegisterTaskResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("RegisterTaskAsync"));
  }
  void ShutdownTaskAsync(tsl::CallOptions*,
                         const tsl::ShutdownTaskRequest* request,
                         tsl::ShutdownTaskResponse* response,
                         StatusCallback done) override {
    done(absl::UnimplementedError("ShutdownTaskAsync"));
  }
  void HeartbeatAsync(tsl::CallOptions*, const tsl::HeartbeatRequest* request,
                      tsl::HeartbeatResponse* response,
                      StatusCallback done) override {
    done(absl::UnimplementedError("HeartbeatAsync"));
  }
  void ReportErrorToTaskAsync(CallOptions* call_opts,
                              const ReportErrorToTaskRequest* request,
                              ReportErrorToTaskResponse* response,
                              StatusCallback done) override {
    done(absl::UnimplementedError("ReportErrorToTaskAsync"));
  }
};
// Fixture wiring a real CoordinationServiceAgent to the mock client, with the
// C-plugin adapter under test layered on top.
class CPluginCoordinationServiceAgentTest : public ::testing::Test {
 public:
  // Initializes the underlying agent; consumes client_, so set expectations
  // via GetClient() before calling this.
  void InitializeAgent(CoordinationServiceConfig config = {}) {
    config.set_service_leader("test_leader");
    TF_ASSERT_OK(impl_->Initialize(
        tsl::Env::Default(), "test_job",
        0, config, std::move(client_),
        [](Status s) {
          LOG(ERROR) << "Coordination agent is set to error: " << s;
        }));
  }

  TestCoordinationClient* GetClient() {
    // InitializeAgent() moves client_ away, so this must be called first.
    CHECK(client_ != nullptr)
        << "GetClient() was called after InitializeAgent()";
    return client_.get();
  }

 protected:
  std::unique_ptr<CoordinationServiceAgent> impl_ =
      tsl::CreateCoordinationServiceAgent();
  std::unique_ptr<CPluginCoordinationServiceAgent> agent_ =
      std::make_unique<CPluginCoordinationServiceAgent>(impl_.get());
  std::unique_ptr<TestCoordinationClient> client_ =
      std::make_unique<TestCoordinationClient>();
};
// Mocked GetKeyValue response is surfaced through the adapter.
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Simple_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  GetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
                           InvokeArgument<3>(absl::OkStatus())));
  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key);
  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}
// Same as above, but through the timeout overload (reply arrives in time).
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_WithTimeout_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  GetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
                           InvokeArgument<3>(absl::OkStatus())));
  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(10));
  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}
// The RPC never completes; the timeout overload must report DEADLINE_EXCEEDED.
TEST_F(CPluginCoordinationServiceAgentTest, GetKeyValue_Timeout_ReturnError) {
  const std::string test_key = "test_key";
  StatusCallback owned_done;
  ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
      .WillByDefault(WithArgs<3>([&](StatusCallback done) {
        owned_done = done;
      }));
  InitializeAgent();
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(1));
  EXPECT_EQ(result.status().code(), error::DEADLINE_EXCEEDED);
  // Complete the captured callback so the agent does not dangle on teardown.
  owned_done(absl::CancelledError("error"));
}
// Zero and negative timeouts are rejected before any RPC is issued.
TEST_F(CPluginCoordinationServiceAgentTest,
       GetKeyValue_ZeroTimeout_ReturnError) {
  const std::string test_key = "test_key";
  auto result = agent_->GetKeyValue(test_key, absl::ZeroDuration());
  EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
TEST_F(CPluginCoordinationServiceAgentTest,
       GetKeyValue_NegativeTimeout_ReturnError) {
  const std::string test_key = "test_key";
  auto result = agent_->GetKeyValue(test_key, absl::Seconds(-1));
  EXPECT_EQ(result.status().code(), error::INVALID_ARGUMENT);
}
// InsertKeyValue forwards the exact key/value pair in the request proto.
TEST_F(CPluginCoordinationServiceAgentTest, InsertKeyValue_Success) {
  const std::string test_key = "test_key";
  const std::string test_value = "test_value";
  InsertKeyValueRequest expected_input;
  auto kv = expected_input.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  EXPECT_CALL(*GetClient(),
              InsertKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));
  InitializeAgent();
  TF_ASSERT_OK(agent_->InsertKeyValue(test_key, test_value));
}
// DeleteKeyValue issues a directory delete for the key.
TEST_F(CPluginCoordinationServiceAgentTest, DeleteKeyValue_Success) {
  const std::string test_key = "test_x_key";
  DeleteKeyValueRequest expected_input;
  expected_input.set_key(test_key);
  expected_input.set_is_directory(true);
  EXPECT_CALL(*GetClient(),
              DeleteKeyValueAsync(Pointee(EqualsProto(expected_input)), _, _))
      .WillOnce(InvokeArgument<2>(absl::OkStatus()));
  InitializeAgent();
  TF_ASSERT_OK(agent_->DeleteKeyValue(test_key));
}
// Mocked TryGetKeyValue response is surfaced through the adapter.
TEST_F(CPluginCoordinationServiceAgentTest, TryGetKeyValue_Simple_Success) {
  const std::string& test_key = "test_key";
  const std::string& test_value = "test_value";
  TryGetKeyValueResponse mocked_response;
  auto kv = mocked_response.mutable_kv();
  kv->set_key(test_key);
  kv->set_value(test_value);
  ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
      .WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
                           InvokeArgument<2>(absl::OkStatus())));
  InitializeAgent();
  auto result = agent_->TryGetKeyValue(test_key);
  TF_ASSERT_OK(result.status());
  EXPECT_EQ(*result, test_value);
}
}
} |
296 | #ifndef AROLLA_MEMORY_OPTIONAL_VALUE_H_
#define AROLLA_MEMORY_OPTIONAL_VALUE_H_
#include <cstdint>
#include <optional>
#include <ostream>
#include <tuple>
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/is_bzero_constructible.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"
#include "arolla/util/status.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
#include "arolla/util/view_types.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
// Standard-layout optional: a `present` flag plus an always-constructed
// `value`. Unlike std::optional, `value` exists even when absent (it is
// default-constructed), which keeps the type bzero-constructible for T that
// are.
template <typename T>
struct OptionalValue {
  static_assert(std::is_default_constructible<T>(),
                "OptionalValue<T> T must be default constructible.");
  static_assert(std::is_standard_layout<T>(),
                "OptionalValue<T> T must have standard layout.");

  using value_type = T;

  // Missing value.
  constexpr OptionalValue() : present(false), value() {}
  // Present value (implicit, by design).
  constexpr OptionalValue(T v) : present(true), value(std::move(v)) {}
  // Missing value from std::nullopt.
  constexpr OptionalValue(std::nullopt_t) : present(false), value() {}
  // Explicit (present, value) pair; `value` is kept even when !present.
  constexpr OptionalValue(bool present, T value)
      : present(present), value(std::move(value)) {}
  // Construct an owning T from its view type (e.g. Text from string_view).
  // Enabled only when T has a distinct view type.
  template <typename X = T,
            typename = std::enable_if_t<
                std::is_same_v<X, T> && !std::is_same_v<X, view_type_t<X>>, X>>
  explicit OptionalValue(view_type_t<X> value) : present(true), value(value) {}
  // Same, lifting an optional view into an owning optional.
  template <typename X = T,
            typename = std::enable_if_t<
                std::is_same_v<X, T> && !std::is_same_v<X, view_type_t<X>>, X>>
  explicit OptionalValue(OptionalValue<view_type_t<X>> v) : present(v.present) {
    if (v.present) value = T(v.value);
  }
  // Conversion from std::optional<T>.
  template <typename X = T,
            typename = std::enable_if_t<std::is_same_v<X, T>>>
  constexpr OptionalValue(std::optional<X> opt)
      : present(opt.has_value()), value(std::move(opt).value_or(T{})) {}

  // Implicit conversion to the non-owning view form.
  operator OptionalValue<view_type_t<T>>() const {
    return {present, value};
  }

  // Assignment to an rvalue is deleted to avoid writing through temporaries.
  OptionalValue<T>& operator=(const OptionalValue<T>&) & = default;
  OptionalValue<T>& operator=(const OptionalValue<T>&) && = delete;
  OptionalValue<T>& operator=(T value) & {
    this->present = true;
    this->value = std::move(value);
    return *this;
  }
  OptionalValue<T>& operator=(T) && = delete;

  // True iff a value is present.
  explicit operator bool() const { return present; }

  constexpr std::optional<T> AsOptional() const& {
    if (present) {
      return value;
    }
    return {};
  }
  constexpr std::optional<T> AsOptional() && {
    if (present) {
      return std::move(value);
    }
    return {};
  }

  bool present;
  value_type value = {};

  // Fingerprint only covers `value` when present, so equal optionals hash
  // equally regardless of the residual `value` of a missing optional.
  void ArollaFingerprint(FingerprintHasher* hasher) const {
    if (present) {
      hasher->Combine(true, value);
    } else {
      hasher->Combine(false);
    }
  }

  constexpr static auto ArollaStructFields() {
    using CppType = OptionalValue;
    return std::tuple{
        AROLLA_DECLARE_STRUCT_FIELD(present),
        AROLLA_DECLARE_STRUCT_FIELD(value),
    };
  }
};
// strip_optional_t<OptionalValue<T>> == T; strip_optional_t<T> == T.
template <typename T>
using strip_optional_t = meta::strip_template_t<OptionalValue, T>;
// Wraps T (or re-wraps OptionalValue<T>) into OptionalValue<T>.
template <typename T>
using wrap_with_optional_t = OptionalValue<strip_optional_t<T>>;
// True iff T is an OptionalValue instantiation.
template <typename T>
constexpr bool is_optional_v = meta::is_wrapped_with_v<OptionalValue, T>;
// The view of an optional is the optional of the view.
template <typename T>
struct view_type<OptionalValue<T>> {
  using type = OptionalValue<view_type_t<T>>;
};
// Specialization for Unit: only the `present` flag is stored; `value` is a
// static constant, so OptionalValue<Unit> is effectively a bool.
template <>
struct OptionalValue<Unit> {
  using value_type = Unit;
  constexpr OptionalValue() : present(false) {}
  constexpr OptionalValue(Unit v)
      : present(true) {}
  constexpr OptionalValue(
      std::nullopt_t)
      : present(false) {}
  constexpr OptionalValue(bool present, Unit value) : present(present) {}
  // Extra bool constructor (no counterpart in the primary template).
  constexpr explicit OptionalValue(bool present) : present(present) {}
  constexpr OptionalValue(
      std::optional<Unit> opt)
      : present(opt.has_value()) {}
  OptionalValue<Unit>& operator=(const OptionalValue<Unit>&) & = default;
  OptionalValue<Unit>& operator=(const OptionalValue<Unit>&) && = delete;
  explicit operator bool() const { return present; }
  constexpr std::optional<Unit> AsOptional() const {
    if (present) {
      return Unit{};
    }
    return {};
  }
  template <typename H>
  friend H AbslHashValue(H h, const OptionalValue& v) {
    return H::combine(std::move(h), v.present);
  }
  bool present;
  // Shared constant; not part of the object layout.
  static constexpr Unit value = {};
  constexpr static auto ArollaStructFields() {
    using CppType = OptionalValue;
    return std::tuple{
        AROLLA_DECLARE_STRUCT_FIELD(present),
    };
  }
  void ArollaFingerprint(FingerprintHasher* hasher) const {
    CombineStructFields(hasher, *this);
  }
};
// OptionalUnit models presence/absence with no payload.
using OptionalUnit = OptionalValue<Unit>;
constexpr OptionalUnit kPresent{Unit{}};
constexpr OptionalUnit kMissing{};
// Wraps `v` into a present OptionalValue<T>.
template <class T>
constexpr OptionalValue<T> MakeOptionalValue(T v) {
  return {std::move(v)};
}
// Lifts StatusOr<T> into StatusOr<OptionalValue<T>>: an error propagates
// unchanged; a value is wrapped into a present optional.
template <class T>
absl::StatusOr<OptionalValue<T>> MakeStatusOrOptionalValue(
    absl::StatusOr<T> v) {
  using ResultT = absl::StatusOr<OptionalValue<T>>;
  if (!v.ok()) {
    return ResultT{v.status()};
  }
  return ResultT{OptionalValue<T>{*std::move(v)}};
}
// Repr support for the common payload types (definitions in the .cc file).
AROLLA_DECLARE_REPR(OptionalValue<bool>);
AROLLA_DECLARE_REPR(OptionalValue<int32_t>);
AROLLA_DECLARE_REPR(OptionalValue<int64_t>);
AROLLA_DECLARE_REPR(OptionalValue<uint64_t>);
AROLLA_DECLARE_REPR(OptionalValue<float>);
AROLLA_DECLARE_REPR(OptionalValue<double>);
AROLLA_DECLARE_REPR(OptionalValue<Bytes>);
AROLLA_DECLARE_REPR(OptionalValue<Text>);
AROLLA_DECLARE_REPR(OptionalUnit);
// Stream output, enabled only for types that have a Repr.
template <class T, typename = std::enable_if_t<
                       std::is_invocable_v<ReprTraits<OptionalValue<T>>, T>>>
std::ostream& operator<<(std::ostream& stream, const OptionalValue<T>& value) {
  return stream << Repr(value);
}
// OptionalValue is bzero-constructible whenever T is (zeroed memory gives
// present == false).
template <typename T>
struct is_bzero_constructible<OptionalValue<T>> : is_bzero_constructible<T> {};
// Equality: two missing optionals are equal regardless of residual `value`.
template <typename T>
constexpr bool operator==(const OptionalValue<T>& a,
                          const OptionalValue<T>& b) {
  if (a.present && b.present) {
    return a.value == b.value;
  }
  return (a.present == b.present);
}
// Comparisons against a plain T treat a missing optional as unequal.
template <typename T>
constexpr bool operator==(const OptionalValue<T>& a, const T& b) {
  return a.present && a.value == b;
}
template <typename T>
constexpr bool operator==(const T& a, const OptionalValue<T>& b) {
  return b.present && a == b.value;
}
// Comparisons against std::nullopt test only presence.
template <typename T>
constexpr bool operator==(const OptionalValue<T>& a, std::nullopt_t) {
  return !a.present;
}
template <typename T>
constexpr bool operator==(std::nullopt_t, const OptionalValue<T>& a) {
  return !a.present;
}
template <typename T>
constexpr bool operator!=(const OptionalValue<T>& a,
                          const OptionalValue<T>& b) {
  return !(a == b);
}
template <typename T>
constexpr bool operator!=(const OptionalValue<T>& a, const T& b) {
  return !(a == b);
}
template <typename T>
constexpr bool operator!=(const T& a, const OptionalValue<T>& b) {
  return !(a == b);
}
template <typename T>
constexpr bool operator!=(const OptionalValue<T>& a, std::nullopt_t) {
  return a.present;
}
template <typename T>
constexpr bool operator!=(std::nullopt_t, const OptionalValue<T>& a) {
  return a.present;
}
// OptionalUnit overloads: the Unit payload carries no information.
constexpr bool operator==(const OptionalUnit& a, const OptionalUnit& b) {
  return a.present == b.present;
}
constexpr bool operator==(const OptionalUnit& a, const Unit& b) {
  return a.present;
}
constexpr bool operator==(const Unit& a, const OptionalUnit& b) {
  return b.present;
}
// Heterogeneous comparison for string-view optionals.
constexpr bool operator==(const OptionalValue<absl::string_view>& a,
                          absl::string_view b) {
  return a.present && a.value == b;
}
// CTAD: OptionalValue(v) deduces OptionalValue<decltype(v)>.
template <typename T>
OptionalValue(T value) -> OptionalValue<T>;
namespace optional_value_impl {

template <class Fn, class ArgList>
class OptionalFn;

// True if an argument declared as `To` can be fed from optional `v`:
// optional parameters always accept; non-optional ones need v.present.
template <class To, class From>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool is_available(const From& v) {
  if constexpr (!is_optional_v<To>) {
    return v.present;
  } else {
    return true;
  }
}
// Unwraps `v` for a non-optional parameter; passes optionals through.
template <class To, class From>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline const To& value(const From& v) {
  if constexpr (!is_optional_v<To>) {
    return v.value;
  } else {
    return v;
  }
}

// Wrapper that lifts Fn over optional arguments: if every non-optional
// argument is present, calls Fn on the unwrapped values; otherwise returns a
// missing optional. A StatusOr return type of Fn is preserved.
template <class Fn, class... Args>
class OptionalFn<Fn, meta::type_list<Args...>> {
 private:
  using FnResT = std::decay_t<typename meta::function_traits<Fn>::return_type>;
  static constexpr bool kHasStatus = IsStatusOrT<FnResT>::value;
  using OptResT = wrap_with_optional_t<strip_statusor_t<FnResT>>;
  using ResT = std::conditional_t<kHasStatus, absl::StatusOr<OptResT>, OptResT>;

 public:
  explicit constexpr OptionalFn(Fn fn) : fn_(std::move(fn)) {}

  ResT operator()(
      const wrap_with_optional_t<std::decay_t<Args>>&... args) const {
    if ((is_available<std::decay_t<Args>>(args) && ...)) {
      if constexpr (kHasStatus && !std::is_same_v<FnResT, ResT>) {
        // Unwrap the StatusOr, then re-wrap the payload as an optional.
        ASSIGN_OR_RETURN(auto res, fn_(value<std::decay_t<Args>>(args)...));
        return OptResT(res);
      } else {
        return fn_(value<std::decay_t<Args>>(args)...);
      }
    } else {
      return OptResT(std::nullopt);
    }
  }

 private:
  Fn fn_;
};

}

// Public entry point: returns a callable accepting optional versions of Fn's
// arguments (see OptionalFn above for semantics).
template <class Fn>
constexpr auto WrapFnToAcceptOptionalArgs(Fn fn) {
  return optional_value_impl::OptionalFn<
      Fn, typename meta::function_traits<Fn>::arg_types>(fn);
}
}
#endif
#include "arolla/memory/optional_value.h"
#include <cstdint>
#include "absl/strings/str_cat.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/text.h"
namespace arolla {
// Repr implementations. Some payload types spell out the full prefix
// ("optional_int32{...}"), others prepend "optional_" to the payload's own
// Repr — presumably because Repr(int64_t) etc. already emit "int64{...}";
// verify against the payload Repr implementations.
ReprToken ReprTraits<OptionalValue<bool>>::operator()(
    const OptionalValue<bool>& value) const {
  return ReprToken{
      value.present ? absl::StrCat("optional_boolean{", Repr(value.value), "}")
                    : "optional_boolean{NA}"};
}
ReprToken ReprTraits<OptionalValue<int32_t>>::operator()(
    const OptionalValue<int32_t>& value) const {
  return ReprToken{value.present
                       ? absl::StrCat("optional_int32{", Repr(value.value), "}")
                       : "optional_int32{NA}"};
}
ReprToken ReprTraits<OptionalValue<int64_t>>::operator()(
    const OptionalValue<int64_t>& value) const {
  return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
                                 : "optional_int64{NA}"};
}
ReprToken ReprTraits<OptionalValue<uint64_t>>::operator()(
    const OptionalValue<uint64_t>& value) const {
  return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
                                 : "optional_uint64{NA}"};
}
ReprToken ReprTraits<OptionalValue<float>>::operator()(
    const OptionalValue<float>& value) const {
  return ReprToken{
      value.present ? absl::StrCat("optional_float32{", Repr(value.value), "}")
                    : "optional_float32{NA}"};
}
ReprToken ReprTraits<OptionalValue<double>>::operator()(
    const OptionalValue<double>& value) const {
  return ReprToken{value.present ? absl::StrCat("optional_", Repr(value.value))
                                 : "optional_float64{NA}"};
}
ReprToken ReprTraits<OptionalValue<Bytes>>::operator()(
    const OptionalValue<Bytes>& value) const {
  return ReprToken{value.present
                       ? absl::StrCat("optional_bytes{", Repr(value.value), "}")
                       : "optional_bytes{NA}"};
}
ReprToken ReprTraits<OptionalValue<Text>>::operator()(
    const OptionalValue<Text>& value) const {
  return ReprToken{value.present
                       ? absl::StrCat("optional_text{", Repr(value.value), "}")
                       : "optional_text{NA}"};
}
// Presence-only Repr for OptionalUnit.
ReprToken ReprTraits<OptionalUnit>::operator()(
    const OptionalUnit& value) const {
  return ReprToken{value.present ? "present" : "missing"};
}
} | #include "arolla/memory/optional_value.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>
#include <optional>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/util/bytes.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/text.h"
#include "arolla/util/view_types.h"
namespace arolla {
namespace testing {
namespace {
using ::testing::HasSubstr;
using ::testing::Test;
// Absent optionals (default, empty std::optional, std::nullopt) compare equal
// regardless of payload, and convert to an empty std::optional.
TEST(OptionalValueTest, TestEmptyValues) {
  OptionalValue<float> v1;
  EXPECT_FALSE(v1.present);
  OptionalValue<float> v2(std::optional<float>{});
  EXPECT_FALSE(v2.present);
  OptionalValue<float> v3(std::nullopt);
  EXPECT_FALSE(v3.present);
  EXPECT_EQ(v1, v2);
  EXPECT_EQ(v1, v3);
  v1.value = 1.0f;
  v2.value = 2.0f;
  // Payloads of absent values are ignored by operator==.
  EXPECT_EQ(v1, v2);
  auto absl_v = v2.AsOptional();
  EXPECT_FALSE(absl_v.has_value());
}
// Construction and member access are usable in constant expressions.
TEST(OptionalValueTest, TestConstExpr) {
  static_assert(!OptionalValue<int>().present);
  static_assert(OptionalValue<int>(5).present);
  static_assert(OptionalValue<int>(5).value == 5);
  static_assert(MakeOptionalValue(5).present);
  static_assert(MakeOptionalValue(5).value == 5);
}
// Present optionals expose their value, repr as "optional_float32{...}", and
// compare by value.
TEST(OptionalValueTest, TestPresentValues) {
  OptionalValue<float> v1(1.0f);
  EXPECT_TRUE(v1.present);
  EXPECT_EQ(1.0f, v1.value);
  EXPECT_EQ(Repr(v1), "optional_float32{1.}");
  auto v_auto = MakeOptionalValue(1.0f);
  EXPECT_TRUE(v_auto.present);
  EXPECT_EQ(1.0f, v_auto.value);
  EXPECT_EQ(Repr(v_auto), "optional_float32{1.}");
  OptionalValue<float> v2(std::optional<float>{2.0f});
  EXPECT_TRUE(v2.present);
  EXPECT_EQ(2.0f, v2.value);
  EXPECT_EQ(Repr(v2), "optional_float32{2.}");
  EXPECT_NE(v1, v2);
  v1.value = 2.0f;
  EXPECT_EQ(v1, v2);
}
// Assigning a value makes the optional present; assigning nullopt clears it.
TEST(OptionalValueTest, TestAssignment) {
  OptionalValue<float> v1;
  v1 = 1.0f;
  EXPECT_TRUE(v1.present);
  EXPECT_EQ(v1.value, 1.0f);
  v1 = std::nullopt;
  EXPECT_FALSE(v1.present);
}
// MakeStatusOrOptionalValue lifts StatusOr<T> into StatusOr<OptionalValue<T>>,
// preserving both values and error statuses.
TEST(OptionalValueTest, MakeStatusOrOptionalValue) {
  absl::StatusOr<OptionalValue<float>> v =
      MakeStatusOrOptionalValue(absl::StatusOr<float>(1.0f));
  ASSERT_OK(v.status());
  EXPECT_TRUE(v.value().present);
  EXPECT_EQ(v.value().value, 1.0f);
  absl::StatusOr<OptionalValue<float>> v_error = MakeStatusOrOptionalValue(
      absl::StatusOr<float>(absl::InternalError("fake")));
  EXPECT_THAT(v_error.status(),
              StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
}
// OptionalUnit is a boolean-like presence marker: kMissing/kPresent constants,
// bool conversion, and "missing"/"present" reprs.
TEST(OptionalValueTest, OptionalUnit) {
  EXPECT_EQ(OptionalUnit(), kMissing);
  EXPECT_EQ(OptionalUnit(false), kMissing);
  EXPECT_FALSE(kMissing);
  EXPECT_FALSE(kMissing.present);
  EXPECT_EQ(Repr(kMissing), "missing");
  EXPECT_EQ(OptionalUnit(true), kPresent);
  EXPECT_TRUE(kPresent);
  EXPECT_TRUE(kPresent.present);
  EXPECT_EQ(Repr(kPresent), "present");
}
// operator==/!= against other optionals, raw values, and std::nullopt.
// Absent values compare equal to each other irrespective of payload.
TEST(OptionalValueTest, Comparison) {
  OptionalValue<float> v0;
  v0.value = 1.0f;
  OptionalValue<float> v1(1.0f);
  OptionalValue<float> v2(2.0f);
  {
    // optional vs optional.
    EXPECT_TRUE(v1 == v1);
    EXPECT_TRUE(v0 == v0);
    EXPECT_FALSE(v1 == v2);
    EXPECT_FALSE(v1 == v0);
    EXPECT_FALSE(v1 != v1);
    EXPECT_FALSE(v0 != v0);
    EXPECT_TRUE(v1 != v2);
    EXPECT_TRUE(v1 != v0);
    OptionalValue<float> v0_2;
    v0_2.value = 2.0f;
    EXPECT_TRUE(v0 == v0_2);
    EXPECT_FALSE(v0 != v0_2);
  }
  {
    // optional vs raw value (both argument orders).
    EXPECT_TRUE(v1 == 1.0f);
    EXPECT_TRUE(1.0f == v1);
    EXPECT_FALSE(v1 != 1.0f);
    EXPECT_FALSE(1.0f != v1);
    EXPECT_FALSE(v1 == 2.0f);
    EXPECT_FALSE(2.0f == v1);
    EXPECT_TRUE(v1 != 2.0f);
    EXPECT_TRUE(2.0f != v1);
  }
  {
    // optional vs std::nullopt (both argument orders).
    EXPECT_FALSE(v1 == std::nullopt);
    EXPECT_FALSE(std::nullopt == v1);
    EXPECT_TRUE(v0 == std::nullopt);
    EXPECT_TRUE(std::nullopt == v0);
    EXPECT_TRUE(v1 != std::nullopt);
    EXPECT_TRUE(std::nullopt != v1);
    EXPECT_FALSE(v0 != std::nullopt);
    EXPECT_FALSE(std::nullopt != v0);
  }
}
// Implicit construction/assignment from {}, a raw value, and std::optional.
TEST(OptionalValueTest, TestImplicitConstructors) {
  OptionalValue<float> v = {};
  EXPECT_EQ(v, OptionalValue<float>());
  v = 3.5;
  EXPECT_EQ(v, OptionalValue<float>(3.5));
  v = std::optional<float>(2.5);
  EXPECT_EQ(v, OptionalValue<float>(2.5));
}
// Move-only payloads: move into OptionalValue, then move out via AsOptional().
TEST(OptionalValueTest, TestMoves) {
  auto ptr = std::make_unique<std::string>("Hello!");
  OptionalValue<std::unique_ptr<std::string>> v1(std::move(ptr));
  EXPECT_TRUE(v1.present);
  EXPECT_EQ("Hello!", *(v1.value));
  std::optional<std::unique_ptr<std::string>> v2(std::move(v1).AsOptional());
  EXPECT_TRUE(v2.has_value());
  EXPECT_EQ("Hello!", **v2);
}
template <typename T>
using Slot = FrameLayout::Slot<T>;
// An OptionalValue slot decomposes into a bool presence subslot and a value
// subslot; writes through either view are visible through the other.
TEST(OptionalValueTest, TestFrameLayout) {
  FrameLayout::Builder builder;
  builder.AddSlot<double>();
  builder.AddSlot<int32_t>();
  auto optional_slot = builder.AddSlot<OptionalValue<float>>();
  Slot<bool> presence_slot = optional_slot.GetSubslot<0>();
  Slot<float> value_slot = optional_slot.GetSubslot<1>();
  FrameLayout layout = std::move(builder).Build();
  MemoryAllocation alloc(&layout);
  FramePtr frame = alloc.frame();
  frame.Set(optional_slot, OptionalValue<float>{1.0f});
  EXPECT_EQ(true, frame.Get(presence_slot));
  EXPECT_EQ(1.0f, frame.Get(value_slot));
  frame.Set(value_slot, 2.0f);
  EXPECT_EQ(2.0, frame.Get(optional_slot).value);
}
// Trivial payloads make OptionalValue bzero-constructible; std::string does not.
TEST(OptionalValue, IsBZeroConstructible) {
  EXPECT_TRUE(is_bzero_constructible<OptionalValue<float>>());
  EXPECT_TRUE(is_bzero_constructible<OptionalValue<int>>());
  EXPECT_FALSE(is_bzero_constructible<OptionalValue<std::string>>());
}
// All-zero storage must be readable as an absent OptionalValue.
TEST(OptionalValue, BZeroStateIsEmptyValue) {
  using T = OptionalValue<float>;
  std::aligned_storage_t<sizeof(T), alignof(T)> storage;
  memset(&storage, 0, sizeof(storage));
  EXPECT_FALSE(std::launder(reinterpret_cast<const T*>(&storage))->present);
}
// OptionalValue decomposes via structured bindings into (present, value).
TEST(OptionalValue, StructuredBindings) {
  {
    OptionalValue<float> f;
    auto [present, value] = f;
    EXPECT_FALSE(present);
  }
  {
    OptionalValue<float> f = 17.0;
    auto [present, value] = f;
    EXPECT_TRUE(present);
    EXPECT_EQ(value, 17.0);
  }
}
// view_type_t maps OptionalValue<Bytes> to OptionalValue<string_view> and is
// an identity for trivially-viewable payloads; views convert implicitly.
TEST(OptionalValue, ViewType) {
  static_assert(std::is_same_v<view_type_t<OptionalValue<int64_t>>,
                               OptionalValue<int64_t>>);
  static_assert(std::is_same_v<view_type_t<OptionalValue<Bytes>>,
                               OptionalValue<absl::string_view>>);
  auto fn = [](OptionalValue<absl::string_view> v) -> char {
    return (v.present && !v.value.empty()) ? v.value[0] : 'X';
  };
  EXPECT_EQ(fn(OptionalValue<Text>(Text("Hello"))), 'H');
  EXPECT_EQ(fn(std::nullopt), 'X');
}
// WrapFnToAcceptOptionalArgs lifts a plain function to optionals: any missing
// strict argument yields a missing result; already-optional parameters pass
// through; reference returns are copied; StatusOr returns are propagated.
TEST(OptionalValue, WrapFnToAcceptOptionalArgs) {
  {
    // `b` is already optional, so only `a` and `c` are strict.
    auto fn = [](int a, OptionalValue<int64_t> b, int64_t c) -> int {
      return a + c + (b.present ? b.value : 10);
    };
    auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
    EXPECT_EQ(opt_fn(1, 2, 3), OptionalValue<int>(6));
    EXPECT_EQ(opt_fn(std::nullopt, 2, 3), OptionalValue<int>());
    EXPECT_EQ(opt_fn(1, std::nullopt, 3), OptionalValue<int>(14));
    EXPECT_EQ(opt_fn(1, 2, std::nullopt), OptionalValue<int>());
  }
  {
    // Reference return is materialized into an owned OptionalValue.
    auto fn = [](const Bytes& v) -> const Bytes& { return v; };
    auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
    EXPECT_EQ(opt_fn(Bytes("123")), OptionalValue<Bytes>("123"));
  }
  {
    // View-typed parameters accept owning optionals.
    auto fn = [](absl::string_view v) { return v; };
    auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
    EXPECT_EQ(opt_fn(MakeOptionalValue(Bytes("123"))),
              MakeOptionalValue(absl::string_view("123")));
  }
  {
    // StatusOr-returning functions keep their error channel.
    auto fn = [](int a, OptionalValue<int64_t> b,
                 int64_t c) -> absl::StatusOr<int> {
      if (c < 0) {
        return absl::InvalidArgumentError("c < 0");
      } else {
        return a + c + (b.present ? b.value : 10);
      }
    };
    auto opt_fn = WrapFnToAcceptOptionalArgs(fn);
    EXPECT_THAT(opt_fn(1, 2, 3), IsOkAndHolds(OptionalValue<int>(6)));
    EXPECT_THAT(opt_fn(1, 2, -3),
                StatusIs(absl::StatusCode::kInvalidArgument, "c < 0"));
    EXPECT_THAT(opt_fn(std::nullopt, 2, -3),
                IsOkAndHolds(OptionalValue<int>()));
  }
}
// Repr of optional<bool>: "optional_boolean{true}" / "optional_boolean{NA}".
TEST(OptionalValueReprTest, bool) {
  EXPECT_EQ(Repr(OptionalValue<bool>(true)), "optional_boolean{true}");
  EXPECT_EQ(Repr(OptionalValue<bool>()), "optional_boolean{NA}");
}
// Repr of optional<int32_t>.
TEST(OptionalValueReprTest, int32_t) {
  EXPECT_EQ(Repr(OptionalValue<int32_t>(1)), "optional_int32{1}");
  EXPECT_EQ(Repr(OptionalValue<int32_t>()), "optional_int32{NA}");
}
// Repr of optional<int64_t>.
TEST(OptionalValueReprTest, int64_t) {
  EXPECT_EQ(Repr(OptionalValue<int64_t>(1)), "optional_int64{1}");
  EXPECT_EQ(Repr(OptionalValue<int64_t>()), "optional_int64{NA}");
}
// Repr of optional<uint64_t>.
TEST(OptionalValueReprTest, uint64_t) {
  EXPECT_EQ(Repr(OptionalValue<uint64_t>(1)), "optional_uint64{1}");
  EXPECT_EQ(Repr(OptionalValue<uint64_t>()), "optional_uint64{NA}");
}
// Repr of optional<float>.
TEST(OptionalValueReprTest, float) {
  EXPECT_EQ(Repr(OptionalValue<float>(1.5)), "optional_float32{1.5}");
  EXPECT_EQ(Repr(OptionalValue<float>()), "optional_float32{NA}");
}
// Repr of optional<double>: type name and braces must be present.
TEST(OptionalValueReprTest, double) {
  EXPECT_EQ(Repr(OptionalValue<double>(1.5)), "optional_float64{1.5}");
  EXPECT_EQ(Repr(OptionalValue<double>()), "optional_float64{NA}");
}
// Repr of optional<Bytes> nests the b'...' bytes repr.
TEST(OptionalValueReprTest, Bytes) {
  EXPECT_EQ(Repr(OptionalValue<Bytes>("abc")), "optional_bytes{b'abc'}");
  EXPECT_EQ(Repr(OptionalValue<Bytes>()), "optional_bytes{NA}");
}
// Repr of optional<Text> nests the quoted text repr.
TEST(OptionalValueReprTest, Text) {
  EXPECT_EQ(Repr(OptionalValue<Text>("abc")), "optional_text{'abc'}");
  EXPECT_EQ(Repr(OptionalValue<Text>()), "optional_text{NA}");
}
// operator<< produces the same text as Repr().
TEST(OptionalValueReprTest, StreamOp) {
  {
    std::ostringstream oss;
    oss << OptionalValue<float>(1.5);
    EXPECT_EQ(oss.str(), "optional_float32{1.5}");
  }
  {
    std::ostringstream oss;
    oss << OptionalValue<float>();
    EXPECT_EQ(oss.str(), "optional_float32{NA}");
  }
}
}
}
} |
297 | #ifndef TENSORFLOW_TOOLS_BENCHMARK_BENCHMARK_MODEL_H_
#define TENSORFLOW_TOOLS_BENCHMARK_BENCHMARK_MODEL_H_
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/stat_summarizer.h"
namespace tensorflow {
namespace benchmark_model {
// Description of one graph input: placeholder node name, dtype, concrete
// shape, and optional per-element initialization values (empty => zero-fill).
struct InputLayerInfo {
  string name;
  DataType data_type;
  TensorShape shape;
  std::vector<float> initialization_values;
};
Status InitializeSession(int num_threads, const string& graph,
std::unique_ptr<Session>* session,
std::unique_ptr<GraphDef>* graph_def);
Status RunBenchmark(const std::vector<InputLayerInfo>& inputs,
const std::vector<string>& outputs,
const std::vector<string>& targets, Session* session,
StatSummarizer* stats, int64_t* inference_time_us);
Status TimeMultipleRuns(double sleep_seconds, int num_runs, double max_time_s,
const std::vector<InputLayerInfo>& inputs,
const std::vector<string>& outputs,
const std::vector<string>& targets, Session* session,
StatSummarizer* stats, int64_t* total_time_us,
int64_t* actual_num_runs);
int Main(int argc, char** argv);
}
}
#endif
#include "tensorflow/tools/benchmark/benchmark_model.h"
#include <cstdlib>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/core/util/reporter.h"
#include "tensorflow/core/util/stat_summarizer.h"
#include "tensorflow/core/util/stat_summarizer_options.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace benchmark_model {
namespace {
// Runs each initializer op as its own target-only step, stopping at the
// first failure.
Status InitializeVariables(Session* session,
                           const std::vector<string>& init_ops) {
  LOG(INFO) << "Initializing graph variables";
  for (const auto& op_name : init_ops) {
    TF_RETURN_IF_ERROR(session->Run({}, {}, {op_name}, nullptr));
  }
  return absl::OkStatus();
}
// Zero-fills `input_tensor`, then copies `initialization_values` (cast to T)
// into its leading elements.
//
// Fixes: the loop index was a signed int compared against size() (sign
// mismatch), and the write was unbounded — more initialization values than
// tensor elements would index past the end of the flat view. Extra values are
// now ignored. The redundant empty() guard is dropped (an empty vector
// naturally yields zero iterations).
template <class T>
void InitializeTensor(const std::vector<float>& initialization_values,
                      Tensor* input_tensor) {
  auto type_tensor = input_tensor->flat<T>();
  type_tensor = type_tensor.constant(0);
  for (size_t i = 0; i < initialization_values.size() &&
                     i < static_cast<size_t>(type_tensor.size());
       ++i) {
    type_tensor(i) = static_cast<T>(initialization_values[i]);
  }
}
// Builds one (name, Tensor) pair per input layer. Numeric tensors are
// zero-filled and then overwritten with any user-supplied initialization
// values; string tensors are filled with "". Unsupported dtypes abort.
void CreateTensorsFromInputInfo(
    const std::vector<InputLayerInfo>& inputs,
    std::vector<std::pair<string, tensorflow::Tensor> >* input_tensors) {
  for (const InputLayerInfo& input : inputs) {
    Tensor input_tensor(input.data_type, input.shape);
    switch (input.data_type) {
      case DT_INT32: {
        InitializeTensor<int32>(input.initialization_values, &input_tensor);
        break;
      }
      case DT_INT64: {
        InitializeTensor<int64>(input.initialization_values, &input_tensor);
        break;
      }
      case DT_FLOAT: {
        InitializeTensor<float>(input.initialization_values, &input_tensor);
        break;
      }
      case DT_QUINT8: {
        InitializeTensor<quint8>(input.initialization_values, &input_tensor);
        break;
      }
      case DT_UINT8: {
        InitializeTensor<uint8>(input.initialization_values, &input_tensor);
        break;
      }
      case DT_BOOL: {
        InitializeTensor<bool>(input.initialization_values, &input_tensor);
        break;
      }
      case DT_STRING: {
        // Strings have no numeric initializer; only empty-string fill is
        // supported.
        if (!input.initialization_values.empty()) {
          LOG(FATAL) << "Initialization values are not supported for strings";
        }
        auto type_tensor = input_tensor.flat<tstring>();
        type_tensor = type_tensor.constant("");
        break;
      }
      default:
        LOG(FATAL) << "Unsupported input type: "
                   << DataTypeString(input.data_type);
    }
    input_tensors->push_back({input.name, input_tensor});
  }
}
// Resolves the concrete shape of every node in `wanted_shapes`. Shapes of
// input layers are taken directly from the synthesized input tensors; all
// remaining nodes are fetched in a single Session::Run and their output
// shapes recorded.
Status GetOutputShapes(const std::vector<InputLayerInfo>& inputs,
                       const std::set<string>& wanted_shapes, Session* session,
                       std::unordered_map<string, TensorShape>* node_shapes) {
  std::vector<std::pair<string, tensorflow::Tensor> > input_tensors;
  CreateTensorsFromInputInfo(inputs, &input_tensors);
  std::vector<tensorflow::Tensor> output_tensors;
  std::vector<string> output_tensor_names;
  for (const string& wanted_shape : wanted_shapes) {
    bool is_input = false;
    for (const std::pair<string, tensorflow::Tensor>& input_tensor :
         input_tensors) {
      if (input_tensor.first == wanted_shape) {
        // Input shape is known without running the graph.
        (*node_shapes)[wanted_shape] = input_tensor.second.shape();
        is_input = true;
        break;
      }
    }
    if (!is_input) {
      output_tensor_names.push_back(wanted_shape);
    }
  }
  TF_RETURN_IF_ERROR(
      session->Run(input_tensors, output_tensor_names, {}, &output_tensors));
  CHECK_EQ(output_tensors.size(), output_tensor_names.size());
  for (int i = 0; i < output_tensor_names.size(); ++i) {
    const string& wanted_shape_name = output_tensor_names[i];
    const TensorShape& found_shape = output_tensors[i].shape();
    (*node_shapes)[wanted_shape_name] = found_shape;
  }
  return absl::OkStatus();
}
// Estimates FLOPs for the multiply-heavy ops (Conv2D, MatMul, their quantized
// variants, and DepthwiseConv2dNative). Shapes are resolved at runtime via
// GetOutputShapes; per-op totals accumulate into `flops_by_op` and the grand
// total into `total_flops`.
Status CalculateFlops(const GraphDef& graph,
                      const std::vector<InputLayerInfo>& inputs,
                      Session* session, int64_t* total_flops,
                      std::unordered_map<string, int64_t>* flops_by_op) {
  std::unordered_set<string> floppable_ops = {
      "Conv2D", "MatMul", "QuantizedConv2D", "QuantizedMatMul",
      "DepthwiseConv2dNative"};
  std::set<string> wanted_shapes;
  // Collect every tensor whose shape the FLOP formulas below need.
  for (const NodeDef& node : graph.node()) {
    if (floppable_ops.count(node.op())) {
      for (const string& input : node.input()) {
        wanted_shapes.insert(input);
      }
      wanted_shapes.insert(node.name());
    }
  }
  std::unordered_map<string, TensorShape> found_shapes;
  TF_RETURN_IF_ERROR(
      GetOutputShapes(inputs, wanted_shapes, session, &found_shapes));
  *total_flops = 0;
  for (const NodeDef& node : graph.node()) {
    if (floppable_ops.count(node.op())) {
      int64_t current_flops = 0;
      if ((node.op() == "Conv2D") || (node.op() == "QuantizedConv2D")) {
        // Per output element: filter_h * filter_w * in_depth multiply-adds
        // (the trailing *2 counts multiply and add separately).
        const TensorShape& filter_shape = found_shapes[node.input(1)];
        const TensorShape& output_shape = found_shapes[node.name()];
        int64_t filter_height = filter_shape.dim_size(0);
        int64_t filter_width = filter_shape.dim_size(1);
        int64_t filter_in_depth = filter_shape.dim_size(2);
        int64_t output_count = output_shape.num_elements();
        current_flops =
            output_count * filter_in_depth * filter_height * filter_width * 2;
      } else if ((node.op() == "MatMul") || (node.op() == "QuantizedMatMul")) {
        // Per output element: k multiply-adds, where k is the contracted dim
        // of A (dim 0 if A is transposed, dim 1 otherwise).
        const bool transpose_a = node.attr().at("transpose_a").b();
        const TensorShape& a_shape = found_shapes[node.input(0)];
        const TensorShape& output_shape = found_shapes[node.name()];
        int64_t k;
        if (transpose_a) {
          k = a_shape.dim_size(0);
        } else {
          k = a_shape.dim_size(1);
        }
        int64_t output_count = output_shape.num_elements();
        current_flops = k * output_count * 2;
      } else if (node.op() == "DepthwiseConv2dNative") {
        // Depthwise: one input channel per output channel, so no in-depth
        // factor.
        const TensorShape& filter_shape = found_shapes[node.input(1)];
        const TensorShape& output_shape = found_shapes[node.name()];
        int64_t filter_height = filter_shape.dim_size(0);
        int64_t filter_width = filter_shape.dim_size(1);
        int64_t output_count = output_shape.num_elements();
        current_flops = output_count * filter_height * filter_width * 2;
      }
      (*flops_by_op)[node.op()] += current_flops;
      *total_flops += current_flops;
    }
  }
  return absl::OkStatus();
}
void RecordBenchmarkEntry(const string& output_prefix,
const string& benchmark_name, const string& postfix,
int num_runs, double total_time_s,
double throughput = -1.0) {
std::stringstream stream;
stream << benchmark_name;
if (!postfix.empty()) {
stream << "_" << postfix;
}
TestReporter node_reporter(output_prefix, stream.str());
TF_QCHECK_OK(node_reporter.Initialize());
TF_QCHECK_OK(
node_reporter.Benchmark(num_runs, -1.0, total_time_s, throughput));
TF_QCHECK_OK(node_reporter.Close());
}
// Sleeps for `sleep_seconds` (no-op for values <= 0). Uses nanosleep on
// POSIX; Windows lacks nanosleep, so Env::SleepForMicroseconds is used there.
void SleepSeconds(double sleep_seconds) {
  if (sleep_seconds <= 0.0) {
    return;
  }
#ifdef PLATFORM_WINDOWS
  Env::Default()->SleepForMicroseconds(sleep_seconds * 1000 * 1000);
#else
  // Split the fractional seconds into whole seconds + nanoseconds.
  timespec req;
  req.tv_sec = static_cast<time_t>(sleep_seconds);
  req.tv_nsec = (sleep_seconds - req.tv_sec) * 1000000000;
  nanosleep(&req, nullptr);
#endif
}
}
// Creates a Session (optionally pinning intra/inter-op parallelism to
// `num_threads`) and loads `graph` into *graph_def, trying binary proto first
// and falling back to text proto. The graph is then installed in the session.
//
// Cleanup: removed an unused local GraphDef (`tensorflow_graph`) and replaced
// reset(new GraphDef()) with std::make_unique.
Status InitializeSession(int num_threads, const string& graph,
                         std::unique_ptr<Session>* session,
                         std::unique_ptr<GraphDef>* graph_def) {
  LOG(INFO) << "Loading TensorFlow.";
  tensorflow::SessionOptions options;
  tensorflow::ConfigProto& config = options.config;
  if (num_threads > 0) {
    config.set_intra_op_parallelism_threads(num_threads);
    config.set_inter_op_parallelism_threads(num_threads);
  }
  LOG(INFO) << "Got config, " << config.device_count_size() << " devices";
  session->reset(tensorflow::NewSession(options));
  *graph_def = std::make_unique<GraphDef>();
  // Binary first; fall back to the text format on failure.
  Status s = ReadBinaryProto(Env::Default(), graph, graph_def->get());
  if (!s.ok()) {
    s = ReadTextProto(Env::Default(), graph, graph_def->get());
  }
  if (!s.ok()) {
    LOG(ERROR) << "Could not create TensorFlow Graph: " << s;
    return s;
  }
  s = (*session)->Create(*(graph_def->get()));
  if (!s.ok()) {
    LOG(ERROR) << "Could not create TensorFlow Session: " << s;
    return s;
  }
  return absl::OkStatus();
}
// Runs a single inference pass over synthesized inputs, writing the wall time
// to *inference_time_us. When `stats` is non-null, full tracing is enabled
// and the resulting step stats are folded into the summarizer.
Status RunBenchmark(const std::vector<InputLayerInfo>& inputs,
                    const std::vector<string>& outputs,
                    const std::vector<string>& targets, Session* session,
                    StatSummarizer* stats, int64_t* inference_time_us) {
  std::vector<std::pair<string, tensorflow::Tensor> > input_tensors;
  CreateTensorsFromInputInfo(inputs, &input_tensors);
  std::vector<tensorflow::Tensor> output_tensors;
  tensorflow::Status s;
  RunOptions run_options;
  if (stats != nullptr) {
    // Tracing is expensive, so only request it when stats were asked for.
    run_options.set_trace_level(RunOptions::FULL_TRACE);
  }
  RunMetadata run_metadata;
  const int64_t start_time = Env::Default()->NowMicros();
  s = session->Run(run_options, input_tensors, outputs, targets,
                   &output_tensors, &run_metadata);
  const int64_t end_time = Env::Default()->NowMicros();
  // Note: the elapsed time is reported even when the run failed.
  *inference_time_us = end_time - start_time;
  if (!s.ok()) {
    LOG(ERROR) << "Error during inference: " << s;
    return s;
  }
  if (stats != nullptr) {
    assert(run_metadata.has_step_stats());
    const StepStats& step_stats = run_metadata.step_stats();
    stats->ProcessStepStats(step_stats);
  }
  return s;
}
// Repeatedly runs the benchmark until `num_runs` iterations complete
// (num_runs <= 0 means "run until max_time_s"), accumulating per-run times
// into *total_time_us / *actual_num_runs and logging a timing summary.
//
// Fix: the run status is now checked immediately after each run. Previously a
// failed run's time was first folded into the statistics and totals, and if
// the max-time budget was exhausted on that same iteration the loop broke
// before the status check — silently swallowing the error.
Status TimeMultipleRuns(double sleep_seconds, int num_runs, double max_time_s,
                        const std::vector<InputLayerInfo>& inputs,
                        const std::vector<string>& outputs,
                        const std::vector<string>& targets, Session* session,
                        StatSummarizer* stats, int64_t* total_time_us,
                        int64_t* actual_num_runs) {
  *total_time_us = 0;
  LOG(INFO) << "Running benchmark for max " << num_runs << " iterations, max "
            << max_time_s << " seconds "
            << (stats != nullptr ? "with" : "without")
            << " detailed stat logging, with " << sleep_seconds
            << "s sleep between inferences";
  Stat<int64_t> stat;
  const bool until_max_time = num_runs <= 0;
  for (int i = 0; until_max_time || i < num_runs; ++i) {
    int64_t time;
    Status run_status =
        RunBenchmark(inputs, outputs, targets, session, stats, &time);
    if (!run_status.ok()) {
      LOG(INFO) << "Failed on run " << i;
      return run_status;
    }
    stat.UpdateStat(time);
    (*total_time_us) += time;
    ++(*actual_num_runs);
    if (max_time_s > 0.0 && (*total_time_us / 1000000.0) > max_time_s) {
      break;
    }
    if (sleep_seconds > 0.0) {
      SleepSeconds(sleep_seconds);
    }
  }
  std::stringstream stream;
  stat.OutputToStream(&stream);
  LOG(INFO) << stream.str() << std::endl;
  return absl::OkStatus();
}
// Command-line driver: parses flags, loads and initializes the graph, runs
// warmup + unstatted + statted timing passes, then optionally estimates FLOPs
// and writes TestReporter benchmark entries. Returns 0 on success, -1 on any
// failure.
int Main(int argc, char** argv) {
  string graph = "/data/local/tmp/tensorflow_inception_graph.pb";
  string init_ops_string = "";
  string input_layer_string = "input:0";
  string input_layer_shape_string = "1,224,224,3";
  string input_layer_type_string = "float";
  string input_layer_values_string = "";
  string output_layer_string = "output:0";
  string target_layer_string = "";
  int max_num_runs = 1000;
  string max_time = "10.0";
  string inference_delay = "-1.0";
  string inter_benchmark_delay = "-1.0";
  int num_threads = -1;
  string benchmark_name = "";
  string output_prefix = "";
  bool show_sizes = false;
  bool show_run_order = true;
  int run_order_limit = 0;
  bool show_time = true;
  int time_limit = 10;
  bool show_memory = true;
  int memory_limit = 10;
  bool show_type = true;
  bool show_summary = true;
  bool show_flops = false;
  int warmup_runs = 1;
  std::vector<Flag> flag_list = {
      Flag("graph", &graph, "graph file name"),
      Flag("init_ops", &init_ops_string, "init ops"),
      Flag("input_layer", &input_layer_string, "input layer names"),
      Flag("input_layer_shape", &input_layer_shape_string, "input layer shape"),
      Flag("input_layer_type", &input_layer_type_string, "input layer type"),
      Flag("input_layer_values", &input_layer_values_string,
           "values to initialize the inputs with"),
      Flag("output_layer", &output_layer_string, "output layer name"),
      Flag("target_layer", &target_layer_string, "target layer name"),
      Flag("max_num_runs", &max_num_runs, "number of runs max"),
      Flag("max_time", &max_time, "length to run max"),
      Flag("inference_delay", &inference_delay,
           "delay between runs in seconds"),
      Flag("inter_benchmark_delay", &inter_benchmark_delay,
           "delay between benchmarks in seconds"),
      Flag("num_threads", &num_threads, "number of threads"),
      Flag("benchmark_name", &benchmark_name, "benchmark name"),
      Flag("output_prefix", &output_prefix, "benchmark output prefix"),
      Flag("show_sizes", &show_sizes, "whether to show sizes"),
      Flag("show_run_order", &show_run_order,
           "whether to list stats by run order"),
      Flag("run_order_limit", &run_order_limit,
           "how many items to show by run order"),
      Flag("show_time", &show_time, "whether to list stats by time taken"),
      Flag("time_limit", &time_limit, "how many items to show by time taken"),
      Flag("show_memory", &show_memory, "whether to list stats by memory used"),
      Flag("memory_limit", &memory_limit,
           "how many items to show by memory used"),
      Flag("show_type", &show_type, "whether to list stats by op type"),
      Flag("show_summary", &show_summary,
           "whether to show a summary of the stats"),
      Flag("show_flops", &show_flops, "whether to estimate the model's FLOPs"),
      Flag("warmup_runs", &warmup_runs, "how many runs to initialize model"),
  };
  string usage = Flags::Usage(argv[0], flag_list);
  const bool parse_result = Flags::Parse(&argc, argv, flag_list);
  if (!parse_result) {
    LOG(ERROR) << usage;
    return -1;
  }
  // Per-layer lists: layers/types are comma-separated, while shapes/values
  // use ':' between layers (each layer's shape/value list is itself
  // comma-separated).
  std::vector<string> init_ops = str_util::Split(init_ops_string, ',');
  std::vector<string> input_layers = str_util::Split(input_layer_string, ',');
  std::vector<string> input_layer_shapes =
      str_util::Split(input_layer_shape_string, ':');
  std::vector<string> input_layer_types =
      str_util::Split(input_layer_type_string, ',');
  std::vector<string> input_layer_values =
      str_util::Split(input_layer_values_string, ':');
  std::vector<string> output_layers = str_util::Split(output_layer_string, ',');
  std::vector<string> target_layers = str_util::Split(target_layer_string, ',');
  if ((input_layers.size() != input_layer_shapes.size()) ||
      (input_layers.size() != input_layer_types.size())) {
    LOG(ERROR) << "There must be the same number of items in --input_layer,"
               << " --input_layer_shape, and --input_layer_type, for example"
               << " --input_layer=input1,input2 --input_layer_type=float,float "
               << " --input_layer_shape=1,224,224,4:1,20";
    LOG(ERROR) << "--input_layer=" << input_layer_string << " ("
               << input_layers.size() << " items)";
    LOG(ERROR) << "--input_layer_type=" << input_layer_type_string << " ("
               << input_layer_types.size() << " items)";
    LOG(ERROR) << "--input_layer_shape=" << input_layer_shape_string << " ("
               << input_layer_shapes.size() << " items)";
    return -1;
  }
  const size_t inputs_count = input_layers.size();
  ::tensorflow::port::InitMain(argv[0], &argc, &argv);
  if (argc > 1) {
    LOG(ERROR) << "Unknown argument " << argv[1] << "\n" << usage;
    return -1;
  }
  LOG(INFO) << "Graph: [" << graph << "]";
  LOG(INFO) << "Init ops:" << init_ops_string;
  LOG(INFO) << "Input layers: [" << input_layer_string << "]";
  LOG(INFO) << "Input shapes: [" << input_layer_shape_string << "]";
  LOG(INFO) << "Input types: [" << input_layer_type_string << "]";
  LOG(INFO) << "Output layers: [" << output_layer_string << "]";
  LOG(INFO) << "Target layers: [" << target_layer_string << "]";
  LOG(INFO) << "Num runs: [" << max_num_runs << "]";
  LOG(INFO) << "Inter-inference delay (seconds): [" << inference_delay << "]";
  LOG(INFO) << "Inter-benchmark delay (seconds): [" << inter_benchmark_delay
            << "]";
  LOG(INFO) << "Num threads: [" << num_threads << "]";
  LOG(INFO) << "Benchmark name: [" << benchmark_name << "]";
  LOG(INFO) << "Output prefix: [" << output_prefix << "]";
  LOG(INFO) << "Show sizes: [" << show_sizes << "]";
  LOG(INFO) << "Warmup runs: [" << warmup_runs << "]";
  std::unique_ptr<Session> session;
  std::unique_ptr<StatSummarizer> stats;
  std::unique_ptr<GraphDef> graph_def;
  int64_t initialization_start_us = Env::Default()->NowMicros();
  Status initialize_status =
      InitializeSession(num_threads, graph, &session, &graph_def);
  int64_t initialization_end_us = Env::Default()->NowMicros();
  double initialization_time_s =
      (initialization_end_us - initialization_start_us) / 1000000.0;
  LOG(INFO) << "Initialized session in " << initialization_time_s << "s";
  if (!initialize_status.ok()) {
    return -1;
  }
  if (!init_ops.empty()) {
    Status initialize_variables_status =
        InitializeVariables(session.get(), init_ops);
    if (!initialize_variables_status.ok()) {
      LOG(ERROR) << "Graph variables initialization failed with "
                 << initialize_variables_status;
      return -1;
    }
  }
  StatSummarizerOptions stats_options;
  stats_options.show_run_order = show_run_order;
  stats_options.run_order_limit = run_order_limit;
  stats_options.show_time = show_time;
  stats_options.time_limit = time_limit;
  stats_options.show_memory = show_memory;
  stats_options.memory_limit = memory_limit;
  stats_options.show_type = show_type;
  stats_options.show_summary = show_summary;
  stats.reset(new tensorflow::StatSummarizer(stats_options));
  const double inter_inference_sleep_seconds =
      std::strtod(inference_delay.c_str(), nullptr);
  const double inter_benchmark_sleep_seconds =
      std::strtod(inter_benchmark_delay.c_str(), nullptr);
  const double max_benchmark_time_seconds =
      std::strtod(max_time.c_str(), nullptr);
  // Assemble the per-input descriptions from the parsed flag pieces.
  std::vector<InputLayerInfo> inputs;
  for (int n = 0; n < inputs_count; ++n) {
    InputLayerInfo input;
    CHECK(DataTypeFromString(input_layer_types[n], &input.data_type))
        << input_layer_types[n] << " was an invalid type";
    std::vector<string> split_layer_shapes =
        str_util::Split(input_layer_shapes[n], ',');
    for (const string& layer_shape : split_layer_shapes) {
      int32_t tmp;
      CHECK(strings::safe_strto32(layer_shape, &tmp))
          << "Incorrect size string specified: " << input_layer_shapes[n];
      if (tmp == -1) {
        LOG(ERROR) << "Any unknown sizes in the shapes (-1's) must be replaced"
                   << " with the size you want to benchmark with.";
        return -1;
      } else {
        input.shape.AddDim(tmp);
      }
    }
    input.name = input_layers[n];
    if (n < input_layer_values.size()) {
      std::vector<string> string_tokens =
          str_util::Split(input_layer_values[n], ',');
      input.initialization_values.clear();
      input.initialization_values.reserve(string_tokens.size());
      for (const string& str_val : string_tokens) {
        float val;
        CHECK(strings::safe_strtof(str_val, &val))
            << "Incorrect initialization values string specified: "
            << input_layer_values[n];
        input.initialization_values.push_back(val);
      }
    }
    inputs.push_back(input);
  }
  // Warmup pass: primes caches/allocations so the timed passes are stable.
  int64_t warmup_time_us = 0;
  int64_t num_warmup_runs = 0;
  if (warmup_runs > 0) {
    Status warmup_time_status =
        TimeMultipleRuns(inter_inference_sleep_seconds, warmup_runs, -1.0,
                         inputs, output_layers, target_layers, session.get(),
                         nullptr, &warmup_time_us, &num_warmup_runs);
    if (!warmup_time_status.ok()) {
      LOG(ERROR) << "Timing failed with " << warmup_time_status;
      return -1;
    }
  }
  SleepSeconds(inter_benchmark_sleep_seconds);
  // First timed pass without stats collection (no tracing overhead).
  int64_t no_stat_time_us = 0;
  int64_t no_stat_num_runs = 0;
  Status no_stat_time_status = TimeMultipleRuns(
      inter_inference_sleep_seconds, max_num_runs, max_benchmark_time_seconds,
      inputs, output_layers, target_layers, session.get(), nullptr,
      &no_stat_time_us, &no_stat_num_runs);
  const double no_stat_wall_time = no_stat_time_us / 1000000.0;
  if (!no_stat_time_status.ok()) {
    LOG(ERROR) << "Timing failed with " << no_stat_time_status;
    return -1;
  }
  SleepSeconds(inter_benchmark_sleep_seconds);
  // Second timed pass with full tracing for the per-op stat summary.
  int64_t stat_time_us = 0;
  int64_t stat_num_runs = 0;
  Status stat_time_status = TimeMultipleRuns(
      inter_inference_sleep_seconds, max_num_runs, max_benchmark_time_seconds,
      inputs, output_layers, target_layers, session.get(), stats.get(),
      &stat_time_us, &stat_num_runs);
  if (!stat_time_status.ok()) {
    LOG(ERROR) << "Timing failed with " << stat_time_status;
    return -1;
  }
  LOG(INFO) << "Average inference timings in us: "
            << "Warmup: "
            << (warmup_runs > 0 ? warmup_time_us / warmup_runs : 0) << ", "
            << "no stats: " << no_stat_time_us / no_stat_num_runs << ", "
            << "with stats: " << stat_time_us / stat_num_runs;
  stats->PrintStepStats();
  if (show_sizes) {
    stats->PrintOutputs();
  }
  if (show_flops) {
    int64_t total_flops;
    std::unordered_map<string, int64_t> flops_by_op;
    Status flop_status = CalculateFlops(*graph_def, inputs, session.get(),
                                        &total_flops, &flops_by_op);
    if (!flop_status.ok()) {
      LOG(ERROR) << "FLOPs calculation failed with " << flop_status;
      return -1;
    }
    string pretty_flops;
    if (total_flops < 1000) {
      pretty_flops = strings::StrCat(total_flops, " FLOPs");
    } else if (total_flops < (1000 * 1000)) {
      const float rounded_flops = (total_flops / 1000.0f);
      pretty_flops = strings::StrCat(rounded_flops, "k FLOPs");
    } else if (total_flops < (1000 * 1000 * 1000)) {
      const float rounded_flops = round(total_flops / 1000.0f) / 1000.0f;
      pretty_flops = strings::StrCat(rounded_flops, " million FLOPs");
    } else {
      const float rounded_flops =
          round(total_flops / (1000.0f * 1000.0f)) / 1000.0f;
      pretty_flops = strings::StrCat(rounded_flops, " billion FLOPs");
    }
    LOG(INFO) << "FLOPs estimate: " << strings::HumanReadableNum(total_flops);
    const double mean_run_time = no_stat_wall_time / no_stat_num_runs;
    LOG(INFO) << "FLOPs/second: "
              << strings::HumanReadableNum(
                     static_cast<int64_t>(total_flops / mean_run_time));
  }
  if (!benchmark_name.empty() && !output_prefix.empty()) {
    // Throughput in MB/s, computed from the first input's byte volume.
    int64_t total_size = inputs[0].shape.num_elements();
    const double throughput =
        DataTypeSize(inputs[0].data_type) * total_size * no_stat_num_runs /
        static_cast<double>(no_stat_wall_time) / (1024 * 1024);
    RecordBenchmarkEntry(output_prefix, benchmark_name, "", no_stat_num_runs,
                         no_stat_wall_time, throughput);
    RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-init", 1,
                         initialization_time_s);
    RecordBenchmarkEntry(output_prefix, benchmark_name, "meta-first-inference",
                         warmup_runs, warmup_time_us / 1000000.0);
    RecordBenchmarkEntry(
        output_prefix, benchmark_name, "meta-init-plus-first-inference", 1,
        initialization_time_s + (warmup_time_us / 1000000.0) / warmup_runs);
    std::map<std::string, int64_t> node_type_map_count;
    std::map<std::string, int64_t> node_type_map_time;
    std::map<std::string, int64_t> node_type_map_memory;
    std::map<std::string, int64_t> node_type_map_times_called;
    int64_t accumulated_us;
    stats->ComputeStatsByType(&node_type_map_count, &node_type_map_time,
                              &node_type_map_memory,
                              &node_type_map_times_called, &accumulated_us);
    for (const auto& time : node_type_map_time) {
      LOG(INFO) << "Outputting: [" << time.first << "]";
      RecordBenchmarkEntry(output_prefix, benchmark_name, time.first,
                           stat_num_runs,
                           (time.second * stat_num_runs) / 1000000.0f);
    }
  }
  return 0;
}
}
} | #include "tensorflow/tools/benchmark/benchmark_model.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/stat_summarizer.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
// Builds a minimal graph for benchmarking: a float placeholder input
// multiplied (MatMul) by a constant matrix filled with 3.0.
// Outputs: `input` describes the placeholder (shape, dtype, node name),
// `output_name` is the MatMul node's name, and `graph_def` receives the
// serialized graph.
void CreateTestGraph(const ::tensorflow::Scope& root,
                     benchmark_model::InputLayerInfo* input,
                     string* output_name, GraphDef* graph_def) {
  // Placeholder is [400 x 10]; the constant is [10 x 400] so MatMul is valid.
  const int input_width = 400;
  const int input_height = 10;
  input->shape = TensorShape({input_width, input_height});
  input->data_type = DT_FLOAT;
  const TensorShape constant_shape({input_height, input_width});
  Tensor constant_tensor(DT_FLOAT, constant_shape);
  test::FillFn<float>(&constant_tensor, [](int) -> float { return 3.0; });
  auto placeholder =
      ops::Placeholder(root, DT_FLOAT, ops::Placeholder::Shape(input->shape));
  // The benchmark feeds the input by node name, so record the generated name.
  input->name = placeholder.node()->name();
  auto m = ops::MatMul(root, placeholder, constant_tensor);
  *output_name = m.node()->name();
  TF_ASSERT_OK(root.ToGraphDef(graph_def));
}
// Writes the test graph as a binary proto, loads it back via
// InitializeSession, and checks that TimeMultipleRuns performs exactly the
// requested number of runs.
TEST(BenchmarkModelTest, InitializeAndRun) {
  const string dir = testing::TmpDir();
  const string filename_pb = io::JoinPath(dir, "graphdef.pb");
  auto root = Scope::NewRootScope().ExitOnError();
  benchmark_model::InputLayerInfo input;
  string output_name;
  GraphDef graph_def;
  CreateTestGraph(root, &input, &output_name, &graph_def);
  string graph_def_serialized;
  graph_def.SerializeToString(&graph_def_serialized);
  TF_ASSERT_OK(
      WriteStringToFile(Env::Default(), filename_pb, graph_def_serialized));
  std::unique_ptr<Session> session;
  std::unique_ptr<GraphDef> loaded_graph_def;
  TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_pb, &session,
                                                  &loaded_graph_def));
  std::unique_ptr<StatSummarizer> stats;
  stats.reset(new tensorflow::StatSummarizer(*(loaded_graph_def.get())));
  int64_t time;
  int64_t num_runs = 0;
  // A max_time of 0.0 disables the time budget, so exactly 10 runs occur.
  TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
      0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
      &time, &num_runs));
  ASSERT_EQ(num_runs, 10);
}
// Verifies that InitializeSession can also load a graph stored as a TEXT
// proto (".pb.txt") and that TimeMultipleRuns then executes the requested
// number of runs.
// Fix: the suite name was misspelled "BenchmarkModeTest"; renamed to
// "BenchmarkModelTest" to match the sibling test above.
TEST(BenchmarkModelTest, TextProto) {
  const string dir = testing::TmpDir();
  const string filename_txt = io::JoinPath(dir, "graphdef.pb.txt");
  auto root = Scope::NewRootScope().ExitOnError();
  benchmark_model::InputLayerInfo input;
  string output_name;
  GraphDef graph_def;
  CreateTestGraph(root, &input, &output_name, &graph_def);
  TF_ASSERT_OK(WriteTextProto(Env::Default(), filename_txt, graph_def));
  std::unique_ptr<Session> session;
  std::unique_ptr<GraphDef> loaded_graph_def;
  TF_ASSERT_OK(benchmark_model::InitializeSession(1, filename_txt, &session,
                                                  &loaded_graph_def));
  std::unique_ptr<StatSummarizer> stats;
  stats.reset(new tensorflow::StatSummarizer(*(loaded_graph_def.get())));
  int64_t time;
  int64_t num_runs = 0;
  // A max_time of 0.0 disables the time budget, so exactly 10 runs occur.
  TF_ASSERT_OK(benchmark_model::TimeMultipleRuns(
      0.0, 10, 0.0, {input}, {output_name}, {}, session.get(), stats.get(),
      &time, &num_runs));
  ASSERT_EQ(num_runs, 10);
}
}
} |
298 | #ifndef TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_
#define TENSORFLOW_LITE_SCHEMA_BUILTIN_OPS_HEADER_GENERATOR_H_
#include <iostream>
#include <string>
namespace tflite {
namespace builtin_ops_header {
bool IsValidInputEnumName(const std::string& name);
std::string ConstantizeVariableName(const std::string& name);
bool GenerateHeader(std::ostream& os);
}
}
#endif
#include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <string>
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace builtin_ops_header {
namespace {
// Emitted verbatim before the enumerator list: include guard, C-linkage
// wrapper, and the opening of the TfLiteBuiltinOperator enum.
const char* kFileHeader =
    R"(
#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_
#define TENSORFLOW_LITE_BUILTIN_OPS_H_
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
)";
// Emitted verbatim after the enumerator list; closes the enum, the
// extern "C" block, and the include guard.
const char* kFileFooter =
    R"(} TfLiteBuiltinOperator;
#ifdef __cplusplus
}
#endif
#endif
)";
}
// Returns true if `name` is a well-formed schema enum name for a builtin op:
// one or more non-empty tokens of uppercase ASCII letters and digits,
// separated by single underscores (e.g. "CONV_2D"). Leading/trailing or
// doubled underscores and lowercase letters are rejected.
// NOTE(review): an empty string is accepted (the outer loop never runs);
// callers only pass generated enum names, so this is presumably never hit.
bool IsValidInputEnumName(const std::string& name) {
  const char* begin = name.c_str();
  const char* ch = begin;
  while (*ch != '\0') {
    // After the first token, require exactly one '_' separator.
    if (ch != begin) {
      if (*ch != '_') {
        return false;
      }
      ++ch;
    }
    // Consume one token. Cast through unsigned char before calling <cctype>
    // classifiers: passing a negative char (a non-ASCII byte where char is
    // signed) is undefined behavior.
    bool empty = true;
    while (isupper(static_cast<unsigned char>(*ch)) ||
           isdigit(static_cast<unsigned char>(*ch))) {
      empty = false;
      ++ch;
    }
    // An empty token means a leading '_', trailing '_', or "__".
    if (empty) {
      return false;
    }
  }
  return true;
}
// Converts an underscore-separated uppercase schema enum name into the
// CamelCase C constant emitted in the generated header, prefixed with
// "kTfLiteBuiltin". E.g. "CONV_2D" -> "kTfLiteBuiltinConv2d".
// Underscores are dropped; the first character and each character following
// an underscore are uppercased, all other characters are lowercased.
std::string ConstantizeVariableName(const std::string& name) {
  std::string result = "kTfLiteBuiltin";
  bool uppercase = true;
  for (char input_char : name) {
    if (input_char == '_') {
      uppercase = true;
    } else if (uppercase) {
      // Cast through unsigned char: passing a negative char to <cctype>
      // functions is undefined behavior on signed-char platforms.
      result +=
          static_cast<char>(toupper(static_cast<unsigned char>(input_char)));
      uppercase = false;
    } else {
      result +=
          static_cast<char>(tolower(static_cast<unsigned char>(input_char)));
    }
  }
  return result;
}
// Writes the complete builtin_ops.h content to `os`: validates every
// generated BuiltinOperator enum name, then emits kFileHeader, one
// "kTfLiteBuiltinXxx = N," line per operator, and kFileFooter.
// Returns false (after logging to stderr) if any enum name is malformed;
// in that case nothing has been written to `os`.
bool GenerateHeader(std::ostream& os) {
  auto enum_names = tflite::EnumNamesBuiltinOperator();
  // First pass: validate all names before emitting anything, so a failure
  // never leaves a partially written header behind.
  for (auto enum_value : EnumValuesBuiltinOperator()) {
    auto enum_name = enum_names[enum_value];
    if (!IsValidInputEnumName(enum_name)) {
      std::cerr << "Invalid input enum name: " << enum_name << std::endl;
      return false;
    }
  }
  os << kFileHeader;
  // Second pass: emit one enumerator per builtin operator, pinned to the
  // schema's numeric value so the generated C header stays ABI-stable.
  for (auto enum_value : EnumValuesBuiltinOperator()) {
    auto enum_name = enum_names[enum_value];
    os << "  ";
    os << ConstantizeVariableName(enum_name);
    os << " = ";
    os << enum_value;
    os << ",\n";
  }
  os << kFileFooter;
  return true;
}
}
} | #include "tensorflow/lite/schema/builtin_ops_header/generator.h"
#include <fstream>
#include <gtest/gtest.h>
namespace {
using tflite::builtin_ops_header::ConstantizeVariableName;
using tflite::builtin_ops_header::IsValidInputEnumName;
// Well-formed names: uppercase/digit tokens separated by single underscores.
TEST(TestIsValidInputEnumName, TestWithValidInputNames) {
  EXPECT_TRUE(IsValidInputEnumName("ADD"));
  EXPECT_TRUE(IsValidInputEnumName("CONV_2D"));
  EXPECT_TRUE(IsValidInputEnumName("L2_POOL_2D"));
}
// A leading underscore yields an empty first token, which is rejected.
TEST(TestIsValidInputEnumName, TestWithLeadingUnderscore) {
  EXPECT_FALSE(IsValidInputEnumName("_ADD"));
  EXPECT_FALSE(IsValidInputEnumName("_CONV_2D"));
}
// Lowercase letters are rejected anywhere in an enum name.
// Fix: the inputs previously carried a leading '_' ("_AdD", "_COnV_2D"),
// which made them fail for the leading-underscore reason before the
// lowercase letters were ever examined; the underscore is dropped so this
// test actually exercises lowercase rejection.
TEST(TestIsValidInputEnumName, TestWithLowerCase) {
  EXPECT_FALSE(IsValidInputEnumName("AdD"));
  EXPECT_FALSE(IsValidInputEnumName("COnV_2D"));
}
// Characters other than uppercase letters, digits and '_' are rejected.
// Fix: the inputs previously had a leading '_' and lowercase letters
// ("_AdD!2D"), so they were rejected before the offending '!'/'?' character
// was ever reached; the inputs are otherwise-valid names so the punctuation
// itself triggers the rejection.
TEST(TestIsValidInputEnumName, TestWithOtherCharacters) {
  EXPECT_FALSE(IsValidInputEnumName("ADD!2D"));
  EXPECT_FALSE(IsValidInputEnumName("CONV?2D"));
}
// Consecutive underscores create an empty token, which is rejected.
TEST(TestIsValidInputEnumName, TestWithDoubleUnderscores) {
  EXPECT_FALSE(IsValidInputEnumName("ADD__2D"));
  EXPECT_FALSE(IsValidInputEnumName("CONV__2D"));
}
// Underscore-separated uppercase names become CamelCase constants with the
// "kTfLiteBuiltin" prefix.
TEST(TestConstantizeVariableName, TestWithValidInputNames) {
  EXPECT_EQ(ConstantizeVariableName("ADD"), "kTfLiteBuiltinAdd");
  EXPECT_EQ(ConstantizeVariableName("CONV_2D"), "kTfLiteBuiltinConv2d");
  EXPECT_EQ(ConstantizeVariableName("L2_POOL_2D"), "kTfLiteBuiltinL2Pool2d");
}
} |
299 | #ifndef QUICHE_HTTP2_DECODER_PAYLOAD_DECODERS_UNKNOWN_PAYLOAD_DECODER_H_
#define QUICHE_HTTP2_DECODER_PAYLOAD_DECODERS_UNKNOWN_PAYLOAD_DECODER_H_
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/decoder/frame_decoder_state.h"
#include "quiche/common/platform/api/quiche_export.h"
namespace http2 {
class QUICHE_EXPORT UnknownPayloadDecoder {
public:
DecodeStatus StartDecodingPayload(FrameDecoderState* state, DecodeBuffer* db);
DecodeStatus ResumeDecodingPayload(FrameDecoderState* state,
DecodeBuffer* db);
};
}
#endif
#include "quiche/http2/decoder/payload_decoders/unknown_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
// Begins decoding a frame whose type is not a known/supported HTTP/2 frame
// type: resets the payload accounting, notifies the listener that an unknown
// frame has started, then forwards whatever payload bytes are already
// buffered via ResumeDecodingPayload.
DecodeStatus UnknownPayloadDecoder::StartDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  const Http2FrameHeader& frame_header = state->frame_header();
  QUICHE_DVLOG(2) << "UnknownPayloadDecoder::StartDecodingPayload: "
                  << frame_header;
  // This decoder is only selected for unsupported frame types, and the
  // buffer never holds more than this frame's payload.
  QUICHE_DCHECK(!IsSupportedHttp2FrameType(frame_header.type)) << frame_header;
  QUICHE_DCHECK_LE(db->Remaining(), frame_header.payload_length);
  state->InitializeRemainders();
  state->listener()->OnUnknownStart(frame_header);
  return ResumeDecodingPayload(state, db);
}
// Forwards any currently buffered payload bytes of an unknown frame to the
// listener. Once the whole payload has been consumed, reports OnUnknownEnd
// and returns kDecodeDone; otherwise returns kDecodeInProgress so decoding
// resumes when more bytes arrive.
DecodeStatus UnknownPayloadDecoder::ResumeDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2) << "UnknownPayloadDecoder::ResumeDecodingPayload "
                  << "remaining_payload=" << state->remaining_payload()
                  << "; db->Remaining=" << db->Remaining();
  QUICHE_DCHECK(!IsSupportedHttp2FrameType(state->frame_header().type))
      << state->frame_header();
  QUICHE_DCHECK_LE(state->remaining_payload(),
                   state->frame_header().payload_length);
  QUICHE_DCHECK_LE(db->Remaining(), state->remaining_payload());
  // Hand every buffered byte to the listener and account for it.
  const size_t num_available = db->Remaining();
  if (num_available > 0) {
    state->listener()->OnUnknownPayload(db->cursor(), num_available);
    db->AdvanceCursor(num_available);
    state->ConsumePayload(num_available);
  }
  if (state->remaining_payload() != 0) {
    // More of the payload is still in flight; wait for another buffer.
    return DecodeStatus::kDecodeInProgress;
  }
  state->listener()->OnUnknownEnd();
  return DecodeStatus::kDecodeDone;
}
} | #include "quiche/http2/decoder/payload_decoders/unknown_payload_decoder.h"
#include <stddef.h>
#include <string>
#include <type_traits>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
// The frame type under test; randomized per test case to an unsupported
// value, and read back by UnknownPayloadDecoderPeer::FrameType() below.
Http2FrameType g_unknown_frame_type;
}
// Adapter consulted by the AbstractPayloadDecoderTest harness.
class UnknownPayloadDecoderPeer {
 public:
  static Http2FrameType FrameType() { return g_unknown_frame_type; }
  // Unknown frames have no flags that alter payload decoding.
  static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
// Collects the decoder's callbacks into FrameParts so the decoded frame can
// later be compared against the expected FrameParts.
struct Listener : public FramePartsCollector {
  void OnUnknownStart(const Http2FrameHeader& header) override {
    QUICHE_VLOG(1) << "OnUnknownStart: " << header;
    StartFrame(header)->OnUnknownStart(header);
  }
  // May be invoked multiple times per frame as payload arrives in pieces.
  void OnUnknownPayload(const char* data, size_t len) override {
    QUICHE_VLOG(1) << "OnUnknownPayload: len=" << len;
    CurrentFrame()->OnUnknownPayload(data, len);
  }
  void OnUnknownEnd() override {
    QUICHE_VLOG(1) << "OnUnknownEnd";
    EndFrame()->OnUnknownEnd();
  }
};
// UnknownPayloadDecoder is, by definition, exercised with unsupported types.
constexpr bool SupportedFrameType = false;
// Fixture parameterized over payload length; the constructor also picks a
// random UNsupported frame type for g_unknown_frame_type.
class UnknownPayloadDecoderTest
    : public AbstractPayloadDecoderTest<UnknownPayloadDecoder,
                                        UnknownPayloadDecoderPeer, Listener,
                                        SupportedFrameType>,
      public ::testing::WithParamInterface<uint32_t> {
 protected:
  UnknownPayloadDecoderTest() : length_(GetParam()) {
    QUICHE_VLOG(1) << "################ length_=" << length_
                   << " ################";
    // Keep drawing random 8-bit frame types until one is NOT supported,
    // since this decoder is only selected for unknown types.
    do {
      g_unknown_frame_type = static_cast<Http2FrameType>(Random().Rand8());
    } while (IsSupportedHttp2FrameType(g_unknown_frame_type));
  }
  const uint32_t length_;
};
// Cover the empty payload, several tiny sizes, and sizes around 256.
INSTANTIATE_TEST_SUITE_P(VariousLengths, UnknownPayloadDecoderTest,
                         ::testing::Values(0, 1, 2, 3, 255, 256));
// Decodes a random payload of the parameterized length under a random
// unsupported frame type / random flags / random stream id, then checks the
// collected frame parts against the expectation.
TEST_P(UnknownPayloadDecoderTest, ValidLength) {
  std::string unknown_payload = Random().RandString(length_);
  Http2FrameHeader frame_header(length_, g_unknown_frame_type, Random().Rand8(),
                                RandStreamId());
  set_frame_header(frame_header);
  FrameParts expected(frame_header, unknown_payload);
  EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(unknown_payload, expected));
}
}
}
} |