| # | Code | Unit Test |
|---|---|---|
100 | #ifndef TENSORFLOW_TSL_PROFILER_CONVERT_TRACE_EVENTS_TO_JSON_H_
#define TENSORFLOW_TSL_PROFILER_CONVERT_TRACE_EVENTS_TO_JSON_H_
#include <string>
#include "tsl/platform/types.h"
#include "tsl/profiler/convert/trace_container.h"
namespace tsl {
namespace profiler {
std::string TraceContainerToJson(const TraceContainer& container);
}
}
#endif
#include "tsl/profiler/convert/trace_events_to_json.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/types.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
#include "tsl/profiler/utils/format_utils.h"
#include "tsl/profiler/utils/math_utils.h"
namespace tsl {
namespace profiler {
namespace {
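// Converts picoseconds to a string in microseconds with maximal precision,
// matching the Chrome trace-event JSON convention of microsecond timestamps.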
inline std::string PicosToMicrosString(uint64 ps) {
return MaxPrecision(PicoToMicro(ps));
}
inline std::string JsonString(const std::string& s) {
return Json::valueToQuotedString(s.c_str());
}
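// Returns pointers to the map's entries, sorted by key. Protobuf map fields
// iterate in unspecified order; sorting makes the emitted JSON deterministic.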
template <typename Map>
std::vector<const typename Map::value_type*> SortByKey(const Map& m) {
std::vector<const typename Map::value_type*> pairs;
pairs.reserve(m.size());
for (const auto& pair : m) {
pairs.push_back(&pair);
}
absl::c_sort(pairs, [](const typename Map::value_type* a,
const typename Map::value_type* b) {
return a->first < b->first;
});
return pairs;
}
inline void AddDeviceMetadata(uint32 device_id, const Device& device,
std::string* json) {
if (!device.name().empty()) {
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id,
R"(,"name":"process_name","args":{"name":)",
JsonString(device.name()), "}},");
}
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id,
R"(,"name":"process_sort_index","args":{"sort_index":)",
device_id, "}},");
}
inline void AddResourceMetadata(uint32 device_id, uint32 resource_id,
const Resource& resource, std::string* json) {
if (!resource.name().empty()) {
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
resource_id, R"(,"name":"thread_name","args":{"name":)",
JsonString(resource.name()), "}},");
}
uint32 sort_index =
resource.sort_index() ? resource.sort_index() : resource_id;
absl::StrAppend(json, R"({"ph":"M","pid":)", device_id, R"(,"tid":)",
resource_id, R"(,"name":"thread_sort_index")",
R"(,"args":{"sort_index":)", sort_index, "}},");
}
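// Emits a complete ("X") event. Zero durations are clamped to 1ps so that
// instantaneous events remain visible in the trace viewer.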
inline void AddTraceEvent(const TraceEvent& event, std::string* json) {
auto duration_ps = std::max(event.duration_ps(), protobuf_uint64{1});
absl::StrAppend(json, R"({"ph":"X","pid":)", event.device_id(), R"(,"tid":)",
event.resource_id(), R"(,"ts":)",
PicosToMicrosString(event.timestamp_ps()), R"(,"dur":)",
PicosToMicrosString(duration_ps), R"(,"name":)",
JsonString(event.name()));
if (!event.args().empty()) {
absl::StrAppend(json, R"(,"args":{)");
for (const auto* arg : SortByKey(event.args())) {
absl::StrAppend(json, JsonString(arg->first), ":",
JsonString(arg->second), ",");
}
json->back() = '}';
}
absl::StrAppend(json, "},");
}
}
std::string TraceContainerToJson(const TraceContainer& container) {
std::string json =
R"({"displayTimeUnit":"ns","metadata":{"highres-ticks":true},)"
R"("traceEvents":[)";
for (const auto* id_and_device : SortByKey(container.trace().devices())) {
uint32 device_id = id_and_device->first;
const Device& device = id_and_device->second;
AddDeviceMetadata(device_id, device, &json);
for (const auto* id_and_resource : SortByKey(device.resources())) {
uint32 resource_id = id_and_resource->first;
const Resource& resource = id_and_resource->second;
AddResourceMetadata(device_id, resource_id, resource, &json);
}
}
for (const TraceEvent* const event : container.UnsortedEvents()) {
AddTraceEvent(*event, &json);
}
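// A trailing empty object absorbs the comma left by the last entry.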
absl::StrAppend(&json, "{}]}");
return json;
}
}
} | #include "tsl/profiler/convert/trace_events_to_json.h"
#include <string>
#include "json/json.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
#include "tsl/profiler/convert/trace_container.h"
#include "tsl/profiler/protobuf/trace_events.pb.h"
namespace tsl {
namespace profiler {
namespace {
Json::Value ToJsonValue(const std::string& json_str) {
Json::Value json;
Json::Reader reader;
EXPECT_TRUE(reader.parse(json_str, json));
return json;
}
TEST(TraceEventsToJson, JsonConversion) {
const std::string metadata_string = R"pb(
devices {
key: 2
value {
name: 'D2'
device_id: 2
resources {
key: 2
value { resource_id: 2 name: 'R2.2' }
}
}
}
devices {
key: 1
value {
name: 'D1'
device_id: 1
resources {
key: 2
value { resource_id: 1 name: 'R1.2' }
}
}
}
)pb";
TraceContainer container;
EXPECT_TRUE(container.ParseMetadataFromString(metadata_string));
TraceEvent* event = container.CreateEvent();
event->set_device_id(1);
event->set_resource_id(2);
event->set_name("E1.2.1");
event->set_timestamp_ps(100000);
event->set_duration_ps(10000);
event->mutable_args()->insert({"long_name", "E1.2.1 long"});
event->mutable_args()->insert({"arg2", "arg2 val"});
event = container.CreateEvent();
event->set_device_id(2);
event->set_resource_id(2);
event->set_name("E2.2.1 # \"comment\"");
event->set_timestamp_ps(105000);
container.CapEvents(2);
Json::Value json = ToJsonValue(TraceContainerToJson(container));
Json::Value expected_json = ToJsonValue(R"(
{
"displayTimeUnit": "ns",
"metadata": { "highres-ticks": true },
"traceEvents": [
{"ph":"M", "pid":1, "name":"process_name", "args":{"name":"D1"}},
{"ph":"M", "pid":1, "name":"process_sort_index", "args":{"sort_index":1}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_name",
"args":{"name":"R1.2"}},
{"ph":"M", "pid":1, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{"ph":"M", "pid":2, "name":"process_name", "args":{"name":"D2"}},
{"ph":"M", "pid":2, "name":"process_sort_index", "args":{"sort_index":2}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_name",
"args":{"name":"R2.2"}},
{"ph":"M", "pid":2, "tid":2, "name":"thread_sort_index",
"args":{"sort_index":2}},
{
"ph" : "X",
"pid" : 1,
"tid" : 2,
"name" : "E1.2.1",
"ts" : 0.1,
"dur" : 0.01,
"args" : {"arg2": "arg2 val", "long_name": "E1.2.1 long"}
},
{
"ph" : "X",
"pid" : 2,
"tid" : 2,
"name" : "E2.2.1 # \"comment\"",
"ts" : 0.105,
"dur" : 1e-6
},
{}
]
})");
EXPECT_EQ(json, expected_json);
}
}
}
} |
101 | #ifndef I18N_ADDRESSINPUT_ADDRESS_METADATA_H_
#define I18N_ADDRESSINPUT_ADDRESS_METADATA_H_
#include <libaddressinput/address_field.h>
#include <string>
namespace i18n {
namespace addressinput {
bool IsFieldRequired(AddressField field, const std::string& region_code);
bool IsFieldUsed(AddressField field, const std::string& region_code);
}
}
#endif
#include <libaddressinput/address_metadata.h>
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <string>
#include "format_element.h"
#include "region_data_constants.h"
#include "rule.h"
namespace i18n {
namespace addressinput {
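// Both helpers reparse the region's serialized rule on each call; an unknown
// region code yields false for any field other than COUNTRY.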
bool IsFieldRequired(AddressField field, const std::string& region_code) {
if (field == COUNTRY) {
return true;
}
Rule rule;
rule.CopyFrom(Rule::GetDefault());
if (!rule.ParseSerializedRule(
RegionDataConstants::GetRegionData(region_code))) {
return false;
}
return std::find(rule.GetRequired().begin(),
rule.GetRequired().end(),
field) != rule.GetRequired().end();
}
bool IsFieldUsed(AddressField field, const std::string& region_code) {
if (field == COUNTRY) {
return true;
}
Rule rule;
rule.CopyFrom(Rule::GetDefault());
if (!rule.ParseSerializedRule(
RegionDataConstants::GetRegionData(region_code))) {
return false;
}
return std::find(rule.GetFormat().begin(),
rule.GetFormat().end(),
FormatElement(field)) != rule.GetFormat().end();
}
}
} | #include <libaddressinput/address_metadata.h>
#include <libaddressinput/address_field.h>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::IsFieldRequired;
using i18n::addressinput::IsFieldUsed;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::DEPENDENT_LOCALITY;
TEST(AddressMetadataTest, IsFieldRequiredCountry) {
EXPECT_TRUE(IsFieldRequired(COUNTRY, "US"));
EXPECT_TRUE(IsFieldRequired(COUNTRY, "CH"));
EXPECT_TRUE(IsFieldRequired(COUNTRY, "rrr"));
}
TEST(AddressMetadataTest, IsFieldUsedCountry) {
EXPECT_TRUE(IsFieldUsed(COUNTRY, "US"));
EXPECT_TRUE(IsFieldUsed(COUNTRY, "CH"));
EXPECT_TRUE(IsFieldUsed(COUNTRY, "rrr"));
}
TEST(AddressMetadataTest, IsFieldRequiredAdminAreaUS) {
EXPECT_TRUE(IsFieldRequired(ADMIN_AREA, "US"));
}
TEST(AddressMetadataTest, IsFieldRequiredAdminAreaAT) {
EXPECT_FALSE(IsFieldRequired(ADMIN_AREA, "AT"));
}
TEST(AddressMetadataTest, IsFieldRequiredAdminAreaSU) {
EXPECT_FALSE(IsFieldRequired(ADMIN_AREA, "SU"));
}
TEST(AddressMetadataTest, IsFieldUsedDependentLocalityUS) {
EXPECT_FALSE(IsFieldUsed(DEPENDENT_LOCALITY, "US"));
}
TEST(AddressMetadataTest, IsFieldUsedDependentLocalityCN) {
EXPECT_TRUE(IsFieldUsed(DEPENDENT_LOCALITY, "CN"));
}
TEST(AddressMetadataTest, IsFieldUsedDependentLocalitySU) {
EXPECT_FALSE(IsFieldUsed(DEPENDENT_LOCALITY, "SU"));
}
} |
102 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_REQUEST_COST_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_REQUEST_COST_H_
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace tensorflow {
class RequestCost {
public:
void RecordCost(
const std::vector<std::pair<absl::string_view, absl::Duration>>& costs);
void RecordMetrics(
const std::vector<std::pair<absl::string_view, double>>& metrics);
absl::flat_hash_map<std::string, absl::Duration> GetCosts() const;
absl::flat_hash_map<std::string, double> GetMetrics() const;
struct BatchMetrics {
int64_t processed_size = 0;
int64_t input_size = 0;
int64_t padding_size = 0;
absl::flat_hash_map<std::string, absl::Duration> batch_costs;
};
void RecordBatchMetrics(const BatchMetrics& batch_metrics);
std::vector<BatchMetrics> GetBatchMetrics() const;
private:
mutable absl::Mutex mutex_;
absl::flat_hash_map<std::string, absl::Duration> cost_map_
ABSL_GUARDED_BY(mutex_);
absl::flat_hash_map<std::string, double> metric_map_ ABSL_GUARDED_BY(mutex_);
std::vector<BatchMetrics> batch_metrics_ ABSL_GUARDED_BY(mutex_);
};
}
#endif
#include "tensorflow/core/common_runtime/request_cost.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace tensorflow {
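// Costs recorded under the same name accumulate across calls.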
void RequestCost::RecordCost(
const std::vector<std::pair<absl::string_view, absl::Duration>>& costs) {
absl::MutexLock lock(&mutex_);
for (const auto& cost : costs) {
cost_map_[cost.first] += cost.second;
}
}
absl::flat_hash_map<std::string, absl::Duration> RequestCost::GetCosts() const {
absl::MutexLock lock(&mutex_);
return cost_map_;
}
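// Metrics recorded under the same name are overwritten by later calls.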
void RequestCost::RecordMetrics(
const std::vector<std::pair<absl::string_view, double>>& metrics) {
absl::MutexLock lock(&mutex_);
for (const auto& metric : metrics) {
metric_map_[metric.first] = metric.second;
}
}
absl::flat_hash_map<std::string, double> RequestCost::GetMetrics() const {
absl::MutexLock lock(&mutex_);
return metric_map_;
}
void RequestCost::RecordBatchMetrics(const BatchMetrics& batch_metrics) {
absl::MutexLock lock(&mutex_);
batch_metrics_.push_back(batch_metrics);
}
std::vector<RequestCost::BatchMetrics> RequestCost::GetBatchMetrics() const {
absl::MutexLock lock(&mutex_);
return batch_metrics_;
}
} | #include "tensorflow/core/common_runtime/request_cost.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
TEST(RequestCostTest, RecordCost) {
RequestCost request_cost;
request_cost.RecordCost(
{{"tpu_v1", absl::Milliseconds(1)}, {"tpu_v2", absl::Milliseconds(2)}});
request_cost.RecordCost({{"tpu_v1", absl::Milliseconds(10)},
{"tpu_v2", absl::Milliseconds(20)},
{"cpu_v1", absl::Milliseconds(30)},
{"cpu_v2", absl::Milliseconds(40)}});
EXPECT_THAT(request_cost.GetCosts(),
UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(11)),
Pair("tpu_v2", absl::Milliseconds(22)),
Pair("cpu_v1", absl::Milliseconds(30)),
Pair("cpu_v2", absl::Milliseconds(40))));
request_cost.RecordCost(
{{"cpu_v1", absl::Milliseconds(3)}, {"cpu_v2", absl::Milliseconds(4)}});
EXPECT_THAT(request_cost.GetCosts(),
UnorderedElementsAre(Pair("tpu_v1", absl::Milliseconds(11)),
Pair("tpu_v2", absl::Milliseconds(22)),
Pair("cpu_v1", absl::Milliseconds(33)),
Pair("cpu_v2", absl::Milliseconds(44))));
}
TEST(RequestCostTest, RecordMetrics) {
RequestCost request_cost;
request_cost.RecordMetrics({{"metric_v1", 1}, {"metric_v2", 3.14}});
EXPECT_THAT(
request_cost.GetMetrics(),
UnorderedElementsAre(Pair("metric_v1", 1), Pair("metric_v2", 3.14)));
request_cost.RecordMetrics({{"metric_v1", 11},
{"metric_v2", 3.14159},
{"other_metric_v1", 3},
{"other_metric_v2", 4}});
EXPECT_THAT(request_cost.GetMetrics(),
UnorderedElementsAre(
Pair("metric_v1", 11), Pair("metric_v2", 3.14159),
Pair("other_metric_v1", 3), Pair("other_metric_v2", 4)));
}
TEST(RequestCostTest, RecordBatchMetrics) {
RequestCost request_cost;
request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
8,
8,
0,
{{"gcu", absl::Milliseconds(80)}, {"tpu", absl::Milliseconds(160)}}});
request_cost.RecordBatchMetrics(RequestCost::BatchMetrics{
4,
2,
1,
{{"gcu", absl::Milliseconds(40)}, {"tpu", absl::Milliseconds(80)}}});
EXPECT_THAT(
request_cost.GetBatchMetrics(),
ElementsAre(
FieldsAre(8, 8, 0,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(80)),
Pair("tpu", absl::Milliseconds(160)))),
FieldsAre(
4, 2, 1,
UnorderedElementsAre(Pair("gcu", absl::Milliseconds(40)),
Pair("tpu", absl::Milliseconds(80))))));
}
}
} |
103 | #ifndef XLA_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
#define XLA_TSL_DISTRIBUTED_RUNTIME_RPC_GRPC_UTIL_H_
#include <memory>
#include <string>
#include "grpcpp/grpcpp.h"
#include "grpcpp/support/byte_buffer.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/distributed_runtime_payloads.pb.h"
namespace tsl {
constexpr char kGrpcPayloadsLost[] =
"type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost";
constexpr char kStreamRemovedMessage[] = "Stream removed";
inline bool IsStreamRemovedError(const ::grpc::Status& s) {
return !s.ok() && s.error_code() == ::grpc::StatusCode::UNKNOWN &&
s.error_message() == kStreamRemovedMessage;
}
inline std::string SerializePayloads(const absl::Status& s) {
tensorflow::distributed_runtime::GrpcPayloadContainer container;
s.ForEachPayload([&container](StringPiece key, const absl::Cord& value) {
(*container.mutable_payloads())[std::string(key)] = std::string(value);
});
return container.SerializeAsString();
}
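// Restores payloads serialized by SerializePayloads(). If the payload string
// cannot be parsed, a kGrpcPayloadsLost marker payload is attached instead.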
inline void InsertSerializedPayloads(absl::Status& s, std::string payloads) {
tensorflow::distributed_runtime::GrpcPayloadContainer container;
if (container.ParseFromString(payloads)) {
for (const auto& key_val : container.payloads()) {
s.SetPayload(key_val.first, absl::Cord(key_val.second));
}
} else {
s.SetPayload(kGrpcPayloadsLost,
absl::Cord(tensorflow::distributed_runtime::GrpcPayloadsLost()
.SerializeAsString()));
}
}
inline absl::Status FromGrpcStatus(const ::grpc::Status& s) {
if (s.ok()) {
return absl::OkStatus();
} else {
    absl::Status converted;
    // "Stream removed" UNKNOWN errors are mapped to UNAVAILABLE so callers
    // can retry; all other codes are converted directly. Without this else
    // branch the UNAVAILABLE conversion would be dead code, immediately
    // overwritten by the generic conversion below.
    if (IsStreamRemovedError(s)) {
      converted =
          absl::Status(absl::StatusCode::kUnavailable, s.error_message());
    } else {
      converted = absl::Status(static_cast<absl::StatusCode>(s.error_code()),
                               s.error_message());
    }
InsertSerializedPayloads(converted, s.error_details());
return converted;
}
}
inline ::grpc::Status ToGrpcStatus(const absl::Status& s) {
if (s.ok()) {
return ::grpc::Status::OK;
} else {
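    // Overly long error messages are truncated before being sent over the
    // wire; the full status is logged instead.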
    if (s.message().size() > 3072) {
string scratch = strings::Printf("%.3072s ... [truncated]",
absl::StatusMessageAsCStr(s));
LOG(ERROR) << "Truncated error message: " << s;
return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()), scratch,
SerializePayloads(s));
}
return ::grpc::Status(static_cast<::grpc::StatusCode>(s.code()),
std::string(s.message()), SerializePayloads(s));
}
}
typedef std::shared_ptr<::grpc::Channel> SharedGrpcChannelPtr;
::grpc::Status GrpcMaybeUnparseProto(const protobuf::Message& src,
::grpc::ByteBuffer* dst);
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, protobuf::Message* dst);
::grpc::Status GrpcMaybeUnparseProto(const string& src,
::grpc::ByteBuffer* dst);
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, string* dst);
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, tstring* dst);
}
#endif
#include "xla/tsl/distributed_runtime/rpc/grpc_util.h"
#include <algorithm>
#include <vector>
#include "grpcpp/impl/codegen/proto_utils.h"
#include "tsl/platform/protobuf.h"
namespace tsl {
::grpc::Status GrpcMaybeUnparseProto(const protobuf::Message& src,
grpc::ByteBuffer* dst) {
bool own_buffer;
return ::grpc::SerializationTraits<protobuf::Message>::Serialize(src, dst,
&own_buffer);
}
bool GrpcMaybeParseProto(::grpc::ByteBuffer* src, protobuf::Message* dst) {
return ::grpc::SerializationTraits<protobuf::Message>::Deserialize(src, dst)
.ok();
}
::grpc::Status GrpcMaybeUnparseProto(const string& src, grpc::ByteBuffer* dst) {
::grpc::Slice s(src.data(), src.size());
::grpc::ByteBuffer buffer(&s, 1);
dst->Swap(&buffer);
return ::grpc::Status::OK;
}
bool GrpcMaybeParseProto(grpc::ByteBuffer* src, string* dst) {
dst->clear();
dst->reserve(src->Length());
std::vector<::grpc::Slice> slices;
if (!src->Dump(&slices).ok()) {
return false;
}
for (const ::grpc::Slice& s : slices) {
dst->append(reinterpret_cast<const char*>(s.begin()), s.size());
}
return true;
}
bool GrpcMaybeParseProto(grpc::ByteBuffer* src, tstring* dst) {
dst->clear();
dst->reserve(src->Length());
std::vector<::grpc::Slice> slices;
if (!src->Dump(&slices).ok()) {
return false;
}
for (const ::grpc::Slice& s : slices) {
dst->append(reinterpret_cast<const char*>(s.begin()), s.size());
}
return true;
}
} | #include "xla/tsl/distributed_runtime/rpc/grpc_util.h"
#include <algorithm>
#include <cmath>
#include <vector>
#include "grpcpp/grpcpp.h"
#include "xla/tsl/distributed_runtime/rpc/test_request.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
using tsl::test::TestRequest;
string ToString(const grpc::ByteBuffer& buf) {
std::vector<grpc::Slice> slices;
CHECK(buf.Dump(&slices).ok());
string result;
for (const grpc::Slice& s : slices) {
result.append(reinterpret_cast<const char*>(s.begin()), s.size());
}
return result;
}
grpc::ByteBuffer MakeBuffer(const string& str, int num_slices) {
std::vector<::grpc::Slice> slices;
const size_t per_slice = (str.size() + num_slices - 1) / num_slices;
for (size_t pos = 0; pos < str.size();) {
const size_t n = std::min(str.size() - pos, per_slice);
slices.emplace_back(&str[pos], n);
pos += n;
}
if (slices.empty()) {
slices.emplace_back();
}
return ::grpc::ByteBuffer(&slices[0], slices.size());
}
TestRequest MakeProto(int size) {
int approx_size = 0;
TestRequest proto;
int index = 0;
while (approx_size < size) {
int item_size = std::min(size - approx_size, 1024);
proto.add_data(string(item_size, 'a' + static_cast<char>(index % 26)));
approx_size += item_size + 3;
index++;
}
return proto;
}
TEST(PayloadSerialization, PayloadsAreTransmitted) {
absl::Status status = errors::InvalidArgument("invalid arg message");
status.SetPayload("a", absl::Cord("\\xFF\\x02\\x03"));
absl::Status status_recovered = FromGrpcStatus(ToGrpcStatus(status));
ASSERT_TRUE(status_recovered.GetPayload("a").has_value());
EXPECT_EQ(status_recovered.GetPayload("a").value(), "\\xFF\\x02\\x03");
}
TEST(PayloadSerialization, PayloadsCorrupted) {
::grpc::Status status(
::grpc::StatusCode::INVALID_ARGUMENT, "invalid arg message",
"string that can not be serialized to the GrpcPayloadContainer proto");
absl::Status converted = FromGrpcStatus(status);
EXPECT_TRUE(converted.GetPayload(kGrpcPayloadsLost).has_value());
}
TEST(GrpcProto, Unparse) {
TestRequest proto;
proto.add_data("hello");
proto.add_data("world");
grpc::ByteBuffer buf;
ASSERT_TRUE(GrpcMaybeUnparseProto(proto, &buf).ok());
TestRequest parsed;
ASSERT_TRUE(parsed.ParseFromString(ToString(buf)));
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
TEST(GrpcProto, UnparseToString) {
TestRequest proto;
proto.add_data("hello");
proto.add_data("world");
string str;
CHECK(proto.SerializeToString(&str));
grpc::ByteBuffer buf;
ASSERT_TRUE(GrpcMaybeUnparseProto(str, &buf).ok());
TestRequest parsed;
ASSERT_TRUE(parsed.ParseFromString(ToString(buf)));
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
TEST(GrpcProto, Parse) {
struct Case {
int length;
int slices;
};
for (Case c : std::vector<Case>{
{0, 1},
{20, 1},
{100, 1},
{1 << 20, 1},
{100, 5},
{10000, 50},
}) {
TestRequest proto = MakeProto(c.length);
::grpc::ByteBuffer src = MakeBuffer(proto.SerializeAsString(), c.slices);
TestRequest parsed;
ASSERT_TRUE(GrpcMaybeParseProto(&src, &parsed))
<< c.length << " " << c.slices;
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
}
TEST(GrpcProto, ParseFromString) {
struct Case {
int length;
int slices;
};
for (Case c : std::vector<Case>{
{0, 1},
{20, 1},
{100, 1},
{1 << 20, 1},
{100, 5},
{10000, 50},
}) {
TestRequest proto = MakeProto(c.length);
::grpc::ByteBuffer src = MakeBuffer(proto.SerializeAsString(), c.slices);
string parsed_str;
TestRequest parsed;
ASSERT_TRUE(GrpcMaybeParseProto(&src, &parsed_str))
<< c.length << " " << c.slices;
ASSERT_TRUE(parsed.ParseFromString(parsed_str));
ASSERT_EQ(proto.DebugString(), parsed.DebugString());
}
}
static void BM_UnparseGrpc(::testing::benchmark::State& state) {
const int size = state.range(0);
auto proto = MakeProto(size);
for (auto s : state) {
grpc::ByteBuffer buf;
CHECK(GrpcMaybeUnparseProto(proto, &buf).ok());
}
}
BENCHMARK(BM_UnparseGrpc)->Arg(1)->Arg(1 << 10)->Arg(1 << 20);
static void BM_UnparseString(::testing::benchmark::State& state) {
const int size = state.range(0);
auto proto = MakeProto(size);
for (auto s : state) {
string buf;
proto.SerializeToString(&buf);
}
}
BENCHMARK(BM_UnparseString)->Arg(1)->Arg(1 << 10)->Arg(1 << 20);
static void BM_ParseGrpc(::testing::benchmark::State& state) {
const int size = state.range(0);
const int num_slices = state.range(1);
TestRequest proto = MakeProto(size);
auto buf = MakeBuffer(proto.SerializeAsString(), num_slices);
for (auto s : state) {
CHECK(GrpcMaybeParseProto(&buf, &proto));
}
}
BENCHMARK(BM_ParseGrpc)
->ArgPair(1, 1)
->ArgPair(1 << 10, 1)
->ArgPair(1 << 10, 4)
->ArgPair(1 << 20, 1)
->ArgPair(1 << 20, 4);
static void BM_ParseString(::testing::benchmark::State& state) {
const int size = state.range(0);
TestRequest proto = MakeProto(size);
string serial = proto.SerializeAsString();
for (auto s : state) {
CHECK(proto.ParseFromString(serial));
}
}
BENCHMARK(BM_ParseString)->Arg(1)->Arg(1 << 10)->Arg(1 << 20);
}
} |
104 | #ifndef ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
#define ABSL_DEBUGGING_INTERNAL_SYMBOLIZE_H_
#ifdef __cplusplus
#include <cstddef>
#include <cstdint>
#include "absl/base/config.h"
#include "absl/strings/string_view.h"
#ifdef ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE
#error ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE cannot be directly set
#elif defined(__ELF__) && defined(__GLIBC__) && !defined(__native_client__) \
&& !defined(__asmjs__) && !defined(__wasm__)
#define ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE 1
#include <elf.h>
#include <link.h>
#include <functional>
#include <string>
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
bool ForEachSection(int fd,
const std::function<bool(absl::string_view name,
const ElfW(Shdr) &)>& callback);
bool GetSectionHeaderByName(int fd, const char *name, size_t name_len,
ElfW(Shdr) *out);
}
ABSL_NAMESPACE_END
}
#endif
#ifdef ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE
#error ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE cannot be directly set
#elif defined(__APPLE__)
#define ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE 1
#endif
#ifdef ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE
#error ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE cannot be directly set
#elif defined(__EMSCRIPTEN__)
#define ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE 1
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
struct SymbolDecoratorArgs {
const void *pc;
ptrdiff_t relocation;
int fd;
char *const symbol_buf;
size_t symbol_buf_size;
char *const tmp_buf;
size_t tmp_buf_size;
void* arg;
};
using SymbolDecorator = void (*)(const SymbolDecoratorArgs *);
int InstallSymbolDecorator(SymbolDecorator decorator, void* arg);
bool RemoveSymbolDecorator(int ticket);
bool RemoveAllSymbolDecorators();
bool RegisterFileMappingHint(const void* start, const void* end,
uint64_t offset, const char* filename);
bool GetFileMappingHint(const void** start, const void** end, uint64_t* offset,
const char** filename);
}
ABSL_NAMESPACE_END
}
#endif
#include <stdbool.h>
#ifdef __cplusplus
extern "C"
#endif
bool
AbslInternalGetFileMappingHint(const void** start, const void** end,
uint64_t* offset, const char** filename);
#endif
#include "absl/debugging/symbolize.h"
#ifdef _WIN32
#include <winapifamily.h>
#if !(WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)) || \
WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32
#endif
#endif
#if defined(__EMSCRIPTEN__) && !defined(STANDALONE_WASM)
#define ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM
#endif
#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE)
#include "absl/debugging/symbolize_elf.inc"
#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WIN32)
#include "absl/debugging/symbolize_win32.inc"
#elif defined(__APPLE__)
#include "absl/debugging/symbolize_darwin.inc"
#elif defined(ABSL_INTERNAL_HAVE_SYMBOLIZE_WASM)
#include "absl/debugging/symbolize_emscripten.inc"
#else
#include "absl/debugging/symbolize_unimplemented.inc"
#endif | #include "absl/debugging/symbolize.h"
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
#ifndef _WIN32
#include <fcntl.h>
#include <sys/mman.h>
#endif
#include <cstring>
#include <iostream>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
#include "absl/debugging/internal/stack_consumption.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
using testing::Contains;
#ifdef _WIN32
#define ABSL_SYMBOLIZE_TEST_NOINLINE __declspec(noinline)
#else
#define ABSL_SYMBOLIZE_TEST_NOINLINE ABSL_ATTRIBUTE_NOINLINE
#endif
extern "C" {
ABSL_SYMBOLIZE_TEST_NOINLINE void nonstatic_func() {
volatile int x = __LINE__;
static_cast<void>(x);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
ABSL_SYMBOLIZE_TEST_NOINLINE static void static_func() {
volatile int x = __LINE__;
static_cast<void>(x);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
}
struct Foo {
static void func(int x);
};
ABSL_SYMBOLIZE_TEST_NOINLINE void Foo::func(int) {
volatile int x = __LINE__;
static_cast<void>(x);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.unlikely) unlikely_func() {
return 0;
}
int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.hot) hot_func() { return 0; }
int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.startup) startup_func() { return 0; }
int ABSL_ATTRIBUTE_SECTION_VARIABLE(.text.exit) exit_func() { return 0; }
int regular_func() { return 0; }
#if ABSL_PER_THREAD_TLS
static ABSL_PER_THREAD_TLS_KEYWORD char symbolize_test_thread_small[1];
static ABSL_PER_THREAD_TLS_KEYWORD char
symbolize_test_thread_big[2 * 1024 * 1024];
#endif
#if !defined(__EMSCRIPTEN__)
static void *GetPCFromFnPtr(void *ptr) { return ptr; }
static volatile bool volatile_bool = false;
static constexpr size_t kHpageSize = 1 << 21;
const char kHpageTextPadding[kHpageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(
.text) = "";
#else
static void *GetPCFromFnPtr(void *ptr) {
return EM_ASM_PTR(
{ return wasmOffsetConverter.convert(wasmTable.get($0).name, 0); }, ptr);
}
#endif
static char try_symbolize_buffer[4096];
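// Symbolizes into a heap buffer first so sanitizers can catch writes past
// `limit`, then copies the result into try_symbolize_buffer.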
static const char *TrySymbolizeWithLimit(void *pc, int limit) {
CHECK_LE(limit, sizeof(try_symbolize_buffer))
<< "try_symbolize_buffer is too small";
auto heap_buffer = absl::make_unique<char[]>(sizeof(try_symbolize_buffer));
bool found = absl::Symbolize(pc, heap_buffer.get(), limit);
if (found) {
CHECK_LT(static_cast<int>(
strnlen(heap_buffer.get(), static_cast<size_t>(limit))),
limit)
<< "absl::Symbolize() did not properly terminate the string";
strncpy(try_symbolize_buffer, heap_buffer.get(),
sizeof(try_symbolize_buffer) - 1);
try_symbolize_buffer[sizeof(try_symbolize_buffer) - 1] = '\0';
}
return found ? try_symbolize_buffer : nullptr;
}
static const char *TrySymbolize(void *pc) {
return TrySymbolizeWithLimit(pc, sizeof(try_symbolize_buffer));
}
#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \
defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) || \
defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE)
void ABSL_ATTRIBUTE_NOINLINE TestWithReturnAddress() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
void *return_address = __builtin_return_address(0);
const char *symbol = TrySymbolize(return_address);
CHECK_NE(symbol, nullptr) << "TestWithReturnAddress failed";
CHECK_STREQ(symbol, "main") << "TestWithReturnAddress failed";
std::cout << "TestWithReturnAddress passed" << std::endl;
#endif
}
TEST(Symbolize, Cached) {
EXPECT_STREQ("nonstatic_func",
TrySymbolize(GetPCFromFnPtr((void *)(&nonstatic_func))));
const char *static_func_symbol =
TrySymbolize(GetPCFromFnPtr((void *)(&static_func)));
EXPECT_TRUE(strcmp("static_func", static_func_symbol) == 0 ||
strcmp("static_func()", static_func_symbol) == 0);
EXPECT_TRUE(nullptr == TrySymbolize(nullptr));
}
TEST(Symbolize, Truncation) {
constexpr char kNonStaticFunc[] = "nonstatic_func";
EXPECT_STREQ("nonstatic_func",
TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
strlen(kNonStaticFunc) + 1));
EXPECT_STREQ("nonstatic_...",
TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
strlen(kNonStaticFunc) + 0));
EXPECT_STREQ("nonstatic...",
TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)),
strlen(kNonStaticFunc) - 1));
EXPECT_STREQ("n...", TrySymbolizeWithLimit(
GetPCFromFnPtr((void *)(&nonstatic_func)), 5));
EXPECT_STREQ("...", TrySymbolizeWithLimit(
GetPCFromFnPtr((void *)(&nonstatic_func)), 4));
EXPECT_STREQ("..", TrySymbolizeWithLimit(
GetPCFromFnPtr((void *)(&nonstatic_func)), 3));
EXPECT_STREQ(
".", TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)), 2));
EXPECT_STREQ(
"", TrySymbolizeWithLimit(GetPCFromFnPtr((void *)(&nonstatic_func)), 1));
EXPECT_EQ(nullptr, TrySymbolizeWithLimit(
GetPCFromFnPtr((void *)(&nonstatic_func)), 0));
}
TEST(Symbolize, SymbolizeWithDemangling) {
Foo::func(100);
#ifdef __EMSCRIPTEN__
EXPECT_STREQ("Foo::func(int)",
TrySymbolize(GetPCFromFnPtr((void *)(&Foo::func))));
#else
EXPECT_STREQ("Foo::func()",
TrySymbolize(GetPCFromFnPtr((void *)(&Foo::func))));
#endif
}
TEST(Symbolize, SymbolizeSplitTextSections) {
EXPECT_STREQ("unlikely_func()",
TrySymbolize(GetPCFromFnPtr((void *)(&unlikely_func))));
EXPECT_STREQ("hot_func()", TrySymbolize(GetPCFromFnPtr((void *)(&hot_func))));
EXPECT_STREQ("startup_func()",
TrySymbolize(GetPCFromFnPtr((void *)(&startup_func))));
EXPECT_STREQ("exit_func()",
TrySymbolize(GetPCFromFnPtr((void *)(&exit_func))));
EXPECT_STREQ("regular_func()",
TrySymbolize(GetPCFromFnPtr((void *)(&regular_func))));
}
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
static void *g_pc_to_symbolize;
static char g_symbolize_buffer[4096];
static char *g_symbolize_result;
static void SymbolizeSignalHandler(int signo) {
if (absl::Symbolize(g_pc_to_symbolize, g_symbolize_buffer,
sizeof(g_symbolize_buffer))) {
g_symbolize_result = g_symbolize_buffer;
} else {
g_symbolize_result = nullptr;
}
}
static const char *SymbolizeStackConsumption(void *pc, int *stack_consumed) {
g_pc_to_symbolize = pc;
*stack_consumed = absl::debugging_internal::GetSignalHandlerStackConsumption(
SymbolizeSignalHandler);
return g_symbolize_result;
}
static int GetStackConsumptionUpperLimit() {
int stack_consumption_upper_limit = 2048;
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER) || defined(ABSL_HAVE_THREAD_SANITIZER)
stack_consumption_upper_limit *= 5;
#endif
return stack_consumption_upper_limit;
}
TEST(Symbolize, SymbolizeStackConsumption) {
int stack_consumed = 0;
const char *symbol =
SymbolizeStackConsumption((void *)(&nonstatic_func), &stack_consumed);
EXPECT_STREQ("nonstatic_func", symbol);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, GetStackConsumptionUpperLimit());
symbol = SymbolizeStackConsumption((void *)(&static_func), &stack_consumed);
EXPECT_TRUE(strcmp("static_func", symbol) == 0 ||
strcmp("static_func()", symbol) == 0);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, GetStackConsumptionUpperLimit());
}
TEST(Symbolize, SymbolizeWithDemanglingStackConsumption) {
Foo::func(100);
int stack_consumed = 0;
const char *symbol =
SymbolizeStackConsumption((void *)(&Foo::func), &stack_consumed);
EXPECT_STREQ("Foo::func()", symbol);
EXPECT_GT(stack_consumed, 0);
EXPECT_LT(stack_consumed, GetStackConsumptionUpperLimit());
}
#endif
#if !defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE) && \
!defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE)
const size_t kPageSize = 64 << 10;
const char kPadding0[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) = "";
const char kPadding1[kPageSize * 4] ABSL_ATTRIBUTE_SECTION_VARIABLE(.text) = "";
static int FilterElfHeader(struct dl_phdr_info *info, size_t size, void *data) {
for (int i = 0; i < info->dlpi_phnum; i++) {
if (info->dlpi_phdr[i].p_type == PT_LOAD &&
info->dlpi_phdr[i].p_flags == (PF_R | PF_X)) {
const void *const vaddr =
absl::bit_cast<void *>(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
const auto segsize = info->dlpi_phdr[i].p_memsz;
const char *self_exe;
if (info->dlpi_name != nullptr && info->dlpi_name[0] != '\0') {
self_exe = info->dlpi_name;
} else {
self_exe = "/proc/self/exe";
}
absl::debugging_internal::RegisterFileMappingHint(
vaddr, reinterpret_cast<const char *>(vaddr) + segsize,
info->dlpi_phdr[i].p_offset, self_exe);
return 1;
}
}
return 1;
}
TEST(Symbolize, SymbolizeWithMultipleMaps) {
if (volatile_bool) {
LOG(INFO) << kPadding0;
LOG(INFO) << kPadding1;
}
char buf[512];
memset(buf, 0, sizeof(buf));
absl::Symbolize(kPadding0, buf, sizeof(buf));
EXPECT_STREQ("kPadding0", buf);
memset(buf, 0, sizeof(buf));
absl::Symbolize(kPadding1, buf, sizeof(buf));
EXPECT_STREQ("kPadding1", buf);
dl_iterate_phdr(FilterElfHeader, nullptr);
const char *ptrs[] = {kPadding0, kPadding1};
for (const char *ptr : ptrs) {
const int kMapFlags = MAP_ANONYMOUS | MAP_PRIVATE;
void *addr = mmap(nullptr, kPageSize, PROT_READ, kMapFlags, 0, 0);
ASSERT_NE(addr, MAP_FAILED);
void *remapped = reinterpret_cast<void *>(
reinterpret_cast<uintptr_t>(ptr + kPageSize) & ~(kPageSize - 1ULL));
const int kMremapFlags = (MREMAP_MAYMOVE | MREMAP_FIXED);
void *ret = mremap(addr, kPageSize, kPageSize, kMremapFlags, remapped);
ASSERT_NE(ret, MAP_FAILED);
}
absl::Symbolize(nullptr, buf, sizeof(buf));
const char *expected[] = {"kPadding0", "kPadding1"};
const size_t offsets[] = {0, kPageSize, 2 * kPageSize, 3 * kPageSize};
for (int i = 0; i < 2; i++) {
for (size_t offset : offsets) {
memset(buf, 0, sizeof(buf));
absl::Symbolize(ptrs[i] + offset, buf, sizeof(buf));
EXPECT_STREQ(expected[i], buf);
}
}
}
static void DummySymbolDecorator(
const absl::debugging_internal::SymbolDecoratorArgs *args) {
std::string *message = static_cast<std::string *>(args->arg);
strncat(args->symbol_buf, message->c_str(),
args->symbol_buf_size - strlen(args->symbol_buf) - 1);
}
TEST(Symbolize, InstallAndRemoveSymbolDecorators) {
int ticket_a;
std::string a_message("a");
EXPECT_GE(ticket_a = absl::debugging_internal::InstallSymbolDecorator(
DummySymbolDecorator, &a_message),
0);
int ticket_b;
std::string b_message("b");
EXPECT_GE(ticket_b = absl::debugging_internal::InstallSymbolDecorator(
DummySymbolDecorator, &b_message),
0);
int ticket_c;
std::string c_message("c");
EXPECT_GE(ticket_c = absl::debugging_internal::InstallSymbolDecorator(
DummySymbolDecorator, &c_message),
0);
char *address = reinterpret_cast<char *>(4);
EXPECT_STREQ("abc", TrySymbolize(address));
EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_b));
EXPECT_STREQ("ac", TrySymbolize(address + 4));
EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_a));
EXPECT_TRUE(absl::debugging_internal::RemoveSymbolDecorator(ticket_c));
}
static int in_data_section = 1;
TEST(Symbolize, ForEachSection) {
int fd = TEMP_FAILURE_RETRY(open("/proc/self/exe", O_RDONLY));
ASSERT_NE(fd, -1);
std::vector<std::string> sections;
ASSERT_TRUE(absl::debugging_internal::ForEachSection(
fd, [&sections](const absl::string_view name, const ElfW(Shdr) &) {
sections.emplace_back(name);
return true;
}));
EXPECT_THAT(sections, Contains(".text"));
EXPECT_THAT(sections, Contains(".rodata"));
EXPECT_THAT(sections, Contains(".bss"));
++in_data_section;
EXPECT_THAT(sections, Contains(".data"));
close(fd);
}
#endif
extern "C" {
inline void *ABSL_ATTRIBUTE_ALWAYS_INLINE inline_func() {
void *pc = nullptr;
#if defined(__i386__)
__asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [PC] "=r"(pc));
#elif defined(__x86_64__)
__asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [PC] "=r"(pc));
#endif
return pc;
}
void *ABSL_ATTRIBUTE_NOINLINE non_inline_func() {
void *pc = nullptr;
#if defined(__i386__)
__asm__ __volatile__("call 1f;\n 1: pop %[PC]" : [PC] "=r"(pc));
#elif defined(__x86_64__)
__asm__ __volatile__("leaq 0(%%rip),%[PC];\n" : [PC] "=r"(pc));
#endif
return pc;
}
void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideNonInlineFunction() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE) && \
(defined(__i386__) || defined(__x86_64__))
void *pc = non_inline_func();
const char *symbol = TrySymbolize(pc);
CHECK_NE(symbol, nullptr) << "TestWithPCInsideNonInlineFunction failed";
CHECK_STREQ(symbol, "non_inline_func")
<< "TestWithPCInsideNonInlineFunction failed";
std::cout << "TestWithPCInsideNonInlineFunction passed" << std::endl;
#endif
}
void ABSL_ATTRIBUTE_NOINLINE TestWithPCInsideInlineFunction() {
#if defined(ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE) && \
(defined(__i386__) || defined(__x86_64__))
void *pc = inline_func();
const char *symbol = TrySymbolize(pc);
CHECK_NE(symbol, nullptr) << "TestWithPCInsideInlineFunction failed";
CHECK_STREQ(symbol, __FUNCTION__) << "TestWithPCInsideInlineFunction failed";
std::cout << "TestWithPCInsideInlineFunction passed" << std::endl;
#endif
}
}
#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
__attribute__((target("thumb"))) int ArmThumbOverlapThumb(int x) {
return x * x * x;
}
__attribute__((target("arm"))) int ArmThumbOverlapArm(int x) {
return x * x * x;
}
void ABSL_ATTRIBUTE_NOINLINE TestArmThumbOverlap() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
const char *symbol = TrySymbolize((void *)&ArmThumbOverlapArm);
CHECK_NE(symbol, nullptr) << "TestArmThumbOverlap failed";
CHECK_STREQ("ArmThumbOverlapArm()", symbol) << "TestArmThumbOverlap failed";
std::cout << "TestArmThumbOverlap passed" << std::endl;
#endif
}
#endif
#elif defined(_WIN32)
#if !defined(ABSL_CONSUME_DLL)
TEST(Symbolize, Basics) {
EXPECT_STREQ("nonstatic_func", TrySymbolize((void *)(&nonstatic_func)));
const char *static_func_symbol = TrySymbolize((void *)(&static_func));
ASSERT_TRUE(static_func_symbol != nullptr);
EXPECT_TRUE(strstr(static_func_symbol, "static_func") != nullptr);
EXPECT_TRUE(nullptr == TrySymbolize(nullptr));
}
TEST(Symbolize, Truncation) {
constexpr char kNonStaticFunc[] = "nonstatic_func";
EXPECT_STREQ("nonstatic_func",
TrySymbolizeWithLimit((void *)(&nonstatic_func),
strlen(kNonStaticFunc) + 1));
EXPECT_STREQ("nonstatic_...",
TrySymbolizeWithLimit((void *)(&nonstatic_func),
strlen(kNonStaticFunc) + 0));
EXPECT_STREQ("nonstatic...",
TrySymbolizeWithLimit((void *)(&nonstatic_func),
strlen(kNonStaticFunc) - 1));
EXPECT_STREQ("n...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 5));
EXPECT_STREQ("...", TrySymbolizeWithLimit((void *)(&nonstatic_func), 4));
EXPECT_STREQ("..", TrySymbolizeWithLimit((void *)(&nonstatic_func), 3));
EXPECT_STREQ(".", TrySymbolizeWithLimit((void *)(&nonstatic_func), 2));
EXPECT_STREQ("", TrySymbolizeWithLimit((void *)(&nonstatic_func), 1));
EXPECT_EQ(nullptr, TrySymbolizeWithLimit((void *)(&nonstatic_func), 0));
}
TEST(Symbolize, SymbolizeWithDemangling) {
const char *result = TrySymbolize((void *)(&Foo::func));
ASSERT_TRUE(result != nullptr);
EXPECT_TRUE(strstr(result, "Foo::func") != nullptr) << result;
}
#endif
#else
TEST(Symbolize, Unimplemented) {
char buf[64];
EXPECT_FALSE(absl::Symbolize((void *)(&nonstatic_func), buf, sizeof(buf)));
EXPECT_FALSE(absl::Symbolize((void *)(&static_func), buf, sizeof(buf)));
EXPECT_FALSE(absl::Symbolize((void *)(&Foo::func), buf, sizeof(buf)));
}
#endif
int main(int argc, char **argv) {
#if !defined(__EMSCRIPTEN__)
if (volatile_bool) {
LOG(INFO) << kHpageTextPadding;
}
#endif
#if ABSL_PER_THREAD_TLS
symbolize_test_thread_small[0] = 0;
symbolize_test_thread_big[0] = 0;
#endif
absl::InitializeSymbolizer(argv[0]);
testing::InitGoogleTest(&argc, argv);
#if defined(ABSL_INTERNAL_HAVE_ELF_SYMBOLIZE) || \
defined(ABSL_INTERNAL_HAVE_EMSCRIPTEN_SYMBOLIZE) || \
defined(ABSL_INTERNAL_HAVE_DARWIN_SYMBOLIZE)
TestWithPCInsideInlineFunction();
TestWithPCInsideNonInlineFunction();
TestWithReturnAddress();
#if defined(__arm__) && ABSL_HAVE_ATTRIBUTE(target) && \
((__ARM_ARCH >= 7) || !defined(__ARM_PCS_VFP))
TestArmThumbOverlap();
#endif
#endif
return RUN_ALL_TESTS();
} |
105 | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_ACTIVATION_BIND_HELPER_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_ACTIVATION_BIND_HELPER_H_
#include "eval/public/activation.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
enum class ProtoUnsetFieldOptions {
kSkip,
kBindDefault
};
absl::Status BindProtoToActivation(
const google::protobuf::Message* message, google::protobuf::Arena* arena,
Activation* activation,
ProtoUnsetFieldOptions options = ProtoUnsetFieldOptions::kSkip);
}
}
}
}
#endif
#include "eval/public/activation_bind_helper.h"
#include "absl/status/status.h"
#include "eval/public/containers/field_access.h"
#include "eval/public/containers/field_backed_list_impl.h"
#include "eval/public/containers/field_backed_map_impl.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using google::protobuf::Arena;
using google::protobuf::Message;
using google::protobuf::FieldDescriptor;
using google::protobuf::Descriptor;
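// Wraps a message field as a CelValue: map and repeated fields become
// arena-allocated map/list views; singular fields are converted directly.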
absl::Status CreateValueFromField(const google::protobuf::Message* msg,
const FieldDescriptor* field_desc,
google::protobuf::Arena* arena, CelValue* result) {
if (field_desc->is_map()) {
*result = CelValue::CreateMap(google::protobuf::Arena::Create<FieldBackedMapImpl>(
arena, msg, field_desc, arena));
return absl::OkStatus();
} else if (field_desc->is_repeated()) {
*result = CelValue::CreateList(google::protobuf::Arena::Create<FieldBackedListImpl>(
arena, msg, field_desc, arena));
return absl::OkStatus();
} else {
return CreateValueFromSingleField(msg, field_desc, arena, result);
}
}
}
absl::Status BindProtoToActivation(const Message* message, Arena* arena,
Activation* activation,
ProtoUnsetFieldOptions options) {
if (arena == nullptr) {
return absl::InvalidArgumentError(
"arena must not be null for BindProtoToActivation.");
}
const Descriptor* desc = message->GetDescriptor();
const google::protobuf::Reflection* reflection = message->GetReflection();
for (int i = 0; i < desc->field_count(); i++) {
CelValue value;
const FieldDescriptor* field_desc = desc->field(i);
if (options == ProtoUnsetFieldOptions::kSkip) {
if (!field_desc->is_repeated() &&
!reflection->HasField(*message, field_desc)) {
continue;
}
}
auto status = CreateValueFromField(message, field_desc, arena, &value);
if (!status.ok()) {
return status;
}
activation->InsertValue(field_desc->name(), value);
}
return absl::OkStatus();
}
}
}
}
} | #include "eval/public/activation_bind_helper.h"
#include "absl/status/status.h"
#include "eval/public/activation.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using testutil::EqualsProto;
TEST(ActivationBindHelperTest, TestSingleBoolBind) {
TestMessage message;
message.set_bool_value(true);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation));
auto result = activation.FindValue("bool_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsBool());
EXPECT_EQ(value.BoolOrDie(), true);
}
TEST(ActivationBindHelperTest, TestSingleInt32Bind) {
TestMessage message;
message.set_int32_value(42);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation));
auto result = activation.FindValue("int32_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_EQ(value.Int64OrDie(), 42);
}
TEST(ActivationBindHelperTest, TestUnsetRepeatedIsEmptyList) {
TestMessage message;
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation));
auto result = activation.FindValue("int32_list", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsList());
EXPECT_TRUE(value.ListOrDie()->empty());
}
TEST(ActivationBindHelperTest, TestSkipUnsetFields) {
TestMessage message;
message.set_int32_value(42);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation,
ProtoUnsetFieldOptions::kSkip));
auto result = activation.FindValue("int32_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_EQ(value.Int64OrDie(), 42);
result = activation.FindValue("message_value", &arena);
ASSERT_FALSE(result.has_value());
}
TEST(ActivationBindHelperTest, TestBindDefaultFields) {
TestMessage message;
message.set_int32_value(42);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation,
ProtoUnsetFieldOptions::kBindDefault));
auto result = activation.FindValue("int32_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_EQ(value.Int64OrDie(), 42);
result = activation.FindValue("message_value", &arena);
ASSERT_TRUE(result.has_value());
EXPECT_NE(nullptr, result->MessageOrDie());
EXPECT_THAT(TestMessage::default_instance(),
EqualsProto(*result->MessageOrDie()));
}
TEST(ActivationBindHelperTest, RejectsNullArena) {
TestMessage message;
message.set_bool_value(true);
Activation activation;
ASSERT_EQ(BindProtoToActivation(&message, nullptr, &activation),
absl::InvalidArgumentError(
"arena must not be null for BindProtoToActivation."));
}
}
}
}
}
} |
106 | #ifndef ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
#define ABSL_STRINGS_INTERNAL_CORD_REP_BTREE_READER_H_
#include <cassert>
#include "absl/base/config.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_navigator.h"
#include "absl/strings/internal/cord_rep_flat.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
class CordRepBtreeReader {
public:
using ReadResult = CordRepBtreeNavigator::ReadResult;
using Position = CordRepBtreeNavigator::Position;
explicit operator bool() const { return navigator_.btree() != nullptr; }
CordRepBtree* btree() const { return navigator_.btree(); }
CordRep* node() const { return navigator_.Current(); }
size_t length() const;
size_t remaining() const { return remaining_; }
void Reset() { navigator_.Reset(); }
absl::string_view Init(CordRepBtree* tree);
absl::string_view Next();
absl::string_view Skip(size_t skip);
absl::string_view Read(size_t n, size_t chunk_size, CordRep*& tree);
absl::string_view Seek(size_t offset);
private:
size_t remaining_ = 0;
CordRepBtreeNavigator navigator_;
};
inline size_t CordRepBtreeReader::length() const {
assert(btree() != nullptr);
return btree()->length;
}
inline absl::string_view CordRepBtreeReader::Init(CordRepBtree* tree) {
assert(tree != nullptr);
const CordRep* edge = navigator_.InitFirst(tree);
remaining_ = tree->length - edge->length;
return EdgeData(edge);
}
inline absl::string_view CordRepBtreeReader::Next() {
if (remaining_ == 0) return {};
const CordRep* edge = navigator_.Next();
assert(edge != nullptr);
remaining_ -= edge->length;
return EdgeData(edge);
}
inline absl::string_view CordRepBtreeReader::Skip(size_t skip) {
const size_t edge_length = navigator_.Current()->length;
CordRepBtreeNavigator::Position pos = navigator_.Skip(skip + edge_length);
if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) {
remaining_ = 0;
return {};
}
remaining_ -= skip - pos.offset + pos.edge->length;
return EdgeData(pos.edge).substr(pos.offset);
}
inline absl::string_view CordRepBtreeReader::Seek(size_t offset) {
const CordRepBtreeNavigator::Position pos = navigator_.Seek(offset);
if (ABSL_PREDICT_FALSE(pos.edge == nullptr)) {
remaining_ = 0;
return {};
}
absl::string_view chunk = EdgeData(pos.edge).substr(pos.offset);
remaining_ = length() - offset - chunk.length();
return chunk;
}
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/strings/internal/cord_rep_btree_reader.h"
#include <cassert>
#include "absl/base/config.h"
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_btree_navigator.h"
#include "absl/strings/internal/cord_rep_flat.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
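// Reads `n` bytes starting at the first unconsumed byte of the current edge
// (its trailing `chunk_size` bytes). The extracted bytes are returned in
// `tree`; the return value is the chunk remaining at the new read position.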
absl::string_view CordRepBtreeReader::Read(size_t n, size_t chunk_size,
CordRep*& tree) {
assert(chunk_size <= navigator_.Current()->length);
CordRep* edge = chunk_size ? navigator_.Current() : navigator_.Next();
const size_t offset = chunk_size ? edge->length - chunk_size : 0;
ReadResult result = navigator_.Read(offset, n);
tree = result.tree;
if (n < chunk_size) return EdgeData(edge).substr(result.n);
const size_t consumed_by_read = n - chunk_size - result.n;
if (consumed_by_read >= remaining_) {
remaining_ = 0;
return {};
}
edge = navigator_.Current();
remaining_ -= consumed_by_read + edge->length;
return EdgeData(edge).substr(result.n);
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cord_rep_btree_reader.h"
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/cord.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::Ne;
using ::testing::Not;
using ::absl::cordrep_testing::CordRepBtreeFromFlats;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::CordToString;
using ::absl::cordrep_testing::CreateFlatsFromString;
using ::absl::cordrep_testing::CreateRandomString;
using ReadResult = CordRepBtreeReader::ReadResult;
TEST(CordRepBtreeReaderTest, Next) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
CordRepBtreeReader reader;
size_t remaining = data.length();
absl::string_view chunk = reader.Init(node);
EXPECT_THAT(chunk, Eq(data.substr(0, chunk.length())));
remaining -= chunk.length();
EXPECT_THAT(reader.remaining(), Eq(remaining));
while (remaining > 0) {
const size_t offset = data.length() - remaining;
chunk = reader.Next();
EXPECT_THAT(chunk, Eq(data.substr(offset, chunk.length())));
remaining -= chunk.length();
EXPECT_THAT(reader.remaining(), Eq(remaining));
}
EXPECT_THAT(reader.remaining(), Eq(0u));
EXPECT_THAT(reader.Next(), testing::IsEmpty());
CordRep::Unref(node);
}
}
TEST(CordRepBtreeReaderTest, Skip) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
for (size_t skip1 = 0; skip1 < data.length() - kChars; ++skip1) {
for (size_t skip2 = 0; skip2 < data.length() - kChars; ++skip2) {
CordRepBtreeReader reader;
size_t remaining = data.length();
absl::string_view chunk = reader.Init(node);
remaining -= chunk.length();
chunk = reader.Skip(skip1);
size_t offset = data.length() - remaining;
ASSERT_THAT(chunk, Eq(data.substr(offset + skip1, chunk.length())));
remaining -= chunk.length() + skip1;
ASSERT_THAT(reader.remaining(), Eq(remaining));
if (remaining == 0) continue;
size_t skip = std::min(remaining - 1, skip2);
chunk = reader.Skip(skip);
offset = data.length() - remaining;
ASSERT_THAT(chunk, Eq(data.substr(offset + skip, chunk.length())));
}
}
CordRep::Unref(node);
}
}
TEST(CordRepBtreeReaderTest, SkipBeyondLength) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
tree = CordRepBtree::Append(tree, MakeFlat("def"));
CordRepBtreeReader reader;
reader.Init(tree);
EXPECT_THAT(reader.Skip(100), IsEmpty());
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
}
TEST(CordRepBtreeReaderTest, Seek) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap, cap * cap + 1, cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
for (size_t seek = 0; seek < data.length() - 1; ++seek) {
CordRepBtreeReader reader;
reader.Init(node);
absl::string_view chunk = reader.Seek(seek);
ASSERT_THAT(chunk, Not(IsEmpty()));
ASSERT_THAT(chunk, Eq(data.substr(seek, chunk.length())));
ASSERT_THAT(reader.remaining(),
Eq(data.length() - seek - chunk.length()));
}
CordRep::Unref(node);
}
}
TEST(CordRepBtreeReaderTest, SeekBeyondLength) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
tree = CordRepBtree::Append(tree, MakeFlat("def"));
CordRepBtreeReader reader;
reader.Init(tree);
EXPECT_THAT(reader.Seek(6), IsEmpty());
EXPECT_THAT(reader.remaining(), Eq(0u));
EXPECT_THAT(reader.Seek(100), IsEmpty());
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
}
TEST(CordRepBtreeReaderTest, Read) {
std::string data = "abcdefghijklmno";
std::vector<CordRep*> flats = CreateFlatsFromString(data, 5);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
CordRep* tree;
CordRepBtreeReader reader;
absl::string_view chunk;
chunk = reader.Init(node);
chunk = reader.Read(0, chunk.length(), tree);
EXPECT_THAT(tree, Eq(nullptr));
EXPECT_THAT(chunk, Eq("abcde"));
EXPECT_THAT(reader.remaining(), Eq(10u));
EXPECT_THAT(reader.Next(), Eq("fghij"));
chunk = reader.Init(node);
chunk = reader.Read(15, chunk.length(), tree);
EXPECT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("abcdefghijklmno"));
EXPECT_THAT(chunk, Eq(""));
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(3, chunk.length(), tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("abc"));
EXPECT_THAT(chunk, Eq("de"));
EXPECT_THAT(reader.remaining(), Eq(10u));
EXPECT_THAT(reader.Next(), Eq("fghij"));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(2, chunk.length() - 2, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("cd"));
EXPECT_THAT(chunk, Eq("e"));
EXPECT_THAT(reader.remaining(), Eq(10u));
EXPECT_THAT(reader.Next(), Eq("fghij"));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(3, 0, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("fgh"));
EXPECT_THAT(chunk, Eq("ij"));
EXPECT_THAT(reader.remaining(), Eq(5u));
EXPECT_THAT(reader.Next(), Eq("klmno"));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(12, chunk.length() - 2, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("cdefghijklmn"));
EXPECT_THAT(chunk, Eq("o"));
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
chunk = reader.Init(node);
chunk = reader.Read(10 - 2, chunk.length() - 2, tree);
ASSERT_THAT(tree, Ne(nullptr));
EXPECT_THAT(CordToString(tree), Eq("cdefghij"));
EXPECT_THAT(chunk, Eq("klmno"));
EXPECT_THAT(reader.remaining(), Eq(0u));
CordRep::Unref(tree);
CordRep::Unref(node);
}
TEST(CordRepBtreeReaderTest, ReadExhaustive) {
constexpr size_t kChars = 3;
const size_t cap = CordRepBtree::kMaxCapacity;
size_t counts[] = {1, 2, cap, cap * cap + 1, cap * cap * cap * 2 + 17};
for (size_t count : counts) {
std::string data = CreateRandomString(count * kChars);
std::vector<CordRep*> flats = CreateFlatsFromString(data, kChars);
CordRepBtree* node = CordRepBtreeFromFlats(flats);
for (size_t read_size : {kChars - 1, kChars, kChars + 7, cap * cap}) {
CordRepBtreeReader reader;
absl::string_view chunk = reader.Init(node);
size_t consumed = 0;
size_t remaining = data.length();
while (remaining > 0) {
CordRep* tree;
size_t n = (std::min)(remaining, read_size);
chunk = reader.Read(n, chunk.length(), tree);
EXPECT_THAT(tree, Ne(nullptr));
if (tree) {
EXPECT_THAT(CordToString(tree), Eq(data.substr(consumed, n)));
CordRep::Unref(tree);
}
consumed += n;
remaining -= n;
EXPECT_THAT(reader.remaining(), Eq(remaining - chunk.length()));
if (remaining > 0) {
ASSERT_FALSE(chunk.empty());
ASSERT_THAT(chunk, Eq(data.substr(consumed, chunk.length())));
} else {
ASSERT_TRUE(chunk.empty()) << chunk;
}
}
}
CordRep::Unref(node);
}
}
}
}
ABSL_NAMESPACE_END
} |
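A minimal sketch, assuming the helpers of the test file above (CreateFlatsFromString, CordRepBtreeFromFlats), of the consumption loop that ReadExhaustive exercises: Read(n, chunk.length(), tree) extracts the next n bytes into `tree` (which the caller must unref) and returns the unconsumed remainder of the current chunk.

void ConsumeInReads(CordRepBtree* node, size_t total, size_t read_size) {
  // Sketch only: mirrors the ReadExhaustive loop above.
  CordRepBtreeReader reader;
  absl::string_view chunk = reader.Init(node);
  size_t remaining = total;
  while (remaining > 0) {
    CordRep* tree = nullptr;
    const size_t n = (std::min)(remaining, read_size);
    chunk = reader.Read(n, chunk.length(), tree);  // extracts n bytes as `tree`
    if (tree != nullptr) CordRep::Unref(tree);     // caller owns the extracted rep
    remaining -= n;
  }
}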
107 | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_COMMON_CASE_FORMAT_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_GEN_COMMON_CASE_FORMAT_H_
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
string toLowerCamel(const string &s, const char delimiter = '_');
string toLowerSnake(const string &s, const char delimiter = '_');
string toUpperCamel(const string &s, const char delimiter = '_');
string toUpperSnake(const string &s, const char delimiter = '_');
}
}
#endif
#include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "tensorflow/core/platform/str_util.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
enum CaseFormatType {
LOWER_CAMEL,
UPPER_CAMEL,
LOWER_SNAKE,
UPPER_SNAKE,
};
string FormatStringCase(const string &str, CaseFormatType to,
const char delimiter = '_') {
const bool from_snake =
(str == str_util::Uppercase(str)) || (str == str_util::Lowercase(str));
const bool toUpper = (to == UPPER_CAMEL || to == UPPER_SNAKE);
const bool toSnake = (to == LOWER_SNAKE || to == UPPER_SNAKE);
string result;
bool inputStart = true;
bool wordStart = true;
for (const char c : str) {
if (c == delimiter) {
if (wordStart) {
result.push_back(delimiter);
}
wordStart = true;
continue;
}
if (!from_snake && isupper(c)) {
wordStart = true;
}
if (wordStart && toSnake && !inputStart) {
result.push_back(delimiter);
}
const bool shouldCapIfSnake = toUpper;
const bool shouldCapIfCamel = wordStart && (toUpper || !inputStart);
if ((toSnake && shouldCapIfSnake) || (!toSnake && shouldCapIfCamel)) {
result += toupper(c);
} else {
result += tolower(c);
}
wordStart = false;
inputStart = false;
}
if (wordStart) {
result.push_back(delimiter);
}
return result;
}
}
string toLowerCamel(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_CAMEL, delimiter);
}
string toLowerSnake(const string &s, const char delimiter) {
return FormatStringCase(s, LOWER_SNAKE, delimiter);
}
string toUpperCamel(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_CAMEL, delimiter);
}
string toUpperSnake(const string &s, const char delimiter) {
return FormatStringCase(s, UPPER_SNAKE, delimiter);
}
}
} | #include "tensorflow/c/experimental/ops/gen/common/case_format.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace {
struct Variations {
string lower_camel;
string lower_snake;
string upper_camel;
string upper_snake;
};
void TestSingleVariation(const string &str, Variations expected,
char delimiter = '_') {
EXPECT_EQ(expected.lower_camel, toLowerCamel(str, delimiter));
EXPECT_EQ(expected.lower_snake, toLowerSnake(str, delimiter));
EXPECT_EQ(expected.upper_camel, toUpperCamel(str, delimiter));
EXPECT_EQ(expected.upper_snake, toUpperSnake(str, delimiter));
}
void TestAllVariations(Variations variations, char delimiter = '_') {
TestSingleVariation(variations.lower_camel, variations, delimiter);
TestSingleVariation(variations.lower_snake, variations, delimiter);
TestSingleVariation(variations.upper_camel, variations, delimiter);
TestSingleVariation(variations.upper_snake, variations, delimiter);
}
TEST(CppOpGenCaseFormat, test_single_word) {
TestAllVariations(Variations{
"three",
"three",
"Three",
"THREE",
});
}
TEST(CppOpGenCaseFormat, test_complex_string) {
TestAllVariations(Variations{
"threeNTest33Words",
"three_n_test33_words",
"ThreeNTest33Words",
"THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_hyphen_delimiter) {
TestAllVariations(
Variations{
"threeNTest33Words",
"three-n-test33-words",
"ThreeNTest33Words",
"THREE-N-TEST33-WORDS",
},
'-');
}
TEST(CppOpGenCaseFormat, test_trailing_underscore) {
TestAllVariations(Variations{
"threeNTest33Words_",
"three_n_test33_words_",
"ThreeNTest33Words_",
"THREE_N_TEST33_WORDS_",
});
}
TEST(CppOpGenCaseFormat, test_double_trailing_underscores) {
TestAllVariations(Variations{
"xxY__",
"xx_y__",
"XxY__",
"XX_Y__",
});
}
TEST(CppOpGenCaseFormat, test_leading_underscore) {
TestAllVariations(Variations{
"_threeNTest33Words",
"_three_n_test33_words",
"_ThreeNTest33Words",
"_THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_double_leading_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words",
"__three_n_test33_words",
"__ThreeNTest33Words",
"__THREE_N_TEST33_WORDS",
});
}
TEST(CppOpGenCaseFormat, test_leading_and_trailing_underscores) {
TestAllVariations(Variations{
"__threeNTest33Words____",
"__three_n_test33_words____",
"__ThreeNTest33Words____",
"__THREE_N_TEST33_WORDS____",
});
}
}
}
} |
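A hypothetical standalone usage sketch for the four converters above; the expected outputs follow directly from the test_complex_string case.

#include <iostream>
#include "tensorflow/c/experimental/ops/gen/common/case_format.h"

int main() {
  const tensorflow::string s = "threeNTest33Words";
  // Prints: three_n_test33_words, ThreeNTest33Words, THREE_N_TEST33_WORDS
  std::cout << tensorflow::generator::toLowerSnake(s) << "\n"
            << tensorflow::generator::toUpperCamel(s) << "\n"
            << tensorflow::generator::toUpperSnake(s) << "\n";
  return 0;
}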
108 | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_TIME_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_TIME_H_
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
namespace cel::internal {
inline absl::Duration
MaxDuration() {
return absl::Seconds(315576000000) + absl::Nanoseconds(999999999);
}
inline absl::Duration
MinDuration() {
return absl::Seconds(-315576000000) + absl::Nanoseconds(-999999999);
}
inline absl::Time
MaxTimestamp() {
return absl::UnixEpoch() + absl::Seconds(253402300799) +
absl::Nanoseconds(999999999);
}
inline absl::Time
MinTimestamp() {
return absl::UnixEpoch() + absl::Seconds(-62135596800);
}
absl::Status ValidateDuration(absl::Duration duration);
absl::StatusOr<absl::Duration> ParseDuration(absl::string_view input);
absl::StatusOr<std::string> FormatDuration(absl::Duration duration);
absl::StatusOr<std::string> EncodeDurationToJson(absl::Duration duration);
std::string DebugStringDuration(absl::Duration duration);
absl::Status ValidateTimestamp(absl::Time timestamp);
absl::StatusOr<absl::Time> ParseTimestamp(absl::string_view input);
absl::StatusOr<std::string> FormatTimestamp(absl::Time timestamp);
absl::StatusOr<std::string> EncodeTimestampToJson(absl::Time timestamp);
std::string DebugStringTimestamp(absl::Time timestamp);
}
#endif
#include "internal/time.h"
#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "internal/status_macros.h"
namespace cel::internal {
namespace {
std::string RawFormatTimestamp(absl::Time timestamp) {
return absl::FormatTime("%Y-%m-%d%ET%H:%M:%E*SZ", timestamp,
absl::UTCTimeZone());
}
}
absl::Status ValidateDuration(absl::Duration duration) {
if (duration < MinDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Duration \"", absl::FormatDuration(duration),
"\" below minimum allowed duration \"",
absl::FormatDuration(MinDuration()), "\""));
}
if (duration > MaxDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Duration \"", absl::FormatDuration(duration),
"\" above maximum allowed duration \"",
absl::FormatDuration(MaxDuration()), "\""));
}
return absl::OkStatus();
}
absl::StatusOr<absl::Duration> ParseDuration(absl::string_view input) {
absl::Duration duration;
if (!absl::ParseDuration(input, &duration)) {
return absl::InvalidArgumentError("Failed to parse duration from string");
}
return duration;
}
absl::StatusOr<std::string> FormatDuration(absl::Duration duration) {
CEL_RETURN_IF_ERROR(ValidateDuration(duration));
return absl::FormatDuration(duration);
}
std::string DebugStringDuration(absl::Duration duration) {
return absl::FormatDuration(duration);
}
absl::Status ValidateTimestamp(absl::Time timestamp) {
if (timestamp < MinTimestamp()) {
return absl::InvalidArgumentError(
absl::StrCat("Timestamp \"", RawFormatTimestamp(timestamp),
"\" below minimum allowed timestamp \"",
RawFormatTimestamp(MinTimestamp()), "\""));
}
if (timestamp > MaxTimestamp()) {
return absl::InvalidArgumentError(
absl::StrCat("Timestamp \"", RawFormatTimestamp(timestamp),
"\" above maximum allowed timestamp \"",
RawFormatTimestamp(MaxTimestamp()), "\""));
}
return absl::OkStatus();
}
absl::StatusOr<absl::Time> ParseTimestamp(absl::string_view input) {
absl::Time timestamp;
std::string err;
if (!absl::ParseTime(absl::RFC3339_full, input, absl::UTCTimeZone(),
×tamp, &err)) {
return err.empty() ? absl::InvalidArgumentError(
"Failed to parse timestamp from string")
: absl::InvalidArgumentError(absl::StrCat(
"Failed to parse timestamp from string: ", err));
}
CEL_RETURN_IF_ERROR(ValidateTimestamp(timestamp));
return timestamp;
}
absl::StatusOr<std::string> FormatTimestamp(absl::Time timestamp) {
CEL_RETURN_IF_ERROR(ValidateTimestamp(timestamp));
return RawFormatTimestamp(timestamp);
}
std::string FormatNanos(int32_t nanos) {
constexpr int32_t kNanosPerMillisecond = 1000000;
constexpr int32_t kNanosPerMicrosecond = 1000;
if (nanos % kNanosPerMillisecond == 0) {
return absl::StrFormat("%03d", nanos / kNanosPerMillisecond);
} else if (nanos % kNanosPerMicrosecond == 0) {
return absl::StrFormat("%06d", nanos / kNanosPerMicrosecond);
}
return absl::StrFormat("%09d", nanos);
}
absl::StatusOr<std::string> EncodeDurationToJson(absl::Duration duration) {
CEL_RETURN_IF_ERROR(ValidateDuration(duration));
std::string result;
int64_t seconds = absl::IDivDuration(duration, absl::Seconds(1), &duration);
int64_t nanos = absl::IDivDuration(duration, absl::Nanoseconds(1), &duration);
if (seconds < 0 || nanos < 0) {
result = "-";
seconds = -seconds;
nanos = -nanos;
}
absl::StrAppend(&result, seconds);
if (nanos != 0) {
absl::StrAppend(&result, ".", FormatNanos(nanos));
}
absl::StrAppend(&result, "s");
return result;
}
absl::StatusOr<std::string> EncodeTimestampToJson(absl::Time timestamp) {
static constexpr absl::string_view kTimestampFormat = "%E4Y-%m-%dT%H:%M:%S";
CEL_RETURN_IF_ERROR(ValidateTimestamp(timestamp));
absl::Time unix_seconds =
absl::FromUnixSeconds(absl::ToUnixSeconds(timestamp));
int64_t n = (timestamp - unix_seconds) / absl::Nanoseconds(1);
std::string result =
absl::FormatTime(kTimestampFormat, unix_seconds, absl::UTCTimeZone());
if (n > 0) {
absl::StrAppend(&result, ".", FormatNanos(n));
}
absl::StrAppend(&result, "Z");
return result;
}
std::string DebugStringTimestamp(absl::Time timestamp) {
return RawFormatTimestamp(timestamp);
}
} | #include "internal/time.h"
#include <string>
#include "google/protobuf/util/time_util.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
using cel::internal::StatusIs;
TEST(MaxDuration, ProtoEquiv) {
EXPECT_EQ(MaxDuration(),
absl::Seconds(google::protobuf::util::TimeUtil::kDurationMaxSeconds) +
absl::Nanoseconds(999999999));
}
TEST(MinDuration, ProtoEquiv) {
EXPECT_EQ(MinDuration(),
absl::Seconds(google::protobuf::util::TimeUtil::kDurationMinSeconds) +
absl::Nanoseconds(-999999999));
}
TEST(MaxTimestamp, ProtoEquiv) {
EXPECT_EQ(MaxTimestamp(),
absl::UnixEpoch() +
absl::Seconds(google::protobuf::util::TimeUtil::kTimestampMaxSeconds) +
absl::Nanoseconds(999999999));
}
TEST(MinTimestamp, ProtoEquiv) {
EXPECT_EQ(MinTimestamp(),
absl::UnixEpoch() +
absl::Seconds(google::protobuf::util::TimeUtil::kTimestampMinSeconds));
}
TEST(ParseDuration, Conformance) {
absl::Duration parsed;
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("1s"));
EXPECT_EQ(parsed, absl::Seconds(1));
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("0.010s"));
EXPECT_EQ(parsed, absl::Milliseconds(10));
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("0.000010s"));
EXPECT_EQ(parsed, absl::Microseconds(10));
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("0.000000010s"));
EXPECT_EQ(parsed, absl::Nanoseconds(10));
EXPECT_THAT(internal::ParseDuration("abc"),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::ParseDuration("1c"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(FormatDuration, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatDuration(absl::Seconds(1)));
EXPECT_EQ(formatted, "1s");
ASSERT_OK_AND_ASSIGN(formatted,
internal::FormatDuration(absl::Milliseconds(10)));
EXPECT_EQ(formatted, "10ms");
ASSERT_OK_AND_ASSIGN(formatted,
internal::FormatDuration(absl::Microseconds(10)));
EXPECT_EQ(formatted, "10us");
ASSERT_OK_AND_ASSIGN(formatted,
internal::FormatDuration(absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "10ns");
EXPECT_THAT(internal::FormatDuration(absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::FormatDuration(-absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ParseTimestamp, Conformance) {
absl::Time parsed;
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseTimestamp("1-01-01T00:00:00Z"));
EXPECT_EQ(parsed, MinTimestamp());
ASSERT_OK_AND_ASSIGN(
parsed, internal::ParseTimestamp("9999-12-31T23:59:59.999999999Z"));
EXPECT_EQ(parsed, MaxTimestamp());
ASSERT_OK_AND_ASSIGN(parsed,
internal::ParseTimestamp("1970-01-01T00:00:00Z"));
EXPECT_EQ(parsed, absl::UnixEpoch());
ASSERT_OK_AND_ASSIGN(parsed,
internal::ParseTimestamp("1970-01-01T00:00:00.010Z"));
EXPECT_EQ(parsed, absl::UnixEpoch() + absl::Milliseconds(10));
ASSERT_OK_AND_ASSIGN(parsed,
internal::ParseTimestamp("1970-01-01T00:00:00.000010Z"));
EXPECT_EQ(parsed, absl::UnixEpoch() + absl::Microseconds(10));
ASSERT_OK_AND_ASSIGN(
parsed, internal::ParseTimestamp("1970-01-01T00:00:00.000000010Z"));
EXPECT_EQ(parsed, absl::UnixEpoch() + absl::Nanoseconds(10));
EXPECT_THAT(internal::ParseTimestamp("abc"),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::ParseTimestamp("10000-01-01T00:00:00Z"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(FormatTimestamp, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatTimestamp(MinTimestamp()));
EXPECT_EQ(formatted, "1-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatTimestamp(MaxTimestamp()));
EXPECT_EQ(formatted, "9999-12-31T23:59:59.999999999Z");
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatTimestamp(absl::UnixEpoch()));
EXPECT_EQ(formatted, "1970-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(
formatted,
internal::FormatTimestamp(absl::UnixEpoch() + absl::Milliseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.01Z");
ASSERT_OK_AND_ASSIGN(
formatted,
internal::FormatTimestamp(absl::UnixEpoch() + absl::Microseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.00001Z");
ASSERT_OK_AND_ASSIGN(
formatted,
internal::FormatTimestamp(absl::UnixEpoch() + absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.00000001Z");
EXPECT_THAT(internal::FormatTimestamp(absl::InfiniteFuture()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::FormatTimestamp(absl::InfinitePast()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(EncodeDurationToJson, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Seconds(1)));
EXPECT_EQ(formatted, "1s");
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Milliseconds(10)));
EXPECT_EQ(formatted, "0.010s");
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Microseconds(10)));
EXPECT_EQ(formatted, "0.000010s");
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "0.000000010s");
EXPECT_THAT(EncodeDurationToJson(absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(EncodeDurationToJson(-absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(EncodeTimestampToJson, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(MinTimestamp()));
EXPECT_EQ(formatted, "0001-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(MaxTimestamp()));
EXPECT_EQ(formatted, "9999-12-31T23:59:59.999999999Z");
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(absl::UnixEpoch()));
EXPECT_EQ(formatted, "1970-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(
formatted,
EncodeTimestampToJson(absl::UnixEpoch() + absl::Milliseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.010Z");
ASSERT_OK_AND_ASSIGN(
formatted,
EncodeTimestampToJson(absl::UnixEpoch() + absl::Microseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.000010Z");
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(absl::UnixEpoch() +
absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.000000010Z");
EXPECT_THAT(EncodeTimestampToJson(absl::InfiniteFuture()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(EncodeTimestampToJson(absl::InfinitePast()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} |
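A small sketch of the round trip the conformance tests above cover: parse a duration string, then JSON-encode it with the helpers; the expected "0.010s" output matches the EncodeDurationToJson test.

#include <iostream>
#include "internal/time.h"

int main() {
  auto duration = cel::internal::ParseDuration("0.010s");  // 10 milliseconds
  if (!duration.ok()) return 1;
  auto json = cel::internal::EncodeDurationToJson(*duration);
  if (!json.ok()) return 1;
  std::cout << *json << "\n";  // prints "0.010s"
  return 0;
}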
109 | #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
#define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_CUSTOM_GMOCK_PORT_H_
#endif | #include "gmock/internal/gmock-port.h"
#include "gtest/gtest.h"
TEST(DummyTest, Dummy) {} |
110 | #ifndef TENSORFLOW_CORE_KERNELS_STRING_UTIL_H_
#define TENSORFLOW_CORE_KERNELS_STRING_UTIL_H_
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
namespace tensorflow {
enum class UnicodeEncoding { UTF8, UTF16BE, UTF32BE };
enum class CharUnit { BYTE, UTF8_CHAR };
inline bool IsTrailByte(char x) { return static_cast<signed char>(x) < -0x40; }
Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding);
Status ParseCharUnit(const string& str, CharUnit* unit);
int32 UTF8StrLen(const string& str);
template <typename T>
bool ForwardNUTF8CharPositions(const StringPiece in,
const T num_utf8_chars_to_shift, T* pos) {
const size_t size = in.size();
T utf8_chars_counted = 0;
while (utf8_chars_counted < num_utf8_chars_to_shift && *pos < size) {
do {
++*pos;
} while (*pos < size && IsTrailByte(in[*pos]));
++utf8_chars_counted;
}
return utf8_chars_counted == num_utf8_chars_to_shift;
}
template <typename T>
bool BackNUTF8CharPositions(const StringPiece in,
const T num_utf8_chars_to_shift, T* pos) {
const size_t start = 0;
T utf8_chars_counted = 0;
while (utf8_chars_counted < num_utf8_chars_to_shift && (*pos > start)) {
do {
--*pos;
} while (IsTrailByte(in[*pos]) && *pos > start);
++utf8_chars_counted;
}
return utf8_chars_counted == num_utf8_chars_to_shift;
}
}
#endif
#include "tensorflow/core/kernels/string_util.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
Status ParseUnicodeEncoding(const string& str, UnicodeEncoding* encoding) {
if (str == "UTF-8") {
*encoding = UnicodeEncoding::UTF8;
} else if (str == "UTF-16-BE") {
*encoding = UnicodeEncoding::UTF16BE;
} else if (str == "UTF-32-BE") {
*encoding = UnicodeEncoding::UTF32BE;
} else {
return errors::InvalidArgument(
strings::StrCat("Invalid encoding \"", str,
"\": Should be one of: UTF-8, UTF-16-BE, UTF-32-BE"));
}
return absl::OkStatus();
}
Status ParseCharUnit(const string& str, CharUnit* unit) {
if (str == "BYTE") {
*unit = CharUnit::BYTE;
} else if (str == "UTF8_CHAR") {
*unit = CharUnit::UTF8_CHAR;
} else {
return errors::InvalidArgument(strings::StrCat(
"Invalid unit \"", str, "\": Should be one of: BYTE, UTF8_CHAR"));
}
return absl::OkStatus();
}
int32 UTF8StrLen(const string& str) {
const int32_t byte_size = str.size();
const char* const end = str.data() + byte_size;
const char* ptr = str.data();
int32_t skipped_count = 0;
while (ptr < end) {
skipped_count += IsTrailByte(*ptr++) ? 1 : 0;
}
const int32_t result = byte_size - skipped_count;
return result;
}
} | #include "tensorflow/lite/string_util.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
TEST(StringUtil, TestStringUtil) {
Interpreter interpreter;
interpreter.AddTensors(3);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
TfLiteTensor* t1 = interpreter.tensor(1);
t1->type = kTfLiteString;
t1->allocation_type = kTfLiteDynamic;
union {
char raw_bytes[15];
struct {
int32_t num_strs;
int32_t offsets[2];
char str_data[3];
} tensor_data;
} data;
data.tensor_data = {1, {12, 15}, {'X', 'Y', 'Z'}};
TfLiteQuantization quant;
quant.type = kTfLiteNoQuantization;
quant.params = nullptr;
interpreter.SetTensorParametersReadOnly(
2, kTfLiteString, "", {1}, quant, data.raw_bytes, sizeof(data.raw_bytes));
TfLiteTensor* t2 = interpreter.tensor(2);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
char s0[] = "ABC";
string s1 = "DEFG";
char s2[] = "";
DynamicBuffer buf0;
ASSERT_EQ(buf0.AddString(s0, 3), kTfLiteOk);
DynamicBuffer buf1;
ASSERT_EQ(buf1.AddString(s1.data(), s1.length()), kTfLiteOk);
ASSERT_EQ(buf0.AddString(s2, 0), kTfLiteOk);
auto new_shape = TfLiteIntArrayCreate(2);
new_shape->data[0] = 2;
new_shape->data[1] = 1;
buf0.WriteToTensor(t0, new_shape);
buf1.WriteToTensorAsVector(t1);
EXPECT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 2);
EXPECT_EQ(t0->dims->data[1], 1);
EXPECT_EQ(t1->dims->size, 1);
EXPECT_EQ(t1->dims->data[0], 1);
ASSERT_EQ(GetStringCount(t0), 2);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "ABC");
str_ref = GetString(t0, 1);
ASSERT_EQ(string(str_ref.str, str_ref.len), "");
ASSERT_EQ(t0->bytes, 19);
ASSERT_EQ(GetStringCount(t1), 1);
str_ref = GetString(t1, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "DEFG");
ASSERT_EQ(t1->bytes, 16);
ASSERT_EQ(GetStringCount(t2), 1);
str_ref = GetString(t2, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "XYZ");
ASSERT_EQ(t2->bytes, 15);
}
TEST(StringUtil, AddStringOverflow32Length) {
const size_t max_size = 100;
DynamicBuffer buf{max_size};
std::string big_string(max_size + 1, 'A');
ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}),
kTfLiteError);
}
TEST(StringUtil, AddStringToFullBufferOverflow32Length) {
const size_t max_size = 100;
DynamicBuffer buf{max_size};
std::string big_string((max_size / 2) + 1, 'A');
ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}), kTfLiteOk);
EXPECT_EQ(buf.AddString({big_string.data(), big_string.length()}),
kTfLiteError);
}
TEST(StringUtil, TruncatesCharDataToLen) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
DynamicBuffer buf;
char fake_big[] = "ABCADASDA";
ASSERT_EQ(buf.AddString({fake_big, 3}), kTfLiteOk);
buf.WriteToTensorAsVector(t0);
StringRef added_string = GetString(t0, 0);
EXPECT_EQ(added_string.len, 3);
EXPECT_EQ(string(added_string.str, 3), "ABC");
}
TEST(StringUtil, TestAddJoinedStringCharSeparator) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
char s0[] = "";
char s1[] = "ABC";
char s2[] = "DEFG";
char s3[] = "";
char s4[] = "XYZ";
DynamicBuffer buf;
buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, ' ');
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 1);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), " ABC DEFG XYZ");
ASSERT_EQ(t0->bytes, 26);
}
TEST(StringUtil, TestAddJoinedStringStringRefSeparator) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
char s[] = " - ";
char s0[] = "";
char s1[] = "ABC";
char s2[] = "DEFG";
char s3[] = "";
char s4[] = "XYZ";
DynamicBuffer buf;
buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, {s, 3});
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 1);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), " - ABC - DEFG - - XYZ");
ASSERT_EQ(t0->bytes, 34);
}
TEST(StringUtil, TestEmptyList) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
DynamicBuffer buf;
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 0);
ASSERT_EQ(t0->bytes, 8);
}
TEST(StringUtil, TestShapes) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
t0->dims = TfLiteIntArrayCreate(2);
t0->dims->data[0] = 2;
t0->dims->data[1] = 1;
DynamicBuffer buf;
buf.AddString("ABC", 3);
buf.AddString("X", 1);
buf.WriteToTensor(t0, nullptr);
ASSERT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 2);
EXPECT_EQ(t0->dims->data[1], 1);
auto new_shape = TfLiteIntArrayCreate(2);
new_shape->data[0] = 1;
new_shape->data[1] = 2;
buf.WriteToTensor(t0, new_shape);
ASSERT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 1);
EXPECT_EQ(t0->dims->data[1], 2);
}
} |
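A standalone sketch of the byte-versus-character distinction the helpers above encode: UTF8StrLen counts code points by skipping UTF-8 trail bytes, and ForwardNUTF8CharPositions advances a byte offset by whole characters; the string below is "héllo", 6 bytes but 5 characters.

#include <cstdint>
#include <iostream>
#include "tensorflow/core/kernels/string_util.h"

int main() {
  const tensorflow::string s = "h\xc3\xa9llo";  // "héllo" in UTF-8
  std::cout << tensorflow::UTF8StrLen(s) << "\n";  // prints 5
  int32_t pos = 0;
  // Advance past the first two characters; lands on byte index 3 ('l').
  tensorflow::ForwardNUTF8CharPositions(tensorflow::StringPiece(s), int32_t{2},
                                        &pos);
  std::cout << pos << "\n";  // prints 3
  return 0;
}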
111 | #ifndef TENSORFLOW_CORE_TFRT_KERNELS_IFRT_PROGRAM_OPS_H_
#define TENSORFLOW_CORE_TFRT_KERNELS_IFRT_PROGRAM_OPS_H_
#include <stdint.h>
#include <string>
#include <vector>
#include "absl/base/call_once.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
namespace tensorflow {
namespace tfrt_stub {
class IfrtCallOp : public tensorflow::OpKernel {
public:
explicit IfrtCallOp(tensorflow::OpKernelConstruction* ctx);
IfrtCallOp(const IfrtCallOp& other) = delete;
IfrtCallOp& operator=(const IfrtCallOp& other) = delete;
void Compute(tensorflow::OpKernelContext* ctx) override;
private:
int64_t program_id_;
std::vector<std::string> variable_names_;
std::vector<int> variable_arg_indices_;
absl::once_flag init_once_;
tensorflow::ifrt_serving::IfrtServingExecutable* executable_;
};
}
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
namespace tensorflow {
namespace tfrt_stub {
REGISTER_OP("IfrtCall")
.Input("args: Tin")
.Output("results: Tout")
.Attr("Tin: list(type) >= 0")
.Attr("Tout: list(type) >= 0")
.Attr("program_id: int")
.Attr("variable_arg_indices: list(int)")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
Calls an IFRT program identified by the given program id.
This op looks up a `ServingExecutable` from `ServingExecutableRegistry` using
the program id, calls the executable with the op's inputs as arguments, and
returns its results as the op's outputs.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel and should instead rely on IFRT Serving's mechanism that
automatically inserts this op via graph rewrite.
program_id: int64 id that can be used to look up compiled programs from
`ServingExecutableRegistry`.
variable_arg_indices: must be in sorted ascending order. The argument at position
variable_arg_indices[k] in the TPU program is already loaded as an IFRT array, and
the input `args[variable_arg_indices[k]]` is the key used to look up this loaded array.
)");
REGISTER_OP("IfrtLoadVariable")
.Input("variable: Tin")
.Output("array_key: Tout")
.Output("tensor: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.Attr("used_by_host: bool")
.SetIsStateful()
.SetShapeFn(tensorflow::shape_inference::UnknownShape)
.Doc(R"(
This op loads a restored variable tensor as a tensor future. It is a replacement of `tf.ReadVariableOp`.
This op returns a scalar string tensor containing the restored variable name, which
is composed from `container_name` and `shared_name` from a `var_handle` and can be
used as a key within the runtime, as well as a future for the tensor.
Note that this op is not part of a stable interface. Users must not use this op
in their SavedModel and should instead rely on IFRT Serving's mechanism that
automatically inserts this op via graph rewrite.
variable: the variable handle of the variable tensor to be loaded.
array_key: the key to be used to look up the loaded array by the `IfrtCall` op.
tensor: the future of the loaded tensor. The future contains a valid tensor if `used_by_host` is true.
used_by_host: a boolean indicating whether the variable is used by the host op
or exclusively by the TPU.
)");
}
} | #include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using tensorflow::ifrt_serving::ServingExecutableRegistry;
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using tensorflow::ifrt_serving::test_utils::IfrtServingExecutableTestHelper;
using tensorflow::test::AsTensor;
using tensorflow::test::TensorEq;
using ::testing::Return;
class IfrtCallOpTest : public OpsTestBase {
protected:
Status Init(int64_t program_id, int num_inputs, DataType input_type,
const std::vector<int>& variable_arg_indices,
const std::vector<DataType>& output_type_list) {
TF_CHECK_OK(NodeDefBuilder("op", "IfrtCall")
.Input(FakeInput(num_inputs, input_type))
.Attr("program_id", program_id)
.Attr("variable_arg_indices", variable_arg_indices)
.Attr("Tout", output_type_list)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IfrtCallOpTest, Basic) {
int64_t program_id = 123;
TF_ASSERT_OK(Init(
program_id,
2,
DT_INT32,
{},
{DT_INT32}));
tsl::test_util::MockServingDeviceSelector selector;
IfrtServingExecutableTestHelper helper(&selector);
EXPECT_CALL(selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper.MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
TF_ASSERT_OK_AND_ASSIGN(
ServingExecutableRegistry::Handle handle,
ServingExecutableRegistry::Register(program_id, std::move(executable)));
auto handle_cleaner = gtl::MakeCleanup([&handle] { handle.Release(); });
AddInputFromArray<int32_t>(TensorShape({1, 3}), {1, 2, 3});
AddInputFromArray<int32_t>(TensorShape({3, 1}), {1, 2, 3});
for (int i = 0; i < helper.num_cores() + 1; ++i) {
TF_ASSERT_OK(RunOpKernel());
}
Tensor expected_out = AsTensor<int32_t>({14}, TensorShape({1, 1}));
EXPECT_THAT(*GetOutput(0), TensorEq(expected_out));
}
}
}
} |
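A fragment, assuming the includes and fixture of the test above, showing how an IfrtCall node is declared; the program id is illustrative and must correspond to an executable already registered in ServingExecutableRegistry.

// Sketch only: node construction as done by IfrtCallOpTest::Init above.
tensorflow::NodeDef node_def;
TF_CHECK_OK(tensorflow::NodeDefBuilder("op", "IfrtCall")
                .Input(tensorflow::FakeInput(2, tensorflow::DT_INT32))
                .Attr("program_id", static_cast<int64_t>(123))  // illustrative
                .Attr("variable_arg_indices", std::vector<int>())
                .Attr("Tout", std::vector<tensorflow::DataType>{tensorflow::DT_INT32})
                .Finalize(&node_def));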
112 | #ifndef XLA_TSL_FRAMEWORK_ALLOCATOR_H_
#define XLA_TSL_FRAMEWORK_ALLOCATOR_H_
#include <stdlib.h>
#include <functional>
#include <limits>
#include <optional>
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/framework/numeric_types.h"
#include "xla/tsl/framework/type_traits.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/types.h"
namespace tsl {
struct AllocationAttributes {
AllocationAttributes() = default;
AllocationAttributes(bool retry_on_failure, bool allocation_will_be_logged,
std::function<uint64()>* freed_by_func)
: retry_on_failure(retry_on_failure),
allocation_will_be_logged(allocation_will_be_logged),
freed_by_func(freed_by_func) {}
bool retry_on_failure = true;
bool allocation_will_be_logged = false;
std::function<uint64()>* freed_by_func = nullptr;
AllocationAttributes(const AllocationAttributes&) = delete;
void operator=(const AllocationAttributes&) = delete;
};
struct AllocatorStats {
int64_t num_allocs;
int64_t bytes_in_use;
int64_t peak_bytes_in_use;
int64_t largest_alloc_size;
std::optional<int64_t> bytes_limit;
int64_t bytes_reserved;
int64_t peak_bytes_reserved;
std::optional<int64_t> bytes_reservable_limit;
int64_t largest_free_block_bytes;
std::optional<int64_t> pool_bytes;
std::optional<int64_t> peak_pool_bytes;
AllocatorStats()
: num_allocs(0),
bytes_in_use(0),
peak_bytes_in_use(0),
largest_alloc_size(0),
bytes_reserved(0),
peak_bytes_reserved(0),
largest_free_block_bytes(0) {}
std::string DebugString() const;
};
enum class AllocatorMemoryType {
kUnknown = 0,
kDevice = 1,
kHostPageable = 2,
kHostPinned = 3,
};
class Allocator {
public:
static constexpr size_t kAllocatorAlignment = 64;
virtual ~Allocator();
virtual std::string Name() = 0;
virtual void* AllocateRaw(size_t alignment, size_t num_bytes) = 0;
virtual void* AllocateRaw(size_t alignment, size_t num_bytes,
const AllocationAttributes& allocation_attr) {
return AllocateRaw(alignment, num_bytes);
}
virtual void DeallocateRaw(void* ptr) = 0;
virtual bool TracksAllocationSizes() const { return false; }
virtual bool AllocatesOpaqueHandle() const { return false; }
virtual size_t RequestedSize(const void* ptr) const {
CHECK(false) << "allocator doesn't track sizes";
return size_t(0);
}
virtual size_t AllocatedSize(const void* ptr) const {
return RequestedSize(ptr);
}
virtual int64_t AllocationId(const void* ptr) const { return 0; }
virtual size_t AllocatedSizeSlow(const void* ptr) const {
if (TracksAllocationSizes()) {
return AllocatedSize(ptr);
}
return 0;
}
virtual absl::optional<AllocatorStats> GetStats() { return absl::nullopt; }
virtual bool ClearStats() TF_MUST_USE_RESULT { return false; }
virtual void SetSafeFrontier(uint64 count) {}
virtual void SetStreamAndPreallocateMemory(void* stream) {}
virtual AllocatorMemoryType GetMemoryType() const {
return AllocatorMemoryType::kUnknown;
}
};
class AllocatorWrapper : public Allocator {
public:
explicit AllocatorWrapper(Allocator* wrapped) : wrapped_(wrapped) {}
~AllocatorWrapper() override {}
Allocator* wrapped() const { return wrapped_; }
std::string Name() override { return wrapped_->Name(); }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return wrapped_->AllocateRaw(alignment, num_bytes);
}
void* AllocateRaw(size_t alignment, size_t num_bytes,
const AllocationAttributes& allocation_attr) override {
return wrapped_->AllocateRaw(alignment, num_bytes, allocation_attr);
}
void DeallocateRaw(void* ptr) override { wrapped_->DeallocateRaw(ptr); }
bool TracksAllocationSizes() const override {
return wrapped_->TracksAllocationSizes();
}
bool AllocatesOpaqueHandle() const override {
return wrapped_->AllocatesOpaqueHandle();
}
size_t RequestedSize(const void* ptr) const override {
return wrapped_->RequestedSize(ptr);
}
size_t AllocatedSize(const void* ptr) const override {
return wrapped_->AllocatedSize(ptr);
}
int64_t AllocationId(const void* ptr) const override {
return wrapped_->AllocationId(ptr);
}
size_t AllocatedSizeSlow(const void* ptr) const override {
return wrapped_->AllocatedSizeSlow(ptr);
}
AllocatorMemoryType GetMemoryType() const override {
return wrapped_->GetMemoryType();
}
private:
Allocator* const wrapped_;
};
struct AllocatorAttributes {
void set_on_host(bool v) { value |= (static_cast<int>(v)); }
bool on_host() const { return value & 0x1; }
void set_nic_compatible(bool v) { value |= (static_cast<int>(v) << 1); }
bool nic_compatible() const { return value & (0x1 << 1); }
void set_gpu_compatible(bool v) { value |= (static_cast<int>(v) << 2); }
bool gpu_compatible() const { return value & (0x1 << 2); }
void set_use_pjrt_allocator(bool v) { value |= (static_cast<int>(v) << 3); }
bool use_pjrt_allocator() const { return value & (0x1 << 3); }
void Merge(AllocatorAttributes other) {
value |= other.value;
if (scope_id != other.scope_id) {
CHECK(scope_id == 0 || other.scope_id == 0)
<< "At least one scope_id should be zero to merge "
"AllocatorAttributes but found this.scope_id="
<< scope_id << " and other.scope_id=" << other.scope_id;
scope_id = scope_id == 0 ? other.scope_id : scope_id;
}
}
bool IsEqualOrLessRestrictiveThan(const AllocatorAttributes& other) const {
return (value | other.value) == other.value;
}
uint32 value = 0;
int32 scope_id = 0;
std::string DebugString() const;
};
Allocator* cpu_allocator_base();
Allocator* cpu_allocator(int numa_node = port::kNUMANoAffinity);
void EnableCPUAllocatorStats();
void DisableCPUAllocatorStats();
bool CPUAllocatorStatsEnabled();
void EnableCPUAllocatorFullStats();
bool CPUAllocatorFullStatsEnabled();
class SubAllocator {
public:
typedef std::function<void(void*, int index, size_t)> Visitor;
SubAllocator(const std::vector<Visitor>& alloc_visitors,
const std::vector<Visitor>& free_visitors);
virtual ~SubAllocator() {}
virtual void* Alloc(size_t alignment, size_t num_bytes,
size_t* bytes_received) = 0;
virtual void Free(void* ptr, size_t num_bytes) = 0;
virtual bool SupportsCoalescing() const = 0;
virtual AllocatorMemoryType GetMemoryType() const {
return AllocatorMemoryType::kUnknown;
}
protected:
void VisitAlloc(void* ptr, int index, size_t num_bytes);
void VisitFree(void* ptr, int index, size_t num_bytes);
const std::vector<Visitor> alloc_visitors_;
const std::vector<Visitor> free_visitors_;
};
}
#endif
#include "xla/tsl/framework/allocator.h"
#include <atomic>
#include "xla/tsl/framework/allocator_registry.h"
#include "xla/tsl/framework/tracking_allocator.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/platform/types.h"
namespace tsl {
string AllocatorStats::DebugString() const {
return strings::Printf(
"Limit: %20lld\n"
"InUse: %20lld\n"
"MaxInUse: %20lld\n"
"NumAllocs: %20lld\n"
"MaxAllocSize: %20lld\n"
"Reserved: %20lld\n"
"PeakReserved: %20lld\n"
"LargestFreeBlock: %20lld\n",
static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
static_cast<long long>(this->bytes_in_use),
static_cast<long long>(this->peak_bytes_in_use),
static_cast<long long>(this->num_allocs),
static_cast<long long>(this->largest_alloc_size),
static_cast<long long>(this->bytes_reserved),
static_cast<long long>(this->peak_bytes_reserved),
static_cast<long long>(this->largest_free_block_bytes));
}
constexpr size_t Allocator::kAllocatorAlignment;
Allocator::~Allocator() {}
static bool cpu_allocator_collect_stats = false;
void EnableCPUAllocatorStats() { cpu_allocator_collect_stats = true; }
void DisableCPUAllocatorStats() { cpu_allocator_collect_stats = false; }
bool CPUAllocatorStatsEnabled() { return cpu_allocator_collect_stats; }
static bool cpu_allocator_collect_full_stats = false;
void EnableCPUAllocatorFullStats() { cpu_allocator_collect_full_stats = true; }
bool CPUAllocatorFullStatsEnabled() { return cpu_allocator_collect_full_stats; }
string AllocatorAttributes::DebugString() const {
return strings::StrCat("AllocatorAttributes(on_host=", on_host(),
" nic_compatible=", nic_compatible(),
" gpu_compatible=", gpu_compatible(), ")");
}
Allocator* cpu_allocator_base() {
static Allocator* cpu_alloc =
AllocatorFactoryRegistry::singleton()->GetAllocator();
if (cpu_allocator_collect_full_stats && !cpu_alloc->TracksAllocationSizes()) {
cpu_alloc = new TrackingAllocator(cpu_alloc, true);
}
return cpu_alloc;
}
Allocator* cpu_allocator(int numa_node) {
static ProcessStateInterface* ps =
AllocatorFactoryRegistry::singleton()->process_state();
if (ps) {
return ps->GetCPUAllocator(numa_node);
} else {
return cpu_allocator_base();
}
}
SubAllocator::SubAllocator(const std::vector<Visitor>& alloc_visitors,
const std::vector<Visitor>& free_visitors)
: alloc_visitors_(alloc_visitors), free_visitors_(free_visitors) {}
void SubAllocator::VisitAlloc(void* ptr, int index, size_t num_bytes) {
for (const auto& v : alloc_visitors_) {
v(ptr, index, num_bytes);
}
}
void SubAllocator::VisitFree(void* ptr, int index, size_t num_bytes) {
for (int i = free_visitors_.size() - 1; i >= 0; --i) {
free_visitors_[i](ptr, index, num_bytes);
}
}
} | #include "tensorflow/core/framework/allocator.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/typed_allocator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/profiler/lib/profiler_session.h"
#include "tensorflow/core/profiler/protobuf/memory_profile.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
#include "tsl/profiler/utils/xplane_utils.h"
namespace tensorflow {
static void CheckStats(Allocator* a, int64_t num_allocs, int64_t bytes_in_use,
int64_t peak_bytes_in_use, int64_t largest_alloc_size) {
absl::optional<AllocatorStats> stats = a->GetStats();
EXPECT_TRUE(stats);
if (!stats) {
return;
}
LOG(INFO) << "Alloc stats: \n" << stats->DebugString();
#if defined(PLATFORM_GOOGLE) && defined(NDEBUG)
static const int64 kSlop = 5 * 1024;
EXPECT_GT(stats->bytes_in_use, bytes_in_use - kSlop);
EXPECT_LT(stats->bytes_in_use, bytes_in_use + kSlop);
EXPECT_GT(stats->peak_bytes_in_use, peak_bytes_in_use - kSlop);
EXPECT_LT(stats->peak_bytes_in_use, peak_bytes_in_use + kSlop);
EXPECT_EQ(stats->num_allocs, num_allocs);
EXPECT_EQ(stats->largest_alloc_size, largest_alloc_size);
#endif
}
TEST(AllocatorAttributesTest, AllCombos) {
for (bool on_host : {false, true}) {
for (bool nic_compatible : {false, true}) {
for (bool gpu_compatible : {false, true}) {
AllocatorAttributes aa;
aa.set_on_host(on_host);
aa.set_nic_compatible(nic_compatible);
aa.set_gpu_compatible(gpu_compatible);
EXPECT_EQ(on_host, aa.on_host());
EXPECT_EQ(nic_compatible, aa.nic_compatible());
EXPECT_EQ(gpu_compatible, aa.gpu_compatible());
}
}
}
}
TEST(AllocatorAttributesTest, IsEqualOrLessRestrictiveThan) {
AllocatorAttributes a, b;
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(b));
b.set_gpu_compatible(true);
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_FALSE(b.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(a.IsEqualOrLessRestrictiveThan(a));
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(b));
a.set_nic_compatible(true);
EXPECT_FALSE(a.IsEqualOrLessRestrictiveThan(b));
EXPECT_FALSE(b.IsEqualOrLessRestrictiveThan(a));
a.set_gpu_compatible(true);
EXPECT_TRUE(b.IsEqualOrLessRestrictiveThan(a));
EXPECT_FALSE(a.IsEqualOrLessRestrictiveThan(b));
}
TEST(AllocatorAttributesTest, Merge) {
AllocatorAttributes a, b;
EXPECT_EQ(a.value, 0);
EXPECT_EQ(b.value, 0);
EXPECT_FALSE(a.nic_compatible());
EXPECT_FALSE(b.nic_compatible());
b.set_nic_compatible(true);
a.Merge(b);
EXPECT_TRUE(a.nic_compatible());
EXPECT_TRUE(b.nic_compatible());
EXPECT_EQ(a.scope_id, 0);
EXPECT_EQ(b.scope_id, 0);
a.scope_id = 1;
a.Merge(b);
EXPECT_EQ(a.scope_id, 1);
EXPECT_EQ(b.scope_id, 0);
a.scope_id = 1;
b.scope_id = 0;
b.Merge(a);
EXPECT_EQ(a.scope_id, 1);
EXPECT_EQ(b.scope_id, 1);
a.scope_id = 2;
b.scope_id = 2;
a.Merge(b);
EXPECT_EQ(a.scope_id, 2);
EXPECT_EQ(b.scope_id, 2);
}
TEST(AllocatorAttributesDeathTest, MergeDifferentScopeIds) {
AllocatorAttributes a, b;
a.scope_id = 3;
b.scope_id = 4;
EXPECT_DEATH({ a.Merge(b); }, "");
}
TEST(CPUAllocatorTest, Simple) {
EnableCPUAllocatorStats();
Allocator* a = cpu_allocator();
std::vector<void*> ptrs;
for (int s = 1; s < 1024; s++) {
void* raw = a->AllocateRaw(1, s);
ptrs.push_back(raw);
}
std::sort(ptrs.begin(), ptrs.end());
CheckStats(a, 1023, 552640, 552640, 1024);
for (size_t i = 0; i < ptrs.size(); i++) {
if (i > 0) {
CHECK_NE(ptrs[i], ptrs[i - 1]);
}
a->DeallocateRaw(ptrs[i]);
}
CheckStats(a, 1023, 0, 552640, 1024);
float* t1 = TypedAllocator::Allocate<float>(a, 1024, {});
double* t2 = TypedAllocator::Allocate<double>(a, 1048576, {});
CheckStats(a, 1025, 1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double));
TypedAllocator::Deallocate(a, t1, 1024);
TypedAllocator::Deallocate(a, t2, 1048576);
CheckStats(a, 1025, 0, 1048576 * sizeof(double) + 1024 * sizeof(float),
1048576 * sizeof(double));
CHECK(a->ClearStats());
CheckStats(a, 0, 0, 0, 0);
DisableCPUAllocatorStats();
}
struct TestStruct {
int x;
};
TEST(CPUAllocatorTest, CheckStructSize) { CHECK_GT(sizeof(TestStruct), 1); }
TEST(CPUAllocatorTest, AllocateOverflowMaxSizeT) {
Allocator* a = cpu_allocator();
size_t count_to_allocate = std::numeric_limits<size_t>::max();
TestStruct* const test_pointer =
TypedAllocator::Allocate<TestStruct>(a, count_to_allocate, {});
CHECK_EQ(test_pointer, reinterpret_cast<TestStruct*>(NULL));
}
TEST(CPUAllocatorTest, AllocateOverflowSmallest) {
Allocator* a = cpu_allocator();
const size_t count_to_allocate =
(std::numeric_limits<size_t>::max() / sizeof(TestStruct)) + 1;
TestStruct* const test_pointer =
TypedAllocator::Allocate<TestStruct>(a, count_to_allocate, {});
CHECK_EQ(test_pointer, reinterpret_cast<TestStruct*>(NULL));
}
TEST(CPUAllocatorTest, Sizes) {
Allocator* a = cpu_allocator();
EXPECT_EQ(false, a->TracksAllocationSizes());
}
TEST(CPUAllocatorTest, ProfilerReporting) {
void* p = port::AlignedMalloc(8, 1);
const std::size_t alloc_size = port::MallocExtension_GetAllocatedSize(p);
port::AlignedFree(p);
if (alloc_size == 0) {
LOG(WARNING) << "Skipping Memory Debugging test. It requires "
<< "port::MallocExtension_GetAllocatedSize to work.";
return;
}
EnableCPUAllocatorStats();
Allocator* a = cpu_allocator();
void* p1 = a->AllocateRaw(1, 16);
std::unique_ptr<ProfilerSession> profiler =
tensorflow::ProfilerSession::Create(
tensorflow::ProfilerSession::DefaultOptions());
void* p2 = a->AllocateRaw(1, 32);
a->DeallocateRaw(p1);
tensorflow::profiler::XSpace xspace;
EXPECT_EQ(OkStatus(), profiler->CollectData(&xspace));
const auto plane = ::tsl::profiler::FindPlaneWithName(
xspace, ::tensorflow::profiler::kHostThreadsPlaneName);
::tensorflow::profiler::XPlaneVisitor xplane(plane);
ASSERT_EQ(plane->name(), ::tensorflow::profiler::kHostThreadsPlaneName)
<< "XSpace: " << xspace.DebugString();
ASSERT_EQ(plane->event_metadata_size(), 2)
<< "XSpace: " << xspace.DebugString();
const auto& line = plane->lines(0);
ASSERT_EQ(line.events_size(), 2) << "XSpace: " << xspace.DebugString();
const auto& events = line.events();
::tensorflow::profiler::XEventVisitor e0(&xplane, &line, &events[0]);
EXPECT_EQ(e0.Name(), "MemoryAllocation")
<< "XSpace: " << xspace.DebugString();
{
absl::optional<std::string> bytes_allocated, peak_bytes_in_use,
requested_bytes, allocation_bytes;
e0.ForEachStat([&](const ::tensorflow::profiler::XStatVisitor& stat) {
LOG(ERROR) << "STAT " << stat.Name() << ": " << stat.ToString();
if (stat.Name() == "bytes_allocated") {
bytes_allocated = stat.ToString();
} else if (stat.Name() == "peak_bytes_in_use") {
peak_bytes_in_use = stat.ToString();
} else if (stat.Name() == "requested_bytes") {
requested_bytes = stat.ToString();
} else if (stat.Name() == "allocation_bytes") {
allocation_bytes = stat.ToString();
}
});
ASSERT_TRUE(bytes_allocated && peak_bytes_in_use && requested_bytes &&
allocation_bytes)
<< "XSpace: " << xspace.DebugString();
EXPECT_EQ(*bytes_allocated, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*peak_bytes_in_use, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*requested_bytes, "32") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*allocation_bytes, "32") << "XSpace: " << xspace.DebugString();
}
::tensorflow::profiler::XEventVisitor e1(&xplane, &line, &events[1]);
EXPECT_EQ(e1.Name(), "MemoryDeallocation")
<< "XSpace: " << xspace.DebugString();
{
absl::optional<std::string> bytes_allocated, peak_bytes_in_use,
allocation_bytes;
e1.ForEachStat([&](const ::tensorflow::profiler::XStatVisitor& stat) {
if (stat.Name() == "bytes_allocated") {
bytes_allocated = stat.ToString();
} else if (stat.Name() == "peak_bytes_in_use") {
peak_bytes_in_use = stat.ToString();
} else if (stat.Name() == "allocation_bytes") {
allocation_bytes = stat.ToString();
}
});
ASSERT_TRUE(bytes_allocated && peak_bytes_in_use && allocation_bytes)
<< "XSpace: " << xspace.DebugString();
EXPECT_EQ(*bytes_allocated, "32") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*peak_bytes_in_use, "48") << "XSpace: " << xspace.DebugString();
EXPECT_EQ(*allocation_bytes, "16") << "XSpace: " << xspace.DebugString();
}
a->DeallocateRaw(p2);
DisableCPUAllocatorStats();
}
namespace {
AllocatorAttributes DeviceAllocatorAttribute() {
AllocatorAttributes attr;
attr.value |= (0x1 << 24);
return attr;
}
bool HasDeviceAllocatorAttribute(const AllocatorAttributes& attr) {
return attr.value & (0x1 << 24);
}
}
TEST(CustomAllocatorAttributes, TestSetterAndGetter) {
AllocatorAttributes attr = DeviceAllocatorAttribute();
EXPECT_TRUE(HasDeviceAllocatorAttribute(attr));
EXPECT_FALSE(HasDeviceAllocatorAttribute(AllocatorAttributes()));
}
static void BM_Allocation(::testing::benchmark::State& state) {
const int arg = state.range(0);
Allocator* a = cpu_allocator();
std::vector<int> sizes = {256, 4096, 16384, 524288, 512, 1048576};
int size_index = 0;
if (arg) EnableCPUAllocatorStats();
for (auto s : state) {
int bytes = sizes[size_index++ % sizes.size()];
void* p = a->AllocateRaw(1, bytes);
a->DeallocateRaw(p);
}
if (arg) DisableCPUAllocatorStats();
}
BENCHMARK(BM_Allocation)->Arg(0)->Arg(1);
} |
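A short sketch of extending the AllocatorWrapper base above: a hypothetical counting wrapper that forwards to the wrapped allocator while tracking allocation calls, without enabling global CPU allocator stats.

#include <cstdint>
#include "xla/tsl/framework/allocator.h"

class CountingAllocator : public tsl::AllocatorWrapper {
 public:
  explicit CountingAllocator(tsl::Allocator* wrapped)
      : tsl::AllocatorWrapper(wrapped) {}
  using tsl::AllocatorWrapper::AllocateRaw;  // keep the attr overload visible
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    ++num_allocs_;  // count before forwarding to the wrapped allocator
    return wrapped()->AllocateRaw(alignment, num_bytes);
  }
  int64_t num_allocs() const { return num_allocs_; }

 private:
  int64_t num_allocs_ = 0;
};

// Usage: CountingAllocator counting(tsl::cpu_allocator());
//        void* p = counting.AllocateRaw(64, 128);
//        counting.DeallocateRaw(p);  // num_allocs() == 1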
113 | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_ADD_NEW_DIMS_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_ADD_NEW_DIMS_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/string_like.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyAddNewDims(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only);
struct AddNewDimsOp {
static constexpr bool selected_dimensions_are_new = true;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return RankConstraint::Add(input_rank, num_input_dims);
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyAddNewDims(std::move(transform), dimensions, domain_only);
}
};
}
}
#endif
#include "tensorstore/index_space/internal/add_new_dims_op.h"
#include <cassert>
#include <utility>
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
void AddNewDims(TransformRep* original, TransformRep* result,
DimensionIndexBuffer* dimensions, bool domain_only) {
const DimensionIndex orig_input_rank = original->input_rank;
const DimensionIndex new_input_rank = orig_input_rank + dimensions->size();
assert(result->input_rank_capacity >= new_input_rank);
const DimensionIndex output_rank = domain_only ? 0 : original->output_rank;
assert(result->output_rank_capacity >= output_rank);
DimensionSet newly_added_input_dims;
for (DimensionIndex new_input_dim : *dimensions) {
newly_added_input_dims[new_input_dim] = true;
}
DimensionIndex orig_to_new_input_dim[kMaxRank];
for (DimensionIndex new_input_dim = 0, orig_input_dim = 0;
new_input_dim < new_input_rank; ++new_input_dim) {
if (newly_added_input_dims[new_input_dim]) continue;
orig_to_new_input_dim[orig_input_dim] = new_input_dim;
++orig_input_dim;
}
span<const OutputIndexMap> orig_maps =
original->output_index_maps().first(output_rank);
span<OutputIndexMap> result_maps =
result->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& orig_map = orig_maps[output_dim];
auto& result_map = result_maps[output_dim];
result_map.stride() = orig_map.stride();
result_map.offset() = orig_map.offset();
switch (orig_map.method()) {
case OutputIndexMethod::constant:
result_map.SetConstant();
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex orig_input_dim = orig_map.input_dimension();
assert(orig_input_dim >= 0 && orig_input_dim < orig_input_rank);
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
result_map.SetSingleInputDimension(new_input_dim);
break;
}
case OutputIndexMethod::array: {
auto& result_index_array = result_map.SetArrayIndexing(new_input_rank);
const auto& orig_index_array = orig_map.index_array_data();
for (DimensionIndex orig_input_dim = orig_input_rank - 1;
orig_input_dim >= 0; --orig_input_dim) {
const DimensionIndex new_input_dim =
orig_to_new_input_dim[orig_input_dim];
assert(new_input_dim >= orig_input_dim);
result_index_array.byte_strides[new_input_dim] =
orig_index_array.byte_strides[orig_input_dim];
}
for (const DimensionIndex new_input_dim : *dimensions) {
result_index_array.byte_strides[new_input_dim] = 0;
}
result_index_array.index_range = orig_index_array.index_range;
result_index_array.element_pointer = orig_index_array.element_pointer;
break;
}
}
}
for (DimensionIndex orig_input_dim = orig_input_rank - 1; orig_input_dim >= 0;
--orig_input_dim) {
const DimensionIndex new_input_dim = orig_to_new_input_dim[orig_input_dim];
result->input_dimension(new_input_dim) =
original->input_dimension(orig_input_dim);
}
for (DimensionIndex new_input_dim : *dimensions) {
const auto d = result->input_dimension(new_input_dim);
d.domain() = IndexInterval::UncheckedSized(-kInfIndex, kInfSize);
d.implicit_lower_bound() = true;
d.implicit_upper_bound() = true;
d.SetEmptyLabel();
}
result->input_rank = new_input_rank;
result->output_rank = output_rank;
}
}
Result<IndexTransform<>> ApplyAddNewDims(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
const DimensionIndex new_input_rank =
transform.input_rank() + dimensions->size();
TENSORSTORE_RETURN_IF_ERROR(ValidateRank(new_input_rank));
auto new_rep =
NewOrMutableRep(TransformAccess::rep(transform), new_input_rank,
transform.output_rank(), domain_only);
AddNewDims(TransformAccess::rep(transform), new_rep.get(), dimensions,
domain_only);
internal_index_space::DebugCheckInvariants(new_rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(new_rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(AddNewTest, Example) {
const auto expected_new_transform =
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"", "x", ""})
.output_single_input_dimension(0, 1)
.Finalize()
.value();
TestDimExpression(
IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.Finalize()
.value(),
Dims(0, -1).AddNew(),
{0, 2},
expected_new_transform,
expected_new_transform,
{
{{2}, {1, 2, 8}},
{{2}, {5, 2, 9}},
},
false);
}
TEST(AddNewTest, Simple) {
TestDimExpression(
IndexTransformBuilder<2, 3>()
.input_origin({2, 3})
.input_shape({3, 4})
.output_single_input_dimension(0, 1, 3, 1)
.output_single_input_dimension(1, 2, 4, 0)
.output_index_array(2, 3, 5,
MakeArray<Index>({{1, 2, 3, 4}}),
IndexInterval::Closed(-1, 10))
.Finalize()
.value(),
Dims(0, -1).AddNew(),
{0, 3},
IndexTransformBuilder<4, 2>()
.input_origin({-kInfIndex, 2, 3, -kInfIndex})
.input_shape({kInfSize, 3, 4, kInfSize})
.implicit_lower_bounds({1, 0, 0, 1})
.implicit_upper_bounds({1, 0, 0, 1})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.Finalize()
.value(),
IndexTransformBuilder<4, 3>()
.input_origin({-kInfIndex, 2, 3, -kInfIndex})
.input_shape({kInfSize, 3, 4, kInfSize})
.implicit_lower_bounds({1, 0, 0, 1})
.implicit_upper_bounds({1, 0, 0, 1})
.output_single_input_dimension(0, 1, 3, 2)
.output_single_input_dimension(1, 2, 4, 1)
.output_index_array(
2, 3, 5, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-1, 10))
.Finalize()
.value(),
{
{{3, 4}, {100, 3, 4, 500}},
{{3, 4}, {-100, 3, 4, -500}},
},
false);
}
TEST(AddNewTest, Constant) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.output_constant(0, 1)
.Finalize()
.value(),
Dims(0).AddNew(),
{0},
IndexTransformBuilder<2, 1>()
.input_origin({-kInfIndex, 1})
.input_shape({kInfSize, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 0})
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<2, 1>()
.input_origin({-kInfIndex, 1})
.input_shape({kInfSize, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 0})
.output_constant(0, 1)
.Finalize()
.value(),
{
{{1}, {-100, 1}},
{{1}, {100, 1}},
},
false);
}
TEST(AddNewTest, Labeled) {
TestDimExpression(IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"a"})
.output_constant(0, 1)
.Finalize()
.value(),
Dims(-1, 0).AddNew().Label("x", "y"),
{2, 0},
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"y", "a", "x"})
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<3, 1>()
.input_origin({-kInfIndex, 1, -kInfIndex})
.input_shape({kInfSize, 5, kInfSize})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"y", "a", "x"})
.output_constant(0, 1)
.Finalize()
.value(),
{
{{2}, {1, 2, 8}},
{{2}, {5, 2, 9}},
},
false);
}
TEST(AddNewTest, EmptyDimensionSelection) {
const auto transform = IndexTransformBuilder<1, 1>()
.input_origin({1})
.input_shape({5})
.input_labels({"x"})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
TestDimExpression(
transform,
Dims().AddNew(),
{},
transform,
transform,
{
{{2}, {2}},
{{3}, {3}},
},
true);
}
TEST(AddNewTest, InvalidRank) {
TestDimExpressionError(tensorstore::IdentityTransform(31),
Dims(0, 1).AddNew(),
absl::StatusCode::kInvalidArgument,
".*Rank 33 is outside valid range \\[0, 32\\]");
}
} |
114 | #ifndef TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CPU_UTILS_H_
#define TENSORFLOW_TSL_PLATFORM_PROFILE_UTILS_CPU_UTILS_H_
#include <chrono>
#include <memory>
#include "tsl/platform/macros.h"
#include "tsl/platform/profile_utils/i_cpu_utils_helper.h"
#include "tsl/platform/types.h"
#if defined(ARMV6) || defined(__ARM_ARCH_7A__)
#include <sys/time.h>
#endif
#if defined(_WIN32)
#include <intrin.h>
#endif
namespace tsl {
namespace profile_utils {
class CpuUtils {
public:
static constexpr int64_t INVALID_FREQUENCY = -1;
static constexpr uint64 DUMMY_CYCLE_CLOCK = 1;
static inline uint64 GetCurrentClockCycle() {
#if defined(__ANDROID__)
return GetCpuUtilsHelperSingletonInstance().GetCurrentClockCycle();
#elif defined(_WIN32)
return __rdtsc();
#elif defined(__x86_64__) || defined(__amd64__)
uint64_t high, low;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
#elif defined(__aarch64__)
uint64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
#elif defined(ARMV6) || defined(__ARM_ARCH_7A__)
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1) {
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul) {
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
return static_cast<uint64>(pmccntr) * 64;
}
}
return DUMMY_CYCLE_CLOCK;
#elif defined(__powerpc64__) || defined(__ppc64__)
uint64 __t;
__asm__ __volatile__("mfspr %0,268" : "=r"(__t));
return __t;
#elif defined(__powerpc__) || defined(__ppc__)
uint64 upper, lower, tmp;
__asm__ volatile(
"0: \n"
"\tmftbu %0 \n"
"\tmftb %1 \n"
"\tmftbu %2 \n"
"\tcmpw %2,%0 \n"
"\tbne 0b \n"
: "=r"(upper), "=r"(lower), "=r"(tmp));
return ((static_cast<uint64>(upper) << 32) | lower);
#elif defined(__s390x__)
uint64 t;
__asm__ __volatile__("stckf %0" : "=Q"(t));
return t;
#else
return DUMMY_CYCLE_CLOCK;
#endif
}
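// Usage sketch (hypothetical): timing a code region in cycles and converting
// the result to wall time; `DoWork` stands in for any workload.
//   const uint64 start = CpuUtils::GetCurrentClockCycle();
//   DoWork();
//   const uint64 elapsed = CpuUtils::GetCurrentClockCycle() - start;
//   const auto seconds = CpuUtils::ConvertClockCycleToTime(elapsed);
//
// Note: in the #if below, `&&` binds tighter than `||`, so the condition
// matches __powerpc__ (any endianness), little-endian __ppc__, or __s390x__.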
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
(defined(__s390x__))
static uint64 GetCycleCounterFrequency();
#else
static int64_t GetCycleCounterFrequency();
#endif
static double GetMicroSecPerClock();
static void ResetClockCycle();
static void EnableClockCycleProfiling();
static void DisableClockCycleProfiling();
static std::chrono::duration<double> ConvertClockCycleToTime(
const int64_t clock_cycle);
private:
class DefaultCpuUtilsHelper : public ICpuUtilsHelper {
public:
DefaultCpuUtilsHelper() = default;
void ResetClockCycle() final {}
uint64 GetCurrentClockCycle() final { return DUMMY_CYCLE_CLOCK; }
void EnableClockCycleProfiling() final {}
void DisableClockCycleProfiling() final {}
int64_t CalculateCpuFrequency() final { return INVALID_FREQUENCY; }
private:
DefaultCpuUtilsHelper(const DefaultCpuUtilsHelper&) = delete;
void operator=(const DefaultCpuUtilsHelper&) = delete;
};
static int64_t GetCycleCounterFrequencyImpl();
static ICpuUtilsHelper& GetCpuUtilsHelperSingletonInstance();
CpuUtils(const CpuUtils&) = delete;
void operator=(const CpuUtils&) = delete;
};
}
}
#endif
#include "tsl/platform/profile_utils/cpu_utils.h"
#include <fstream>
#include <limits>
#include <mutex>
#if defined(_WIN32)
#include <windows.h>
#endif
#if defined(__APPLE__)
#include <sys/sysctl.h>
#endif
#include "absl/base/call_once.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/profile_utils/android_armv7a_cpu_utils_helper.h"
namespace tsl {
namespace profile_utils {
constexpr int64_t CpuUtils::INVALID_FREQUENCY;
static ICpuUtilsHelper* cpu_utils_helper_instance_ = nullptr;
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
(defined(__s390x__))
uint64 CpuUtils::GetCycleCounterFrequency() {
static const uint64 cpu_frequency = GetCycleCounterFrequencyImpl();
return cpu_frequency;
}
#else
int64_t CpuUtils::GetCycleCounterFrequency() {
static const int64_t cpu_frequency = GetCycleCounterFrequencyImpl();
return cpu_frequency;
}
#endif
double CpuUtils::GetMicroSecPerClock() {
static const double micro_sec_per_clock =
(1000.0 * 1000.0) / static_cast<double>(GetCycleCounterFrequency());
return micro_sec_per_clock;
}
void CpuUtils::ResetClockCycle() {
GetCpuUtilsHelperSingletonInstance().ResetClockCycle();
}
void CpuUtils::EnableClockCycleProfiling() {
GetCpuUtilsHelperSingletonInstance().EnableClockCycleProfiling();
}
void CpuUtils::DisableClockCycleProfiling() {
GetCpuUtilsHelperSingletonInstance().DisableClockCycleProfiling();
}
std::chrono::duration<double> CpuUtils::ConvertClockCycleToTime(
const int64_t clock_cycle) {
return std::chrono::duration<double>(static_cast<double>(clock_cycle) /
GetCycleCounterFrequency());
}
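// Determines the cycle counter frequency from a platform-specific source:
// the Android helper, /proc/cpuinfo on Linux, sysctl on Apple platforms, or
// QueryPerformanceFrequency on Windows; returns INVALID_FREQUENCY on failure.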
int64_t CpuUtils::GetCycleCounterFrequencyImpl() {
#if defined(__ANDROID__)
return GetCpuUtilsHelperSingletonInstance().CalculateCpuFrequency();
#elif defined(__linux__)
std::ifstream cpuinfo("/proc/cpuinfo");
if (!cpuinfo) {
LOG(WARNING) << "Failed to open /proc/cpuinfo";
return INVALID_FREQUENCY;
}
string line;
while (std::getline(cpuinfo, line)) {
double cpu_freq = 0.0;
int retval = 0;
double freq_factor = 2.0;
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__))
retval = sscanf(line.c_str(), "clock : %lfMHz", &cpu_freq);
freq_factor = 1.0;
#elif defined(__s390x__)
retval = sscanf(line.c_str(), "bogomips per cpu: %lf", &cpu_freq);
#elif defined(__aarch64__)
retval = sscanf(line.c_str(), "BogoMIPS : %lf", &cpu_freq);
#else
retval = sscanf(line.c_str(), "bogomips : %lf", &cpu_freq);
#endif
if (retval > 0) {
const double freq_ghz = cpu_freq / 1000.0 / freq_factor;
if (retval != 1 || freq_ghz < 0.01) {
LOG(WARNING) << "Failed to get CPU frequency: " << freq_ghz << " GHz";
return INVALID_FREQUENCY;
}
const int64_t freq_n =
static_cast<int64_t>(freq_ghz * 1000.0 * 1000.0 * 1000.0);
VLOG(1) << "CPU Frequency: " << freq_n << " Hz";
return freq_n;
}
}
LOG(WARNING)
<< "Failed to find bogomips or clock in /proc/cpuinfo; cannot determine "
"CPU frequency";
return INVALID_FREQUENCY;
#elif defined(__APPLE__)
int64_t freq_hz = 0;
size_t freq_hz_size = sizeof(freq_hz);
int retval =
sysctlbyname("hw.cpufrequency_max", &freq_hz, &freq_hz_size, NULL, 0);
if (retval != 0 || freq_hz < 1e6) {
int64_t tbfrequency = 0;
size_t tbfrequency_size = sizeof(tbfrequency);
retval = sysctlbyname("hw.tbfrequency", &tbfrequency, &tbfrequency_size,
NULL, 0);
if (retval == 0) {
clockinfo clock_info;
size_t clock_info_size = sizeof(clock_info);
retval = sysctlbyname("kern.clockrate", &clock_info, &clock_info_size,
NULL, 0);
if (retval == 0) {
freq_hz = clock_info.hz * tbfrequency;
}
}
if (retval != 0 || freq_hz < 1e6) {
LOG(WARNING) << "Failed to get CPU frequency: " << freq_hz << " Hz";
return INVALID_FREQUENCY;
}
}
return freq_hz;
#elif defined(_WIN32)
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
return freq.QuadPart;
#else
return INVALID_FREQUENCY;
#endif
}
ICpuUtilsHelper& CpuUtils::GetCpuUtilsHelperSingletonInstance() {
static absl::once_flag flag;
absl::call_once(flag, []() {
if (cpu_utils_helper_instance_ != nullptr) {
LOG(FATAL) << "cpu_utils_helper_instance_ is already instantiated.";
}
#if defined(__ANDROID__) && (__ANDROID_API__ >= 21) && \
(defined(__ARM_ARCH_7A__) || defined(__aarch64__))
cpu_utils_helper_instance_ = new AndroidArmV7ACpuUtilsHelper();
#else
cpu_utils_helper_instance_ = new DefaultCpuUtilsHelper();
#endif
});
return *cpu_utils_helper_instance_;
}
}
} | #include "tsl/platform/profile_utils/cpu_utils.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/profile_utils/clock_cycle_profiler.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profile_utils {
static constexpr bool DBG = false;
class CpuUtilsTest : public ::testing::Test {
protected:
void SetUp() override { CpuUtils::EnableClockCycleProfiling(); }
};
TEST_F(CpuUtilsTest, SetUpTestCase) {}
TEST_F(CpuUtilsTest, TearDownTestCase) {}
TEST_F(CpuUtilsTest, CheckGetCurrentClockCycle) {
static constexpr int LOOP_COUNT = 10;
const uint64 start_clock_count = CpuUtils::GetCurrentClockCycle();
CHECK_GT(start_clock_count, 0);
uint64 prev_clock_count = start_clock_count;
for (int i = 0; i < LOOP_COUNT; ++i) {
const uint64 clock_count = CpuUtils::GetCurrentClockCycle();
CHECK_GE(clock_count, prev_clock_count);
prev_clock_count = clock_count;
}
const uint64 end_clock_count = CpuUtils::GetCurrentClockCycle();
if (DBG) {
LOG(INFO) << "start clock = " << start_clock_count;
LOG(INFO) << "end clock = " << end_clock_count;
LOG(INFO) << "average clock = "
<< ((end_clock_count - start_clock_count) / LOOP_COUNT);
}
}
TEST_F(CpuUtilsTest, CheckCycleCounterFrequency) {
#if (defined(__powerpc__) || \
defined(__ppc__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)) || \
(defined(__s390x__))
const uint64 cpu_frequency = CpuUtils::GetCycleCounterFrequency();
CHECK_GT(cpu_frequency, 0);
CHECK_NE(cpu_frequency, unsigned(CpuUtils::INVALID_FREQUENCY));
#else
const int64_t cpu_frequency = CpuUtils::GetCycleCounterFrequency();
CHECK_GT(cpu_frequency, 0);
CHECK_NE(cpu_frequency, CpuUtils::INVALID_FREQUENCY);
#endif
if (DBG) {
LOG(INFO) << "Cpu frequency = " << cpu_frequency;
}
}
TEST_F(CpuUtilsTest, CheckMicroSecPerClock) {
const double micro_sec_per_clock = CpuUtils::GetMicroSecPerClock();
CHECK_GT(micro_sec_per_clock, 0.0);
if (DBG) {
LOG(INFO) << "Micro sec per clock = " << micro_sec_per_clock;
}
}
TEST_F(CpuUtilsTest, SimpleUsageOfClockCycleProfiler) {
static constexpr int LOOP_COUNT = 10;
ClockCycleProfiler prof;
for (int i = 0; i < LOOP_COUNT; ++i) {
prof.Start();
prof.Stop();
}
EXPECT_EQ(LOOP_COUNT, static_cast<int>(prof.GetCount() + 0.5));
if (DBG) {
prof.DumpStatistics("CpuUtilsTest");
}
}
}
} |
115 | #ifndef XLA_SERVICE_GATHER_EXPANDER_H_
#define XLA_SERVICE_GATHER_EXPANDER_H_
#include "xla/service/op_expander_pass.h"
namespace xla {
class GatherExpander : public OpExpanderPass {
public:
enum Mode {
kEliminateAllGathers,
kEliminateSimpleGathers,
};
explicit GatherExpander(Mode m) : mode_(m) {}
absl::string_view name() const override { return "gather_expander"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* gather_inst) override;
private:
Mode mode_;
};
}
#endif
#include "xla/service/gather_expander.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
namespace xla {
namespace {
absl::StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
HloInstruction* start_indices, int64_t index_vector_dim) {
const Shape& start_indices_shape = start_indices->shape();
if (start_indices_shape.dimensions_size() == index_vector_dim) {
return start_indices;
}
if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) {
return start_indices;
}
std::vector<int64_t> permutation;
permutation.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
permutation.push_back(i);
}
}
permutation.push_back(index_vector_dim);
return MakeTransposeHlo(start_indices, permutation);
}
absl::StatusOr<HloInstruction*> CanonicalizeGatherIndices(
HloInstruction* start_indices, int64_t index_vector_dim) {
TF_ASSIGN_OR_RETURN(
HloInstruction * transposed_start_indices,
TransposeIndexVectorDimToLast(start_indices, index_vector_dim));
bool indices_are_scalar =
index_vector_dim == start_indices->shape().dimensions_size();
const int64_t index_dims_in_start_indices = indices_are_scalar ? 0 : 1;
const Shape& shape = transposed_start_indices->shape();
if (shape.dimensions_size() == index_dims_in_start_indices) {
return PrependDegenerateDims(transposed_start_indices, 1);
} else {
return CollapseFirstNDims(
transposed_start_indices,
shape.dimensions_size() - index_dims_in_start_indices);
}
}
absl::StatusOr<HloInstruction*> AdjustBatchDimsInAccumulator(
const Shape& start_indices_shape, HloInstruction* accumulator,
int64_t index_vector_dim) {
std::vector<int64_t> batch_dim_bounds;
batch_dim_bounds.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
batch_dim_bounds.push_back(start_indices_shape.dimensions(i));
}
}
if (batch_dim_bounds.empty()) {
return ElideDegenerateDims(accumulator, {0});
}
return ExpandFirstDimIntoNDims(accumulator, batch_dim_bounds);
}
absl::StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
HloInstruction* index_vector, const GatherDimensionNumbers& dim_numbers,
int64_t operand_rank) {
HloComputation* computation = index_vector->parent();
const Shape& index_shape = index_vector->shape();
if (operand_rank == 0) {
return computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0})));
}
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
std::vector<HloInstruction*> expanded_index_components;
for (int i = 0; i < operand_rank; i++) {
int64_t index_vector_dim_index =
FindIndex(dim_numbers.start_index_map(), i);
if (index_vector_dim_index != dim_numbers.start_index_map_size()) {
TF_ASSIGN_OR_RETURN(
HloInstruction * component_to_concat,
MakeSliceHlo(index_vector, /*start_indices=*/{index_vector_dim_index},
/*limit_indices=*/{index_vector_dim_index + 1},
/*strides=*/{1}));
expanded_index_components.push_back(component_to_concat);
} else {
expanded_index_components.push_back(zero);
}
}
return MakeConcatHlo(expanded_index_components, 0);
}
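// Body of the gather while loop. The loop state is (operand, start_indices,
// accumulator): each iteration extracts one index vector, dynamic-slices the
// corresponding window out of the operand, and writes it into the accumulator
// at the position given by the induction variable.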
absl::StatusOr<std::vector<HloInstruction*>> GatherLoopBody(
const HloInstruction& gather, HloInstruction* induction_var,
const std::vector<HloInstruction*>& incoming_loop_state) {
const GatherDimensionNumbers& dim_numbers = gather.gather_dimension_numbers();
CHECK_EQ(incoming_loop_state.size(), 3);
HloInstruction* const operand = incoming_loop_state[0];
HloInstruction* const start_indices = incoming_loop_state[1];
HloInstruction* const output_accumulator = incoming_loop_state[2];
bool has_scalar_indices = start_indices->shape().dimensions_size() == 1;
CHECK_EQ(has_scalar_indices,
dim_numbers.index_vector_dim() ==
gather.operand(1)->shape().dimensions_size());
HloInstruction* induction_var_as_vector =
MakeBroadcastHlo(induction_var, /*broadcast_dimensions=*/{},
/*result_shape_bounds=*/{1});
HloInstruction* index_vector;
if (has_scalar_indices) {
TF_ASSIGN_OR_RETURN(
index_vector,
MakeDynamicSliceHlo(start_indices, induction_var_as_vector, {1}));
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * index_into_start_indices,
PadVectorWithZeros(induction_var_as_vector,
/*zeros_to_prepend=*/0, /*zeros_to_append=*/1));
int64_t index_vector_size = start_indices->shape().dimensions(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * index_vector_2d,
MakeDynamicSliceHlo(start_indices, index_into_start_indices,
{1, index_vector_size}));
TF_ASSIGN_OR_RETURN(index_vector,
ElideDegenerateDims(index_vector_2d, {0}));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * gathered_slice_start,
ExpandIndexVectorIntoOperandSpace(index_vector, dim_numbers,
operand->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(HloInstruction * gathered_slice,
MakeDynamicSliceHlo(operand, gathered_slice_start,
gather.gather_slice_sizes()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_with_dims_collapsed,
ElideDegenerateDims(gathered_slice, dim_numbers.collapsed_slice_dims()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_for_update,
PrependDegenerateDims(gathered_slice_with_dims_collapsed, 1));
TF_ASSIGN_OR_RETURN(
HloInstruction* const index_vector_into_accumulator,
PadVectorWithZeros(
induction_var_as_vector, 0,
gathered_slice_with_dims_collapsed->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const updated_accumulator,
MakeDynamicUpdateSliceHlo(output_accumulator, gathered_slice_for_update,
index_vector_into_accumulator));
return absl::StatusOr<std::vector<HloInstruction*>>{
{operand, start_indices, updated_accumulator}};
}
HloInstruction* CreateGatherLoopAccumulatorInitValue(
HloComputation* computation, PrimitiveType element_type,
absl::Span<const int64_t> slice_sizes, int64_t gather_loop_trip_count,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64_t> accumulator_state_shape_dims;
accumulator_state_shape_dims.reserve(1 + slice_sizes.size());
accumulator_state_shape_dims.push_back(gather_loop_trip_count);
for (int64_t i = 0; i < slice_sizes.size(); i++) {
if (!absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
accumulator_state_shape_dims.push_back(slice_sizes[i]);
}
}
return BroadcastZeros(computation, element_type,
accumulator_state_shape_dims);
}
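// Transposes the accumulator so that batch dimensions and offset dimensions
// land in the positions required by the gather's output shape.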
absl::StatusOr<HloInstruction*> PermuteBatchAndOffsetDims(
HloInstruction* accumulator, absl::Span<const int64_t> offset_dims,
int64_t output_rank) {
std::vector<int64_t> permutation;
permutation.reserve(output_rank);
int64_t batch_idx_counter = 0;
int64_t offset_idx_counter = output_rank - offset_dims.size();
for (int64_t i = 0; i < output_rank; i++) {
bool is_offset_dim = absl::c_binary_search(offset_dims, i);
if (is_offset_dim) {
permutation.push_back(offset_idx_counter++);
} else {
permutation.push_back(batch_idx_counter++);
}
}
return MakeTransposeHlo(accumulator, permutation);
}
int64_t GatherLoopTripCount(HloInstruction* gather_instr) {
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& start_indices_shape = start_indices->shape();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t trip_count = 1;
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != dim_numbers.index_vector_dim()) {
trip_count *= start_indices_shape.dimensions(i);
}
}
return trip_count;
}
// Returns true if the gather copies the whole operand per index (the slice
// sizes equal the operand shape), so it can be lowered to a broadcast.
bool GatherIsBroadcast(HloInstruction* gather_instr) {
  return absl::c_equal(gather_instr->gather_slice_sizes(),
                       gather_instr->operand(0)->shape().dimensions());
}
}
absl::StatusOr<HloInstruction*> GatherExpander::ExpandInstruction(
HloInstruction* gather_instr) {
CHECK(!ShapeUtil::IsZeroElementArray(gather_instr->shape()));
if (GatherIsBroadcast(gather_instr)) {
if (ShapeUtil::IsZeroElementArray(gather_instr->operand(0)->shape())) {
return MakeScalarLike(gather_instr, 0);
}
Shape broadcast_operand_shape = ShapeUtil::DeleteDimensions(
gather_instr->gather_dimension_numbers().collapsed_slice_dims(),
gather_instr->operand(0)->shape());
TF_ASSIGN_OR_RETURN(HloInstruction * broadcast_operand,
MakeReshapeHlo(broadcast_operand_shape,
gather_instr->mutable_operand(0)));
gather_instr->SetupDerivedInstruction(broadcast_operand);
HloInstruction* broadcast =
MakeBroadcastHlo(broadcast_operand,
gather_instr->gather_dimension_numbers().offset_dims(),
gather_instr->shape());
gather_instr->SetupDerivedInstruction(broadcast);
return broadcast;
}
HloComputation* computation = gather_instr->parent();
HloInstruction* operand = gather_instr->mutable_operand(0);
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& output_shape = gather_instr->shape();
int64_t output_rank = output_shape.dimensions_size();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t gather_loop_trip_count = GatherLoopTripCount(gather_instr);
if (!IsInt32(gather_loop_trip_count)) {
return Unimplemented(
"Gather operations with more than 2147483647 gather indices are not "
"supported. This error occurred for %s.",
gather_instr->ToString());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * canonical_start_indices,
CanonicalizeGatherIndices(start_indices, dim_numbers.index_vector_dim()));
CHECK_EQ(gather_loop_trip_count,
canonical_start_indices->shape().dimensions(0));
HloInstruction* accumulator_init = CreateGatherLoopAccumulatorInitValue(
computation, output_shape.element_type(),
gather_instr->gather_slice_sizes(), gather_loop_trip_count,
gather_instr->gather_dimension_numbers());
absl::StatusOr<std::vector<HloInstruction*>> gather_loop_result_or_error =
WhileUtil::MakeCountedLoop(
computation, gather_loop_trip_count,
{operand, canonical_start_indices, accumulator_init},
[&](HloInstruction* indvar,
const std::vector<HloInstruction*>& loop_state) {
return GatherLoopBody(*gather_instr, indvar, loop_state);
},
gather_instr->metadata());
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> gather_loop_result,
gather_loop_result_or_error);
HloInstruction* accumulator_result = gather_loop_result.back();
TF_ASSIGN_OR_RETURN(
HloInstruction* const accumulator_with_batch_dims_decanonicalized,
AdjustBatchDimsInAccumulator(start_indices->shape(), accumulator_result,
dim_numbers.index_vector_dim()));
return PermuteBatchAndOffsetDims(accumulator_with_batch_dims_decanonicalized,
dim_numbers.offset_dims(), output_rank);
}
bool GatherExpander::InstructionMatchesPattern(HloInstruction* inst) {
return inst->opcode() == HloOpcode::kGather &&
!ShapeUtil::IsZeroElementArray(inst->shape()) &&
(mode_ == kEliminateAllGathers || GatherLoopTripCount(inst) == 1 ||
absl::c_equal(inst->gather_slice_sizes(),
inst->operand(0)->shape().dimensions()));
}
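// Usage sketch (hypothetical): running the pass directly on a module, as the
// tests below do:
//   GatherExpander expander(GatherExpander::kEliminateSimpleGathers);
//   TF_ASSIGN_OR_RETURN(bool changed, expander.Run(module.get()));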
} | #include "xla/service/gather_expander.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
using GatherExpanderTest = HloTestBase;
TEST_F(GatherExpanderTest, ErrorStatusOnTooManyIndices) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherMultipleBatchDims
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2147483647,5] parameter(1)
ROOT gather = s32[2147483647,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
absl::Status status = GatherExpander{GatherExpander::kEliminateAllGathers}
.Run(module.get())
.status();
EXPECT_EQ(status.code(), tsl::error::UNIMPLEMENTED);
ASSERT_THAT(
status.message(),
::testing::HasSubstr("Gather operations with more than 2147483647 gather "
"indices are not supported."));
}
TEST_F(GatherExpanderTest, AvoidDegenerateDims) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
const Shape& while_shape = while_instr->shape();
ASSERT_TRUE(while_shape.IsTuple());
ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4);
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {3, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 1)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::GetTupleElementShape(while_shape, 2)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 3)));
}
TEST_F(GatherExpanderTest, CheckOpMetadata) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
OpMetadata metadata;
metadata.set_op_name("Gather");
module->entry_computation()->root_instruction()->set_metadata(metadata);
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
EXPECT_EQ(while_instr->metadata().op_name(), "Gather");
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersSkipsNontrivialGather) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_FALSE(changed);
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersRewritesTrivialGather) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[100] parameter(0)
indices = s32[1] parameter(1)
ROOT gather = s32[10] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=0,
slice_sizes={10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateAllGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
}
TEST_F(GatherExpanderTest, GatherIsBroadcast) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[1,3] parameter(0)
indices = s32[7,5] parameter(1)
ROOT gather = s32[7,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
ASSERT_TRUE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kBroadcast}));
module->VerifyOrAddFailure("after-gather-expander.");
}
}
} |
116 | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_FUNCTION_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_FUNCTION_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/sized_input_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeView;
class FunctionType;
class FunctionTypeView;
namespace common_internal {
struct FunctionTypeData;
}
class FunctionType final {
public:
using view_alternative_type = FunctionTypeView;
static constexpr TypeKind kKind = TypeKind::kFunction;
explicit FunctionType(FunctionTypeView other);
FunctionType(MemoryManagerRef memory_manager, TypeView result,
const SizedInputView<TypeView>& args);
FunctionType() = delete;
FunctionType(const FunctionType&) = default;
FunctionType(FunctionType&&) = default;
FunctionType& operator=(const FunctionType&) = default;
FunctionType& operator=(FunctionType&&) = default;
constexpr TypeKind kind() const { return kKind; }
absl::string_view name() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
return "function";
}
absl::Span<const Type> parameters() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
std::string DebugString() const;
const Type& result() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
absl::Span<const Type> args() const ABSL_ATTRIBUTE_LIFETIME_BOUND;
void swap(FunctionType& other) noexcept {
using std::swap;
swap(data_, other.data_);
}
private:
friend class FunctionTypeView;
friend struct NativeTypeTraits<FunctionType>;
Shared<const common_internal::FunctionTypeData> data_;
};
inline void swap(FunctionType& lhs, FunctionType& rhs) noexcept {
lhs.swap(rhs);
}
bool operator==(const FunctionType& lhs, const FunctionType& rhs);
inline bool operator!=(const FunctionType& lhs, const FunctionType& rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, const FunctionType& type);
inline std::ostream& operator<<(std::ostream& out, const FunctionType& type) {
return out << type.DebugString();
}
template <>
struct NativeTypeTraits<FunctionType> final {
static bool SkipDestructor(const FunctionType& type) {
return NativeType::SkipDestructor(type.data_);
}
};
class FunctionTypeView final {
public:
using alternative_type = FunctionType;
static constexpr TypeKind kKind = FunctionType::kKind;
FunctionTypeView(
const FunctionType& type ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept;
FunctionTypeView& operator=(
const FunctionType& type ABSL_ATTRIBUTE_LIFETIME_BOUND) {
data_ = type.data_;
return *this;
}
FunctionTypeView& operator=(FunctionType&&) = delete;
FunctionTypeView() = delete;
FunctionTypeView(const FunctionTypeView&) = default;
FunctionTypeView(FunctionTypeView&&) = default;
FunctionTypeView& operator=(const FunctionTypeView&) = default;
FunctionTypeView& operator=(FunctionTypeView&&) = default;
constexpr TypeKind kind() const { return kKind; }
absl::string_view name() const { return "function"; }
std::string DebugString() const;
absl::Span<const Type> parameters() const;
const Type& result() const;
absl::Span<const Type> args() const;
void swap(FunctionTypeView& other) noexcept {
using std::swap;
swap(data_, other.data_);
}
private:
friend class FunctionType;
SharedView<const common_internal::FunctionTypeData> data_;
};
inline void swap(FunctionTypeView& lhs, FunctionTypeView& rhs) noexcept {
lhs.swap(rhs);
}
bool operator==(FunctionTypeView lhs, FunctionTypeView rhs);
inline bool operator!=(FunctionTypeView lhs, FunctionTypeView rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, FunctionTypeView type);
inline std::ostream& operator<<(std::ostream& out, FunctionTypeView type) {
return out << type.DebugString();
}
}
#endif
#include <cstddef>
#include <string>
#include "absl/container/fixed_array.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "common/memory.h"
#include "common/sized_input_view.h"
#include "common/type.h"
namespace cel {
namespace {
struct TypeFormatter {
void operator()(std::string* out, const Type& type) const {
out->append(type.DebugString());
}
};
std::string FunctionDebugString(const Type& result,
absl::Span<const Type> args) {
return absl::StrCat("(", absl::StrJoin(args, ", ", TypeFormatter{}), ") -> ",
result.DebugString());
}
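// Packs the result type at index 0 followed by the argument types; this is
// the flat layout backing FunctionType::result() and FunctionType::args().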
absl::FixedArray<Type, 3> SizedInputViewToFixedArray(
TypeView result, const SizedInputView<TypeView>& args) {
absl::FixedArray<Type, 3> fixed_args(1 + args.size());
size_t index = 0;
fixed_args[index++] = Type(result);
for (const auto& arg : args) {
fixed_args[index++] = Type(arg);
}
ABSL_DCHECK_EQ(index, 1 + args.size());
return fixed_args;
}
}
FunctionType::FunctionType(MemoryManagerRef memory_manager, TypeView result,
const SizedInputView<TypeView>& args)
: data_(memory_manager.MakeShared<common_internal::FunctionTypeData>(
SizedInputViewToFixedArray(result, args))) {}
std::string FunctionType::DebugString() const {
return FunctionDebugString(result(), args());
}
std::string FunctionTypeView::DebugString() const {
return FunctionDebugString(result(), args());
}
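// Usage sketch (hypothetical): constructing and printing a function type,
// given a `memory_manager` such as the one used in the tests below:
//   FunctionType t(memory_manager, DynType{}, {BytesType(), IntType()});
//   t.DebugString();  // "(bytes, int) -> dyn"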
} | #include <sstream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/native_type.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
using testing::An;
using testing::Ne;
using testing::TestParamInfo;
using testing::TestWithParam;
class FunctionTypeTest : public common_internal::ThreadCompatibleMemoryTest<> {
};
TEST_P(FunctionTypeTest, Kind) {
EXPECT_EQ(FunctionType(memory_manager(), DynType{}, {BytesType()}).kind(),
FunctionType::kKind);
EXPECT_EQ(
Type(FunctionType(memory_manager(), DynType{}, {BytesType()})).kind(),
FunctionType::kKind);
}
TEST_P(FunctionTypeTest, Name) {
EXPECT_EQ(FunctionType(memory_manager(), DynType{}, {BytesType()}).name(),
"function");
EXPECT_EQ(
Type(FunctionType(memory_manager(), DynType{}, {BytesType()})).name(),
"function");
}
TEST_P(FunctionTypeTest, DebugString) {
{
std::ostringstream out;
out << FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_EQ(out.str(), "(bytes) -> dyn");
}
{
std::ostringstream out;
out << Type(FunctionType(memory_manager(), DynType{}, {BytesType()}));
EXPECT_EQ(out.str(), "(bytes) -> dyn");
}
}
TEST_P(FunctionTypeTest, Hash) {
EXPECT_EQ(
absl::HashOf(FunctionType(memory_manager(), DynType{}, {BytesType()})),
absl::HashOf(FunctionType(memory_manager(), DynType{}, {BytesType()})));
}
TEST_P(FunctionTypeTest, Equal) {
EXPECT_EQ(FunctionType(memory_manager(), DynType{}, {BytesType()}),
FunctionType(memory_manager(), DynType{}, {BytesType()}));
EXPECT_EQ(Type(FunctionType(memory_manager(), DynType{}, {BytesType()})),
FunctionType(memory_manager(), DynType{}, {BytesType()}));
EXPECT_EQ(FunctionType(memory_manager(), DynType{}, {BytesType()}),
Type(FunctionType(memory_manager(), DynType{}, {BytesType()})));
EXPECT_EQ(Type(FunctionType(memory_manager(), DynType{}, {BytesType()})),
Type(FunctionType(memory_manager(), DynType{}, {BytesType()})));
}
TEST_P(FunctionTypeTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(
FunctionType(memory_manager(), DynType{}, {BytesType()})),
NativeTypeId::For<FunctionType>());
EXPECT_EQ(NativeTypeId::Of(
Type(FunctionType(memory_manager(), DynType{}, {BytesType()}))),
NativeTypeId::For<FunctionType>());
}
TEST_P(FunctionTypeTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<FunctionType>(
FunctionType(memory_manager(), DynType{}, {BytesType()})));
EXPECT_TRUE(InstanceOf<FunctionType>(
Type(FunctionType(memory_manager(), DynType{}, {BytesType()}))));
}
TEST_P(FunctionTypeTest, Cast) {
EXPECT_THAT(Cast<FunctionType>(
FunctionType(memory_manager(), DynType{}, {BytesType()})),
An<FunctionType>());
EXPECT_THAT(Cast<FunctionType>(Type(
FunctionType(memory_manager(), DynType{}, {BytesType()}))),
An<FunctionType>());
}
TEST_P(FunctionTypeTest, As) {
EXPECT_THAT(As<FunctionType>(
FunctionType(memory_manager(), DynType{}, {BytesType()})),
Ne(absl::nullopt));
EXPECT_THAT(As<FunctionType>(Type(
FunctionType(memory_manager(), DynType{}, {BytesType()}))),
Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
FunctionTypeTest, FunctionTypeTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
FunctionTypeTest::ToString);
class FunctionTypeViewTest
: public common_internal::ThreadCompatibleMemoryTest<> {};
TEST_P(FunctionTypeViewTest, Kind) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_EQ(FunctionTypeView(type).kind(), FunctionTypeView::kKind);
EXPECT_EQ(TypeView(FunctionTypeView(type)).kind(), FunctionTypeView::kKind);
}
TEST_P(FunctionTypeViewTest, Name) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_EQ(FunctionTypeView(type).name(), "function");
EXPECT_EQ(TypeView(FunctionTypeView(type)).name(), "function");
}
TEST_P(FunctionTypeViewTest, DebugString) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
{
std::ostringstream out;
out << FunctionTypeView(type);
EXPECT_EQ(out.str(), "(bytes) -> dyn");
}
{
std::ostringstream out;
out << TypeView(FunctionTypeView(type));
EXPECT_EQ(out.str(), "(bytes) -> dyn");
}
}
TEST_P(FunctionTypeViewTest, Hash) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_EQ(absl::HashOf(FunctionTypeView(type)),
absl::HashOf(FunctionTypeView(type)));
EXPECT_EQ(absl::HashOf(FunctionTypeView(type)),
absl::HashOf(FunctionType(type)));
}
TEST_P(FunctionTypeViewTest, Equal) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_EQ(FunctionTypeView(type), FunctionTypeView(type));
EXPECT_EQ(TypeView(FunctionTypeView(type)), FunctionTypeView(type));
EXPECT_EQ(FunctionTypeView(type), TypeView(FunctionTypeView(type)));
EXPECT_EQ(TypeView(FunctionTypeView(type)), TypeView(FunctionTypeView(type)));
EXPECT_EQ(FunctionTypeView(type), FunctionType(type));
EXPECT_EQ(TypeView(FunctionTypeView(type)), FunctionType(type));
EXPECT_EQ(TypeView(FunctionTypeView(type)), Type(FunctionType(type)));
EXPECT_EQ(FunctionType(type), FunctionTypeView(type));
EXPECT_EQ(FunctionType(type), FunctionTypeView(type));
EXPECT_EQ(FunctionType(type), TypeView(FunctionTypeView(type)));
EXPECT_EQ(Type(FunctionType(type)), TypeView(FunctionTypeView(type)));
EXPECT_EQ(FunctionTypeView(type), FunctionType(type));
}
TEST_P(FunctionTypeViewTest, NativeTypeId) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_EQ(NativeTypeId::Of(FunctionTypeView(type)),
NativeTypeId::For<FunctionTypeView>());
EXPECT_EQ(NativeTypeId::Of(TypeView(FunctionTypeView(type))),
NativeTypeId::For<FunctionTypeView>());
}
TEST_P(FunctionTypeViewTest, InstanceOf) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_TRUE(InstanceOf<FunctionTypeView>(FunctionTypeView(type)));
EXPECT_TRUE(InstanceOf<FunctionTypeView>(TypeView(FunctionTypeView(type))));
}
TEST_P(FunctionTypeViewTest, Cast) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_THAT(Cast<FunctionTypeView>(FunctionTypeView(type)),
An<FunctionTypeView>());
EXPECT_THAT(Cast<FunctionTypeView>(TypeView(FunctionTypeView(type))),
An<FunctionTypeView>());
}
TEST_P(FunctionTypeViewTest, As) {
auto type = FunctionType(memory_manager(), DynType{}, {BytesType()});
EXPECT_THAT(As<FunctionTypeView>(FunctionTypeView(type)), Ne(absl::nullopt));
EXPECT_THAT(As<FunctionTypeView>(TypeView(FunctionTypeView(type))),
Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
FunctionTypeViewTest, FunctionTypeViewTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
FunctionTypeViewTest::ToString);
}
} |
117 | #ifndef QUICHE_HTTP2_HPACK_DECODER_HPACK_STRING_DECODER_H_
#define QUICHE_HTTP2_HPACK_DECODER_HPACK_STRING_DECODER_H_
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <string>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/hpack/varint/hpack_varint_decoder.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
class QUICHE_EXPORT HpackStringDecoder {
public:
enum StringDecoderState {
kStartDecodingLength,
kDecodingString,
kResumeDecodingLength,
};
template <class Listener>
DecodeStatus Start(DecodeBuffer* db, Listener* cb) {
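// Fast path: a one-byte length (7-bit prefix value below 0x7f). If the whole
// string is also in this buffer, the decode completes immediately; otherwise
// fall through to the resume loop with the remaining length recorded.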
if (db->HasData() && (*db->cursor() & 0x7f) != 0x7f) {
uint8_t h_and_prefix = db->DecodeUInt8();
uint8_t length = h_and_prefix & 0x7f;
bool huffman_encoded = (h_and_prefix & 0x80) == 0x80;
cb->OnStringStart(huffman_encoded, length);
if (length <= db->Remaining()) {
cb->OnStringData(db->cursor(), length);
db->AdvanceCursor(length);
cb->OnStringEnd();
return DecodeStatus::kDecodeDone;
}
huffman_encoded_ = huffman_encoded;
remaining_ = length;
state_ = kDecodingString;
return Resume(db, cb);
}
state_ = kStartDecodingLength;
return Resume(db, cb);
}
template <class Listener>
DecodeStatus Resume(DecodeBuffer* db, Listener* cb) {
DecodeStatus status;
while (true) {
switch (state_) {
case kStartDecodingLength:
QUICHE_DVLOG(2) << "kStartDecodingLength: db->Remaining="
<< db->Remaining();
if (!StartDecodingLength(db, cb, &status)) {
return status;
}
ABSL_FALLTHROUGH_INTENDED;
case kDecodingString:
QUICHE_DVLOG(2) << "kDecodingString: db->Remaining="
<< db->Remaining() << " remaining_=" << remaining_;
return DecodeString(db, cb);
case kResumeDecodingLength:
QUICHE_DVLOG(2) << "kResumeDecodingLength: db->Remaining="
<< db->Remaining();
if (!ResumeDecodingLength(db, cb, &status)) {
return status;
}
}
}
}
std::string DebugString() const;
private:
static std::string StateToString(StringDecoderState v);
template <class Listener>
bool StartDecodingLength(DecodeBuffer* db, Listener* cb,
DecodeStatus* status) {
if (db->Empty()) {
*status = DecodeStatus::kDecodeInProgress;
state_ = kStartDecodingLength;
return false;
}
uint8_t h_and_prefix = db->DecodeUInt8();
huffman_encoded_ = (h_and_prefix & 0x80) == 0x80;
*status = length_decoder_.Start(h_and_prefix, 7, db);
if (*status == DecodeStatus::kDecodeDone) {
OnStringStart(cb, status);
return true;
}
state_ = kResumeDecodingLength;
return false;
}
template <class Listener>
bool ResumeDecodingLength(DecodeBuffer* db, Listener* cb,
DecodeStatus* status) {
QUICHE_DCHECK_EQ(state_, kResumeDecodingLength);
*status = length_decoder_.Resume(db);
if (*status == DecodeStatus::kDecodeDone) {
state_ = kDecodingString;
OnStringStart(cb, status);
return true;
}
return false;
}
template <class Listener>
void OnStringStart(Listener* cb, DecodeStatus* /*status*/) {
remaining_ = static_cast<size_t>(length_decoder_.value());
cb->OnStringStart(huffman_encoded_, remaining_);
}
template <class Listener>
DecodeStatus DecodeString(DecodeBuffer* db, Listener* cb) {
size_t len = std::min(remaining_, db->Remaining());
if (len > 0) {
cb->OnStringData(db->cursor(), len);
db->AdvanceCursor(len);
remaining_ -= len;
}
if (remaining_ == 0) {
cb->OnStringEnd();
return DecodeStatus::kDecodeDone;
}
state_ = kDecodingString;
return DecodeStatus::kDecodeInProgress;
}
HpackVarintDecoder length_decoder_;
size_t remaining_ = 0;
StringDecoderState state_ = kStartDecodingLength;
bool huffman_encoded_ = false;
};
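// Usage sketch (hypothetical): decoding one HPACK string that may span
// multiple decode buffers:
//   HpackStringDecoder decoder;
//   DecodeStatus status = decoder.Start(&first_buffer, &listener);
//   while (status == DecodeStatus::kDecodeInProgress) {
//     status = decoder.Resume(&next_buffer, &listener);  // feed more input
//   }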
QUICHE_EXPORT std::ostream& operator<<(std::ostream& out,
const HpackStringDecoder& v);
}
#endif
#include "quiche/http2/hpack/decoder/hpack_string_decoder.h"
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace http2 {
std::string HpackStringDecoder::DebugString() const {
return absl::StrCat("HpackStringDecoder(state=", StateToString(state_),
", length=", length_decoder_.DebugString(),
", remaining=", remaining_,
", huffman=", huffman_encoded_ ? "true)" : "false)");
}
std::string HpackStringDecoder::StateToString(StringDecoderState v) {
switch (v) {
case kStartDecodingLength:
return "kStartDecodingLength";
case kDecodingString:
return "kDecodingString";
case kResumeDecodingLength:
return "kResumeDecodingLength";
}
return absl::StrCat("UNKNOWN_STATE(", static_cast<uint32_t>(v), ")");
}
std::ostream& operator<<(std::ostream& out, const HpackStringDecoder& v) {
return out << v.DebugString();
}
} | #include "quiche/http2/hpack/decoder/hpack_string_decoder.h"
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/http2/hpack/decoder/hpack_string_decoder_listener.h"
#include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/test_tools/hpack_string_collector.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
const bool kMayReturnZeroOnFirst = false;
const bool kCompressed = true;
const bool kUncompressed = false;
class HpackStringDecoderTest : public RandomDecoderTest {
protected:
HpackStringDecoderTest() : listener_(&collector_) {}
DecodeStatus StartDecoding(DecodeBuffer* b) override {
++start_decoding_calls_;
collector_.Clear();
return decoder_.Start(b, &listener_);
}
DecodeStatus ResumeDecoding(DecodeBuffer* b) override {
QUICHE_VLOG(1) << decoder_.DebugString();
QUICHE_VLOG(2) << collector_;
return decoder_.Resume(b, &listener_);
}
AssertionResult Collected(absl::string_view s, bool huffman_encoded) {
QUICHE_VLOG(1) << collector_;
return collector_.Collected(s, huffman_encoded);
}
Validator MakeValidator(const std::string& expected_str,
bool expected_huffman) {
return [expected_str, expected_huffman, this](
const DecodeBuffer& /*input*/,
DecodeStatus /*status*/) -> AssertionResult {
AssertionResult result = Collected(expected_str, expected_huffman);
if (result) {
HTTP2_VERIFY_EQ(collector_,
HpackStringCollector(expected_str, expected_huffman));
} else {
HTTP2_VERIFY_NE(collector_,
HpackStringCollector(expected_str, expected_huffman));
}
QUICHE_VLOG(2) << collector_.ToString();
collector_.Clear();
QUICHE_VLOG(2) << collector_;
return result;
};
}
HpackStringDecoder decoder_;
HpackStringCollector collector_;
HpackStringDecoderVLoggingListener listener_;
size_t start_decoding_calls_ = 0;
};
TEST_F(HpackStringDecoderTest, DecodeEmptyString) {
{
Validator validator = ValidateDoneAndEmpty(MakeValidator("", kCompressed));
const char kData[] = {'\x80'};
DecodeBuffer b(kData);
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
}
{
Validator validator =
ValidateDoneAndOffset(1, MakeValidator("", kUncompressed));
const char kData[] = {'\x00', '\xff'};
DecodeBuffer b(kData);
EXPECT_EQ(2u, b.Remaining());
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
EXPECT_EQ(1u, b.Remaining());
}
}
TEST_F(HpackStringDecoderTest, DecodeShortString) {
{
Validator validator =
ValidateDoneAndOffset(11, MakeValidator("start end.", kCompressed));
const char kData[] = "\x8astart end.Don't peek at this.";
DecodeBuffer b(kData);
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
}
{
Validator validator =
ValidateDoneAndOffset(11, MakeValidator("start end.", kUncompressed));
absl::string_view data("\x0astart end.");
DecodeBuffer b(data);
EXPECT_TRUE(
DecodeAndValidateSeveralWays(&b, kMayReturnZeroOnFirst, validator));
}
}
TEST_F(HpackStringDecoderTest, DecodeLongStrings) {
std::string name = Random().RandString(1024);
std::string value = Random().RandString(65536);
HpackBlockBuilder hbb;
hbb.AppendString(false, name);
uint32_t offset_after_name = hbb.size();
EXPECT_EQ(3 + name.size(), offset_after_name);
hbb.AppendString(true, value);
uint32_t offset_after_value = hbb.size();
EXPECT_EQ(3 + name.size() + 4 + value.size(), offset_after_value);
DecodeBuffer b(hbb.buffer());
EXPECT_TRUE(DecodeAndValidateSeveralWays(
&b, kMayReturnZeroOnFirst,
ValidateDoneAndOffset(offset_after_name,
MakeValidator(name, kUncompressed))));
EXPECT_EQ(offset_after_name, b.Offset());
EXPECT_EQ(offset_after_value - offset_after_name, b.Remaining());
EXPECT_TRUE(DecodeAndValidateSeveralWays(
&b, kMayReturnZeroOnFirst,
ValidateDoneAndOffset(offset_after_value - offset_after_name,
MakeValidator(value, kCompressed))));
EXPECT_EQ(offset_after_value, b.Offset());
EXPECT_EQ(0u, b.Remaining());
}
}
}
} |
118 | #ifndef TENSORSTORE_KVSTORE_OCDBT_IO_INDIRECT_DATA_WRITER_H_
#define TENSORSTORE_KVSTORE_OCDBT_IO_INDIRECT_DATA_WRITER_H_
#include <stddef.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
namespace internal_ocdbt {
class IndirectDataWriter;
using IndirectDataWriterPtr = internal::IntrusivePtr<IndirectDataWriter>;
void intrusive_ptr_increment(IndirectDataWriter* p);
void intrusive_ptr_decrement(IndirectDataWriter* p);
IndirectDataWriterPtr MakeIndirectDataWriter(kvstore::KvStore kvstore,
std::string prefix,
size_t target_size);
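// Appends `data` to the writer's buffer and fills `ref` with the file id,
// offset, and length at which it will be stored. The returned future becomes
// ready once the buffered write completes; calling Force() on it requests a
// flush.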
Future<const void> Write(IndirectDataWriter& self, absl::Cord data,
IndirectDataReference& ref);
}
}
#endif
#include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <stddef.h>
#include <cassert>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/histogram.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/data_file_id.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_ocdbt {
namespace {
auto& indirect_data_writer_histogram =
internal_metrics::Histogram<internal_metrics::DefaultBucketer>::New(
"/tensorstore/kvstore/ocdbt/indirect_data_write_size",
"Histogram of OCDBT buffered write sizes.");
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
}
class IndirectDataWriter
: public internal::AtomicReferenceCount<IndirectDataWriter> {
public:
explicit IndirectDataWriter(kvstore::KvStore kvstore, std::string prefix,
size_t target_size)
: kvstore_(std::move(kvstore)),
prefix_(std::move(prefix)),
target_size_(target_size) {}
kvstore::KvStore kvstore_;
std::string prefix_;
size_t target_size_;
absl::Mutex mutex_;
size_t in_flight_ = 0;
bool flush_requested_ = false;
absl::Cord buffer_;
Promise<void> promise_;
DataFileId data_file_id_;
};
void intrusive_ptr_increment(IndirectDataWriter* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p));
}
void intrusive_ptr_decrement(IndirectDataWriter* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<IndirectDataWriter>*>(p));
}
namespace {
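// Starts a write of the buffered data if the buffer has reached the target
// size, or if a flush was requested and no other write is in flight.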
void MaybeFlush(IndirectDataWriter& self, UniqueWriterLock<absl::Mutex> lock) {
bool buffer_at_target =
self.target_size_ > 0 && self.buffer_.size() >= self.target_size_;
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "MaybeFlush: flush_requested=" << self.flush_requested_
<< ", in_flight=" << self.in_flight_
<< ", buffer_at_target=" << buffer_at_target;
if (buffer_at_target) {
} else if (!self.flush_requested_ || self.in_flight_ > 0) {
return;
}
self.in_flight_++;
self.flush_requested_ = false;
Promise<void> promise = std::exchange(self.promise_, {});
absl::Cord buffer = std::exchange(self.buffer_, {});
DataFileId data_file_id = self.data_file_id_;
lock.unlock();
indirect_data_writer_histogram.Observe(buffer.size());
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Flushing " << buffer.size() << " bytes to " << data_file_id;
auto write_future =
kvstore::Write(self.kvstore_, data_file_id.FullPath(), std::move(buffer));
write_future.Force();
write_future.ExecuteWhenReady(
[promise = std::move(promise), data_file_id = std::move(data_file_id),
self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
ReadyFuture<TimestampedStorageGeneration> future) {
auto& r = future.result();
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Done flushing data to " << data_file_id << ": " << r.status();
if (!r.ok()) {
promise.SetResult(r.status());
} else if (StorageGeneration::IsUnknown(r->generation)) {
promise.SetResult(absl::UnavailableError("Non-unique file id"));
} else {
promise.SetResult(absl::OkStatus());
}
UniqueWriterLock lock{self->mutex_};
assert(self->in_flight_ > 0);
self->in_flight_--;
MaybeFlush(*self, std::move(lock));
});
}
}
Future<const void> Write(IndirectDataWriter& self, absl::Cord data,
IndirectDataReference& ref) {
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Write indirect data: size=" << data.size();
if (data.empty()) {
ref.file_id = DataFileId{};
ref.offset = 0;
ref.length = 0;
return absl::OkStatus();
}
UniqueWriterLock lock{self.mutex_};
Future<const void> future;
if (self.promise_.null() || (future = self.promise_.future()).null()) {
self.data_file_id_ = GenerateDataFileId(self.prefix_);
auto p = PromiseFuturePair<void>::Make();
self.promise_ = std::move(p.promise);
future = std::move(p.future);
self.promise_.ExecuteWhenForced(
[self = internal::IntrusivePtr<IndirectDataWriter>(&self)](
Promise<void> promise) {
ABSL_LOG_IF(INFO, ocdbt_logging) << "Force called";
UniqueWriterLock lock{self->mutex_};
if (!HaveSameSharedState(promise, self->promise_)) return;
self->flush_requested_ = true;
MaybeFlush(*self, std::move(lock));
});
}
ref.file_id = self.data_file_id_;
ref.offset = self.buffer_.size();
ref.length = data.size();
self.buffer_.Append(std::move(data));
if (self.target_size_ > 0 && self.buffer_.size() >= self.target_size_) {
MaybeFlush(self, std::move(lock));
}
return future;
}
IndirectDataWriterPtr MakeIndirectDataWriter(kvstore::KvStore kvstore,
std::string prefix,
size_t target_size) {
return internal::MakeIntrusivePtr<IndirectDataWriter>(
std::move(kvstore), std::move(prefix), target_size);
}
}
} | #include "tensorstore/kvstore/ocdbt/io/indirect_data_writer.h"
#include <algorithm>
#include <cstring>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Future;
using ::tensorstore::internal::FlatCordBuilder;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::MakeIndirectDataWriter;
using ::tensorstore::internal_ocdbt::Write;
namespace {
absl::Cord GetCord(size_t size) {
FlatCordBuilder cord_builder(size);
memset(cord_builder.data(), 0x37, cord_builder.size());
return std::move(cord_builder).Build();
}
template <typename T>
std::vector<std::string> ListEntriesToFiles(T& entries) {
std::vector<std::string> files;
files.reserve(entries.size());
for (auto& e : entries) {
files.push_back(std::move(e.key));
}
std::sort(files.begin(), files.end());
return files;
}
TEST(IndirectDataWriter, UnlimitedSize) {
auto data = GetCord(260);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
auto mock_key_value_store = MockKeyValueStore::Make();
auto writer = MakeIndirectDataWriter(
tensorstore::kvstore::KvStore(mock_key_value_store), "d/", 0);
std::vector<Future<const void>> futures;
std::vector<std::string> refs;
for (int i = 0; i < 1000; ++i) {
IndirectDataReference ref;
auto f = Write(*writer, data, ref);
if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
refs.push_back(ref.file_id.FullPath());
}
f.Force();
futures.push_back(std::move(f));
}
std::sort(refs.begin(), refs.end());
EXPECT_THAT(refs, ::testing::SizeIs(::testing::Eq(2)));
while (!mock_key_value_store->write_requests.empty()) {
EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Eq(1));
auto r = mock_key_value_store->write_requests.pop();
r(memory_store);
}
for (auto& f : futures) {
TENSORSTORE_ASSERT_OK(f.status());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entries,
tensorstore::kvstore::ListFuture(memory_store.get()).result());
auto files = ListEntriesToFiles(entries);
EXPECT_THAT(files, ::testing::SizeIs(2));
EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
TEST(IndirectDataWriter, LimitedSize) {
constexpr size_t kTargetSize = 1024;
auto data = GetCord(260);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
auto mock_key_value_store = MockKeyValueStore::Make();
auto writer = MakeIndirectDataWriter(
tensorstore::kvstore::KvStore(mock_key_value_store), "d/", kTargetSize);
std::vector<Future<const void>> futures;
std::vector<std::string> refs;
for (int i = 0; i < 1000; ++i) {
IndirectDataReference ref;
auto f = Write(*writer, data, ref);
EXPECT_THAT(ref.offset, testing::Le(kTargetSize));
if (refs.empty() || refs.back() != ref.file_id.FullPath()) {
refs.push_back(ref.file_id.FullPath());
}
f.Force();
futures.push_back(std::move(f));
}
std::sort(refs.begin(), refs.end());
EXPECT_THAT(refs, ::testing::SizeIs(::testing::Ge(250)));
EXPECT_THAT(mock_key_value_store->write_requests.size(), ::testing::Gt(1));
while (!mock_key_value_store->write_requests.empty()) {
auto r = mock_key_value_store->write_requests.pop();
r(memory_store);
}
for (auto& f : futures) {
TENSORSTORE_ASSERT_OK(f.status());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto entries,
tensorstore::kvstore::ListFuture(memory_store.get()).result());
auto files = ListEntriesToFiles(entries);
EXPECT_THAT(files, ::testing::SizeIs(refs.size()));
EXPECT_THAT(files, ::testing::ElementsAreArray(refs));
}
} |
119 | #include "tsl/lib/core/status_test_util.h"
#ifndef XLA_PYTHON_IFRT_TEST_UTIL_H_
#define XLA_PYTHON_IFRT_TEST_UTIL_H_
#include <functional>
#include <memory>
#include <optional>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace test_util {
void RegisterClientFactory(
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory);
bool IsClientFactoryRegistered();
absl::StatusOr<std::shared_ptr<Client>> GetClient();
void SetTestFilterIfNotUserSpecified(absl::string_view custom_filter);
template <typename ElementT>
void AssertPerShardData(
tsl::RCReference<Array> actual, DType expected_dtype,
Shape expected_per_shard_shape,
absl::Span<const absl::Span<const ElementT>> expected_per_shard_data,
DeviceList expected_device_list) {
ASSERT_EQ(actual->dtype(), expected_dtype);
EXPECT_THAT(GetDeviceIds(actual->sharding().devices()),
testing::ElementsAreArray(GetDeviceIds(expected_device_list)));
TF_ASSERT_OK_AND_ASSIGN(auto actual_per_shard_arrays,
actual->DisassembleIntoSingleDeviceArrays(
ArrayCopySemantics::kAlwaysCopy));
ASSERT_EQ(actual_per_shard_arrays.size(), expected_per_shard_data.size());
for (int i = 0; i < actual_per_shard_arrays.size(); ++i) {
SCOPED_TRACE(absl::StrCat("Shard ", i));
tsl::RCReference<Array> array = actual_per_shard_arrays[i];
ASSERT_EQ(array->shape(), expected_per_shard_shape);
std::vector<ElementT> actual_data(expected_per_shard_shape.num_elements());
TF_ASSERT_OK(array
->CopyToHostBuffer(actual_data.data(),
std::nullopt,
ArrayCopySemantics::kAlwaysCopy)
.Await());
EXPECT_THAT(actual_data,
testing::ElementsAreArray(expected_per_shard_data[i]));
}
}
absl::StatusOr<DeviceList> GetDevices(Client* client,
absl::Span<const int> device_indices);
}
}
}
#endif
#include "xla/python/ifrt/test_util.h"
#include <functional>
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
namespace xla {
namespace ifrt {
namespace test_util {
namespace {
class ClientFactory {
public:
void Register(
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory) {
absl::MutexLock lock(&mu_);
CHECK(!factory_) << "Client factory has been already registered.";
factory_ = std::move(factory);
}
std::function<absl::StatusOr<std::shared_ptr<Client>>()> Get() const {
absl::MutexLock lock(&mu_);
return factory_;
}
private:
mutable absl::Mutex mu_;
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory_
ABSL_GUARDED_BY(mu_);
};
ClientFactory& GetGlobalClientFactory() {
static auto* const factory = new ClientFactory;
return *factory;
}
}
void RegisterClientFactory(
std::function<absl::StatusOr<std::shared_ptr<Client>>()> factory) {
GetGlobalClientFactory().Register(std::move(factory));
}
absl::StatusOr<std::shared_ptr<Client>> GetClient() {
auto factory = GetGlobalClientFactory().Get();
CHECK(factory) << "Client factory has not been registered.";
return factory();
}
void SetTestFilterIfNotUserSpecified(absl::string_view custom_filter) {
static constexpr absl::string_view kDefaultTestFilter = "*";
#ifdef GTEST_FLAG_SET
if (GTEST_FLAG_GET(filter) == kDefaultTestFilter) {
GTEST_FLAG_SET(filter, custom_filter);
}
#else
if (testing::GTEST_FLAG(filter) == kDefaultTestFilter) {
testing::GTEST_FLAG(filter) = custom_filter;
}
#endif
}
absl::StatusOr<DeviceList> GetDevices(Client* client,
absl::Span<const int> device_indices) {
DeviceList::Devices devices;
devices.reserve(device_indices.size());
for (int device_index : device_indices) {
if (device_index < 0 || device_index >= client->devices().size()) {
return absl::InvalidArgumentError(
absl::StrCat("Out of range device index: ", device_index));
}
devices.push_back(client->devices()[device_index]);
}
return DeviceList(std::move(devices));
}
}
}
} | #include "tensorflow/core/data/service/test_util.h"
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/standalone.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace testing {
namespace {
using ::tensorflow::testing::IsOkAndHolds;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
template <class T>
StatusOr<std::vector<T>> GetIteratorOutput(standalone::Iterator& iterator) {
std::vector<T> result;
for (bool end_of_sequence = false; !end_of_sequence;) {
std::vector<tensorflow::Tensor> tensors;
TF_RETURN_IF_ERROR(iterator.GetNext(&tensors, &end_of_sequence));
if (end_of_sequence) {
break;
}
if (tensors.size() != 1) {
return errors::Internal("GetNext Tensor size is not 1.");
}
result.push_back(tensors[0].unaligned_flat<T>().data()[0]);
}
return result;
}
TEST(TestUtilTest, RangeDataset) {
const auto dataset_def = RangeDataset(10);
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<int64_t>(*iterator),
IsOkAndHolds(ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)));
}
TEST(TestUtilTest, RangeSquareDataset) {
const auto dataset_def = RangeSquareDataset(10);
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<int64_t>(*iterator),
IsOkAndHolds(ElementsAre(0, 1, 4, 9, 16, 25, 36, 49, 64, 81)));
}
TEST(TestUtilTest, InfiniteDataset) {
const auto dataset_def = InfiniteDataset();
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
for (int64_t i = 0; i < 10; ++i) {
std::vector<tensorflow::Tensor> outputs;
bool end_of_sequence;
TF_ASSERT_OK(iterator->GetNext(&outputs, &end_of_sequence));
test::ExpectEqual(outputs[0], Tensor(i));
}
}
TEST(TestUtilTest, EmptyDataset) {
const auto dataset_def = RangeSquareDataset(0);
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<int64_t>(*iterator), IsOkAndHolds(IsEmpty()));
}
TEST(TestUtilTest, InterleaveTextline) {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
TF_ASSERT_OK_AND_ASSIGN(const DatasetDef dataset_def,
InterleaveTextlineDataset(filenames, {"0", "1"}));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator),
IsOkAndHolds(ElementsAre("0", "1")));
}
TEST(TestUtilTest, InterleaveTextlineWithNewLines) {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
TF_ASSERT_OK_AND_ASSIGN(
const DatasetDef dataset_def,
InterleaveTextlineDataset(filenames, {"0\n2\n4\n6\n8", "1\n3\n5\n7\n9"}));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator),
IsOkAndHolds(ElementsAre("0", "1", "2", "3", "4", "5", "6", "7",
"8", "9")));
}
TEST(TestUtilTest, InterleaveTextlineEmptyFiles) {
std::vector<tstring> filenames = {LocalTempFilename(), LocalTempFilename()};
TF_ASSERT_OK_AND_ASSIGN(const DatasetDef dataset_def,
InterleaveTextlineDataset(filenames, {"", ""}));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator), IsOkAndHolds(IsEmpty()));
}
TEST(TestUtilTest, GetTestDataset) {
TF_ASSERT_OK_AND_ASSIGN(const DatasetDef dataset_def,
GetTestDataset("choose_from_datasets"));
standalone::Dataset::Params params;
std::unique_ptr<standalone::Dataset> dataset;
TF_ASSERT_OK(
standalone::Dataset::FromGraph(params, dataset_def.graph(), &dataset));
std::unique_ptr<standalone::Iterator> iterator;
TF_ASSERT_OK(dataset->MakeIterator(&iterator));
EXPECT_THAT(GetIteratorOutput<tstring>(*iterator),
IsOkAndHolds(ElementsAre("a", "b", "c", "a", "b", "c", "a", "b",
"c", "a", "b", "c", "a", "b", "c")));
}
}
}
}
} |
120 | #ifndef TENSORFLOW_LITE_KERNELS_GRADIENT_BCAST_GRAD_ARGS_H_
#define TENSORFLOW_LITE_KERNELS_GRADIENT_BCAST_GRAD_ARGS_H_
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_BROADCAST_GRADIENT_ARGS();
}
}
}
#endif
#include <algorithm>
#include <array>
#include <cmath>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
static const int kInputOneTensor = 0;
static const int kInputTwoTensor = 1;
static const int kOutputOneTensor = 0;
static const int kOutputTwoTensor = 1;
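// Validates that both inputs are 1-D int32/int64 shape tensors of the same
// type and marks both outputs dynamic, since their sizes depend on the input
// values rather than on the input shapes.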
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
const TfLiteTensor* input1 = GetInput(context, node, kInputOneTensor);
TF_LITE_ENSURE(context, input1 != nullptr);
const RuntimeShape input1_shape = GetTensorShape(input1);
TF_LITE_ENSURE(context,
input1->type == kTfLiteInt32 || input1->type == kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, input1_shape.DimensionsCount(), 1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTwoTensor);
TF_LITE_ENSURE(context, input2 != nullptr);
const RuntimeShape input2_shape = GetTensorShape(input2);
TF_LITE_ENSURE_TYPES_EQ(context, input2->type, input1->type);
TF_LITE_ENSURE_EQ(context, input2_shape.DimensionsCount(), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
TfLiteTensor* output1 = GetOutput(context, node, kOutputOneTensor);
TF_LITE_ENSURE(context, output1 != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, output1->type, input1->type);
TfLiteTensor* output2 = GetOutput(context, node, kOutputTwoTensor);
TF_LITE_ENSURE(context, output2 != nullptr);
TF_LITE_ENSURE_TYPES_EQ(context, output2->type, input1->type);
SetTensorToDynamic(output1);
SetTensorToDynamic(output2);
return kTfLiteOk;
}
TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = GetInput(context, node, kInputOneTensor);
TF_LITE_ENSURE(context, input1 != nullptr);
const RuntimeShape input1_shape = GetTensorShape(input1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTwoTensor);
TF_LITE_ENSURE(context, input2 != nullptr);
const RuntimeShape input2_shape = GetTensorShape(input2);
TfLiteTensor* output1 = GetOutput(context, node, kOutputOneTensor);
TF_LITE_ENSURE(context, output1 != nullptr);
TfLiteTensor* output2 = GetOutput(context, node, kOutputTwoTensor);
TF_LITE_ENSURE(context, output2 != nullptr);
std::vector<int64_t> input1_vec;
std::vector<int64_t> input2_vec;
if (input1->type == kTfLiteInt32) {
input1_vec = std::vector<int64_t>(input1->data.i32,
input1->data.i32 + input1_shape.Dims(0));
} else {
input1_vec = std::vector<int64_t>(input1->data.i64,
input1->data.i64 + input1_shape.Dims(0));
}
if (input2->type == kTfLiteInt32) {
input2_vec = std::vector<int64_t>(input2->data.i32,
input2->data.i32 + input2_shape.Dims(0));
} else {
input2_vec = std::vector<int64_t>(input2->data.i64,
input2->data.i64 + input2_shape.Dims(0));
}
if (input1_vec == input2_vec) {
TfLiteIntArray* output1_shape = TfLiteIntArrayCreate(1);
output1_shape->data[0] = 0;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output1, output1_shape));
TfLiteIntArray* output2_shape = TfLiteIntArrayCreate(1);
output2_shape->data[0] = 0;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output2, output2_shape));
return kTfLiteOk;
}
size_t largest_rank = std::max(input1_vec.size(), input2_vec.size());
std::vector<int64_t> copy[2];
copy[0] = std::vector<int64_t>(input1_vec.rbegin(), input1_vec.rend());
copy[1] = std::vector<int64_t>(input2_vec.rbegin(), input2_vec.rend());
for (int i = 0; i < 2; ++i) {
if (copy[i].size() < largest_rank) {
copy[i].resize(largest_rank, 1);
}
}
  std::array<bool, 2> current_is_one = {false, false};
std::vector<int64_t> grad_reduce_idx[2];
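  // Walk the reversed (innermost-first) padded shapes, recording for each
  // input the dimensions over which its gradient must be reduced.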
for (int j = 0; j < largest_rank; ++j) {
int output_dim = -1;
    bool output_dim_set = false;
bool none_is_one = true;
for (int i = 0; i < 2; ++i) {
if (copy[i][j] == 1) {
current_is_one[i] = true;
none_is_one = false;
} else {
current_is_one[i] = false;
if (!output_dim_set || copy[i][j] == output_dim) {
output_dim = copy[i][j];
output_dim_set = true;
} else {
return kTfLiteError;
}
}
}
if (!output_dim_set) {
for (int i = 0; i < 2; ++i) {
grad_reduce_idx[i].push_back(largest_rank - 1 - j);
}
continue;
    }
    // A dimension that is 1 in exactly one input was broadcast, so that
    // input's gradient must be reduced over it.
    for (int i = 0; i < 2; ++i) {
      if (current_is_one[i] && !none_is_one) {
        grad_reduce_idx[i].push_back(largest_rank - 1 - j);
      }
    }
}
for (int i = 0; i < 2; ++i) {
std::reverse(grad_reduce_idx[i].begin(), grad_reduce_idx[i].end());
}
TfLiteIntArray* output1_shape = TfLiteIntArrayCreate(1);
output1_shape->data[0] = grad_reduce_idx[0].size();
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output1, output1_shape));
if (output1->type == kTfLiteInt32) {
for (int i = 0; i < grad_reduce_idx[0].size(); ++i) {
output1->data.i32[i] = grad_reduce_idx[0][i];
}
} else if (output1->type == kTfLiteInt64) {
for (int i = 0; i < grad_reduce_idx[0].size(); ++i) {
output1->data.i64[i] = grad_reduce_idx[0][i];
}
}
TfLiteIntArray* output2_shape = TfLiteIntArrayCreate(1);
output2_shape->data[0] = grad_reduce_idx[1].size();
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, output2, output2_shape));
if (output2->type == kTfLiteInt32) {
for (int i = 0; i < grad_reduce_idx[1].size(); ++i) {
output2->data.i32[i] = grad_reduce_idx[1][i];
}
} else if (output2->type == kTfLiteInt64) {
for (int i = 0; i < grad_reduce_idx[1].size(); ++i) {
output2->data.i64[i] = grad_reduce_idx[1][i];
}
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BROADCAST_GRADIENT_ARGS() {
static TfLiteRegistration reg = {nullptr,
nullptr,
Prepare,
Invoke};
  return &reg;
}
}
}
} | #include "tensorflow/lite/kernels/gradient/bcast_grad_args.h"
#include <cstdint>
#include <vector>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class BcastGradArgsInt32OpModel : public SingleOpModel {
public:
BcastGradArgsInt32OpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output1,
const TensorData& output2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
std::vector<uint8_t> custom_option;
SetCustomOp("BroadcastGradientArgs", custom_option,
Register_BROADCAST_GRADIENT_ARGS);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(const std::vector<int>& data) {
PopulateTensor(input1_, data);
}
void SetInput2(const std::vector<int>& data) {
PopulateTensor(input2_, data);
}
std::vector<int> GetOutput1() { return ExtractVector<int>(output1_); }
std::vector<int> GetOutput1Shape() { return GetTensorShape(output1_); }
std::vector<int> GetOutput2() { return ExtractVector<int>(output2_); }
std::vector<int> GetOutput2Shape() { return GetTensorShape(output2_); }
protected:
int input1_;
int input2_;
int output1_;
int output2_;
};
TEST(BcastGradArgsInt32OpModel, AllEqualsInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 1, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput1Int32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 1, 3});
model.SetInput2({3, 4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput2Int32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({1}));
}
TEST(BcastGradArgsInt32OpModel, DifferentInputSizesInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {3}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({0}));
}
TEST(BcastGradArgsInt32OpModel, NonBroadcastableDimsInt32DTypes) {
BcastGradArgsInt32OpModel model(
{TensorType_INT32, {4}},
{TensorType_INT32, {4}},
{TensorType_INT32, {}},
{TensorType_INT32, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({9, 9, 9, 9});
EXPECT_THAT(model.Invoke(), kTfLiteError);
}
class BcastGradArgsInt64OpModel : public SingleOpModel {
public:
BcastGradArgsInt64OpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output1,
const TensorData& output2) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output1_ = AddOutput(output1);
output2_ = AddOutput(output2);
std::vector<uint8_t> custom_option;
SetCustomOp("BroadcastGradientArgs", custom_option,
Register_BROADCAST_GRADIENT_ARGS);
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
void SetInput1(const std::vector<int64_t>& data) {
PopulateTensor(input1_, data);
}
void SetInput2(const std::vector<int64_t>& data) {
PopulateTensor(input2_, data);
}
std::vector<int64_t> GetOutput1() { return ExtractVector<int64_t>(output1_); }
std::vector<int> GetOutput1Shape() { return GetTensorShape(output1_); }
std::vector<int64_t> GetOutput2() { return ExtractVector<int64_t>(output2_); }
std::vector<int> GetOutput2Shape() { return GetTensorShape(output2_); }
protected:
int input1_;
int input2_;
int output1_;
int output2_;
};
TEST(BcastGradArgsInt32OpModel, AllEqualsInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 1, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput1Int64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 1, 3});
model.SetInput2({3, 4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1(), ElementsAreArray({2}));
EXPECT_THAT(model.GetOutput2().size(), 0);
}
TEST(BcastGradArgsInt32OpModel, BroadcastableDimAtInput2Int64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({3, 1, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({1}));
}
TEST(BcastGradArgsInt32OpModel, DifferentInputSizesInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {3}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({4, 2, 3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput1().size(), 0);
EXPECT_THAT(model.GetOutput2(), ElementsAreArray({0}));
}
TEST(BcastGradArgsInt32OpModel, NonBroadcastableDimsInt64DTypes) {
BcastGradArgsInt64OpModel model(
{TensorType_INT64, {4}},
{TensorType_INT64, {4}},
{TensorType_INT64, {}},
{TensorType_INT64, {}});
model.SetInput1({3, 4, 2, 3});
model.SetInput2({9, 9, 9, 9});
EXPECT_THAT(model.Invoke(), kTfLiteError);
}
}
}
}
} |
121 | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_REMAT_METADATA_UTIL_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_REMAT_METADATA_UTIL_H_
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>
#include "tensorflow/compiler/mlir/lite/utils/control_edges.h"
namespace tflite {
using ModelControlDependencies = std::vector<ControlEdges>;
std::string SerializeModelControlDependencies(
const ModelControlDependencies& in);
bool ParseModelControlDependencies(const char* data, size_t size,
ModelControlDependencies* out);
constexpr char kModelControlDependenciesMetadataKey[] =
"model_control_dependencies";
constexpr uint32_t kModelControlDependenciesMetadataVersion = 1;
inline constexpr char kModelUseStablehloTensorKey[] = "keep_stablehlo_constant";
}
#endif
#include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include <string>
#include <utility>
#include <vector>
namespace {
constexpr int kMod = (1 << 7);
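// Serializes `value` as a little-endian base-128 varint: each byte holds 7
// payload bits, and the high bit marks a continuation byte.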
void Serialize(std::string* out, uint32_t value) {
for (; value >= kMod; value /= kMod) {
out->push_back(value % kMod + kMod);
}
out->push_back(value);
}
bool Parse(const char** data, size_t* size, uint32_t* out) {
*out = 0;
uint32_t mul = 1;
for (bool done = false; !done;
mul *= kMod, done = !(**data & kMod), ++*data, --*size) {
if (*size == 0) {
return false;
}
*out += static_cast<unsigned char>(**data) % kMod * mul;
}
return true;
}
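// ZigZag-encodes the signed value (0, -1, 1, -2, ... map to 0, 1, 2, 3, ...)
// so that small magnitudes serialize to few bytes.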
void Serialize(std::string* out, int32_t value) {
Serialize(out, static_cast<uint32_t>(
value < 0 ? static_cast<uint32_t>(-(value + 1)) * 2 + 1
: static_cast<uint32_t>(value) * 2));
}
bool Parse(const char** data, size_t* size, int32_t* out) {
uint32_t value = 0;
if (!Parse(data, size, &value)) {
return false;
}
const int32_t magnitude = value / 2;
*out = (value % 2) ? (-magnitude - 1) : magnitude;
return true;
}
template <class First, class Second>
void Serialize(std::string* out, const std::pair<First, Second>& in) {
Serialize(out, in.first);
Serialize(out, in.second);
}
template <class First, class Second>
bool Parse(const char** data, size_t* size, std::pair<First, Second>* out) {
return Parse(data, size, &(out->first)) && Parse(data, size, &(out->second));
}
template <class Value>
void Serialize(std::string* out, const std::vector<Value>& in) {
Serialize(out, static_cast<uint32_t>(in.size()));
for (const auto& val : in) {
Serialize(out, val);
}
}
template <class T>
bool Parse(const char** data, size_t* size, std::vector<T>* out) {
uint32_t num_elems = 0;
if (!Parse(data, size, &num_elems)) {
return false;
}
out->assign(num_elems, T{});
for (auto& elem : *out) {
if (!Parse(data, size, &elem)) {
return false;
}
}
return true;
}
}
namespace tflite {
std::string SerializeModelControlDependencies(
const ModelControlDependencies& in) {
std::string out;
Serialize(&out, kModelControlDependenciesMetadataVersion);
Serialize(&out, in);
return out;
}
bool ParseModelControlDependencies(const char* data, size_t size,
ModelControlDependencies* out) {
out->clear();
uint32_t version = 0;
return Parse(&data, &size, &version) &&
(version == kModelControlDependenciesMetadataVersion) &&
Parse(&data, &size, out) && (size == 0);
}
} | #include "tensorflow/compiler/mlir/lite/experimental/remat/metadata_util.h"
#include <cstdint>
#include <limits>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
class MetadataSerializerTest : public ::testing::Test {
protected:
static constexpr auto kHuge = std::numeric_limits<int32_t>::max();
static constexpr auto kTiny = std::numeric_limits<int32_t>::min();
std::string RoundTrip(const ModelControlDependencies &in) const {
ModelControlDependencies out = {{{-1, -1}}};
const std::string serialized =
tflite::SerializeModelControlDependencies(in);
return tflite::ParseModelControlDependencies(serialized.data(),
serialized.size(), &out)
? (out == in) ? "ok" : "mismatch"
: "malformed";
}
};
TEST_F(MetadataSerializerTest, nothing) { EXPECT_THAT(RoundTrip({}), "ok"); }
TEST_F(MetadataSerializerTest, something) {
EXPECT_THAT(
RoundTrip({{{1, 2}, {2, 3}, {4, 5}},
{},
{{kHuge, kTiny}, {kTiny, kHuge}, {kHuge - 1, kTiny + 1}},
{{1, 0}}}),
"ok");
}
}
} |
122 | #ifndef GLOG_INTERNAL_DEMANGLE_H
#define GLOG_INTERNAL_DEMANGLE_H
#include <cstddef>
#if defined(GLOG_USE_GLOG_EXPORT)
# include "glog/export.h"
#endif
#if !defined(GLOG_NO_EXPORT)
# error "demangle.h" was not included correctly.
#endif
namespace google {
inline namespace glog_internal_namespace_ {
bool GLOG_NO_EXPORT Demangle(const char* mangled, char* out, size_t out_size);
}
}
#endif
#include "demangle.h"
#include <algorithm>
#include <cstdlib>
#include <limits>
#include "utilities.h"
#if defined(HAVE___CXA_DEMANGLE)
# include <cxxabi.h>
#endif
#if defined(GLOG_OS_WINDOWS)
# include <dbghelp.h>
#endif
namespace google {
inline namespace glog_internal_namespace_ {
#if !defined(GLOG_OS_WINDOWS) && !defined(HAVE___CXA_DEMANGLE)
namespace {
struct AbbrevPair {
const char* const abbrev;
const char* const real_name;
};
const AbbrevPair kOperatorList[] = {
{"nw", "new"}, {"na", "new[]"}, {"dl", "delete"}, {"da", "delete[]"},
{"ps", "+"}, {"ng", "-"}, {"ad", "&"}, {"de", "*"},
{"co", "~"}, {"pl", "+"}, {"mi", "-"}, {"ml", "*"},
{"dv", "/"}, {"rm", "%"}, {"an", "&"}, {"or", "|"},
{"eo", "^"}, {"aS", "="}, {"pL", "+="}, {"mI", "-="},
{"mL", "*="}, {"dV", "/="}, {"rM", "%="}, {"aN", "&="},
{"oR", "|="}, {"eO", "^="}, {"ls", "<<"}, {"rs", ">>"},
{"lS", "<<="}, {"rS", ">>="}, {"eq", "=="}, {"ne", "!="},
{"lt", "<"}, {"gt", ">"}, {"le", "<="}, {"ge", ">="},
{"nt", "!"}, {"aa", "&&"}, {"oo", "||"}, {"pp", "++"},
{"mm", "--"}, {"cm", ","}, {"pm", "->*"}, {"pt", "->"},
{"cl", "()"}, {"ix", "[]"}, {"qu", "?"}, {"st", "sizeof"},
{"sz", "sizeof"}, {nullptr, nullptr},
};
const AbbrevPair kBuiltinTypeList[] = {
{"v", "void"}, {"w", "wchar_t"},
{"b", "bool"}, {"c", "char"},
{"a", "signed char"}, {"h", "unsigned char"},
{"s", "short"}, {"t", "unsigned short"},
{"i", "int"}, {"j", "unsigned int"},
{"l", "long"}, {"m", "unsigned long"},
{"x", "long long"}, {"y", "unsigned long long"},
{"n", "__int128"}, {"o", "unsigned __int128"},
{"f", "float"}, {"d", "double"},
{"e", "long double"}, {"g", "__float128"},
{"z", "ellipsis"}, {"Dn", "decltype(nullptr)"},
{nullptr, nullptr}};
const AbbrevPair kSubstitutionList[] = {
{"St", ""},
{"Sa", "allocator"},
{"Sb", "basic_string"},
{"Ss", "string"},
{"Si", "istream"},
{"So", "ostream"},
{"Sd", "iostream"},
{nullptr, nullptr}};
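// Parsing state shared by all parse functions. Parsers backtrack by taking a
// copy of the whole State and restoring it on failure.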
struct State {
const char* mangled_cur;
char* out_cur;
const char* out_begin;
const char* out_end;
const char* prev_name;
ssize_t prev_name_length;
short nest_level;
bool append;
bool overflowed;
uint32 local_level;
uint32 expr_level;
uint32 arg_level;
};
size_t StrLen(const char* str) {
size_t len = 0;
while (*str != '\0') {
++str;
++len;
}
return len;
}
bool AtLeastNumCharsRemaining(const char* str, ssize_t n) {
for (ssize_t i = 0; i < n; ++i) {
if (str[i] == '\0') {
return false;
}
}
return true;
}
bool StrPrefix(const char* str, const char* prefix) {
size_t i = 0;
while (str[i] != '\0' && prefix[i] != '\0' && str[i] == prefix[i]) {
++i;
}
return prefix[i] == '\0';
}
void InitState(State* state, const char* mangled, char* out, size_t out_size) {
state->mangled_cur = mangled;
state->out_cur = out;
state->out_begin = out;
state->out_end = out + out_size;
state->prev_name = nullptr;
state->prev_name_length = -1;
state->nest_level = -1;
state->append = true;
state->overflowed = false;
state->local_level = 0;
state->expr_level = 0;
state->arg_level = 0;
}
bool ParseOneCharToken(State* state, const char one_char_token) {
if (state->mangled_cur[0] == one_char_token) {
++state->mangled_cur;
return true;
}
return false;
}
bool ParseTwoCharToken(State* state, const char* two_char_token) {
if (state->mangled_cur[0] == two_char_token[0] &&
state->mangled_cur[1] == two_char_token[1]) {
state->mangled_cur += 2;
return true;
}
return false;
}
bool ParseCharClass(State* state, const char* char_class) {
const char* p = char_class;
for (; *p != '\0'; ++p) {
if (state->mangled_cur[0] == *p) {
++state->mangled_cur;
return true;
}
}
return false;
}
bool Optional(bool) { return true; }
using ParseFunc = bool (*)(State*);
bool OneOrMore(ParseFunc parse_func, State* state) {
if (parse_func(state)) {
while (parse_func(state)) {
}
return true;
}
return false;
}
bool ZeroOrMore(ParseFunc parse_func, State* state) {
while (parse_func(state)) {
}
return true;
}
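// Copies up to `length` characters of `str` into the output buffer, always
// leaving room for a terminating NUL; sets `overflowed` instead of writing
// past `out_end`.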
void Append(State* state, const char* const str, ssize_t length) {
if (state->out_cur == nullptr) {
state->overflowed = true;
return;
}
for (ssize_t i = 0; i < length; ++i) {
if (state->out_cur + 1 < state->out_end) {
*state->out_cur = str[i];
++state->out_cur;
} else {
state->overflowed = true;
break;
}
}
if (!state->overflowed) {
*state->out_cur = '\0';
}
}
bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
bool IsAlpha(char c) {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
}
bool IsDigit(char c) { return c >= '0' && c <= '9'; }
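// Returns true if `str` consists only of GCC function-clone suffixes of the
// form (".<alpha>+.<digit>+")+, e.g. ".clone.3" or ".isra.2.constprop.18".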
bool IsFunctionCloneSuffix(const char* str) {
size_t i = 0;
while (str[i] != '\0') {
if (str[i] != '.' || !IsAlpha(str[i + 1])) {
return false;
}
i += 2;
while (IsAlpha(str[i])) {
++i;
}
if (str[i] != '.' || !IsDigit(str[i + 1])) {
return false;
}
i += 2;
while (IsDigit(str[i])) {
++i;
}
}
return true;
}
void MaybeAppendWithLength(State* state, const char* const str,
ssize_t length) {
if (state->append && length > 0) {
if (str[0] == '<' && state->out_begin < state->out_cur &&
state->out_cur[-1] == '<') {
Append(state, " ", 1);
}
if (IsAlpha(str[0]) || str[0] == '_') {
state->prev_name = state->out_cur;
state->prev_name_length = length;
}
Append(state, str, length);
}
}
bool MaybeAppend(State* state, const char* const str) {
if (state->append) {
size_t length = StrLen(str);
MaybeAppendWithLength(state, str, static_cast<ssize_t>(length));
}
return true;
}
bool EnterNestedName(State* state) {
state->nest_level = 0;
return true;
}
bool LeaveNestedName(State* state, short prev_value) {
state->nest_level = prev_value;
return true;
}
bool DisableAppend(State* state) {
state->append = false;
return true;
}
bool RestoreAppend(State* state, bool prev_value) {
state->append = prev_value;
return true;
}
void MaybeIncreaseNestLevel(State* state) {
if (state->nest_level > -1) {
++state->nest_level;
}
}
void MaybeAppendSeparator(State* state) {
if (state->nest_level >= 1) {
MaybeAppend(state, "::");
}
}
void MaybeCancelLastSeparator(State* state) {
if (state->nest_level >= 1 && state->append &&
state->out_begin <= state->out_cur - 2) {
state->out_cur -= 2;
*state->out_cur = '\0';
}
}
bool IdentifierIsAnonymousNamespace(State* state, ssize_t length) {
const char anon_prefix[] = "_GLOBAL__N_";
  return (length > static_cast<ssize_t>(sizeof(anon_prefix)) - 1 &&
          StrPrefix(state->mangled_cur, anon_prefix));
}
bool ParseMangledName(State* state);
bool ParseEncoding(State* state);
bool ParseName(State* state);
bool ParseUnscopedName(State* state);
bool ParseUnscopedTemplateName(State* state);
bool ParseNestedName(State* state);
bool ParsePrefix(State* state);
bool ParseUnqualifiedName(State* state);
bool ParseSourceName(State* state);
bool ParseLocalSourceName(State* state);
bool ParseNumber(State* state, int* number_out);
bool ParseFloatNumber(State* state);
bool ParseSeqId(State* state);
bool ParseIdentifier(State* state, ssize_t length);
bool ParseAbiTags(State* state);
bool ParseAbiTag(State* state);
bool ParseOperatorName(State* state);
bool ParseSpecialName(State* state);
bool ParseCallOffset(State* state);
bool ParseNVOffset(State* state);
bool ParseVOffset(State* state);
bool ParseCtorDtorName(State* state);
bool ParseType(State* state);
bool ParseCVQualifiers(State* state);
bool ParseBuiltinType(State* state);
bool ParseFunctionType(State* state);
bool ParseBareFunctionType(State* state);
bool ParseClassEnumType(State* state);
bool ParseArrayType(State* state);
bool ParsePointerToMemberType(State* state);
bool ParseTemplateParam(State* state);
bool ParseTemplateTemplateParam(State* state);
bool ParseTemplateArgs(State* state);
bool ParseTemplateArg(State* state);
bool ParseExpression(State* state);
bool ParseExprPrimary(State* state);
bool ParseLocalName(State* state);
bool ParseDiscriminator(State* state);
bool ParseSubstitution(State* state);
bool ParseMangledName(State* state) {
return ParseTwoCharToken(state, "_Z") && ParseEncoding(state);
}
bool ParseEncoding(State* state) {
State copy = *state;
if (ParseName(state) && ParseBareFunctionType(state)) {
return true;
}
*state = copy;
if (ParseName(state) || ParseSpecialName(state)) {
return true;
}
return false;
}
bool ParseName(State* state) {
if (ParseNestedName(state) || ParseLocalName(state)) {
return true;
}
State copy = *state;
if (ParseUnscopedTemplateName(state) && ParseTemplateArgs(state)) {
return true;
}
*state = copy;
if (ParseUnscopedName(state)) {
return true;
}
return false;
}
bool ParseUnscopedName(State* state) {
if (ParseUnqualifiedName(state)) {
return true;
}
State copy = *state;
if (ParseTwoCharToken(state, "St") && MaybeAppend(state, "std::") &&
ParseUnqualifiedName(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseUnscopedTemplateName(State* state) {
return ParseUnscopedName(state) || ParseSubstitution(state);
}
bool ParseNestedName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'N') && EnterNestedName(state) &&
Optional(ParseCVQualifiers(state)) && ParsePrefix(state) &&
LeaveNestedName(state, copy.nest_level) &&
ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
return false;
}
bool ParsePrefix(State* state) {
bool has_something = false;
while (true) {
MaybeAppendSeparator(state);
if (ParseTemplateParam(state) || ParseSubstitution(state) ||
ParseUnscopedName(state)) {
has_something = true;
MaybeIncreaseNestLevel(state);
continue;
}
MaybeCancelLastSeparator(state);
if (has_something && ParseTemplateArgs(state)) {
return ParsePrefix(state);
} else {
break;
}
}
return true;
}
bool ParseUnqualifiedName(State* state) {
return (ParseOperatorName(state) || ParseCtorDtorName(state) ||
(ParseSourceName(state) && Optional(ParseAbiTags(state))) ||
(ParseLocalSourceName(state) && Optional(ParseAbiTags(state))));
}
bool ParseSourceName(State* state) {
State copy = *state;
int length = -1;
if (ParseNumber(state, &length) && ParseIdentifier(state, length)) {
return true;
}
*state = copy;
return false;
}
bool ParseLocalSourceName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'L') && ParseSourceName(state) &&
Optional(ParseDiscriminator(state))) {
return true;
}
*state = copy;
return false;
}
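// Parses a decimal <number> (optionally negative, prefixed with 'n'),
// rejecting values whose magnitude would overflow int.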
bool ParseNumber(State* state, int* number_out) {
int sign = 1;
if (ParseOneCharToken(state, 'n')) {
sign = -1;
}
const char* p = state->mangled_cur;
int number = 0;
constexpr int int_max_by_10 = std::numeric_limits<int>::max() / 10;
for (; *p != '\0'; ++p) {
if (IsDigit(*p)) {
if (number > int_max_by_10) {
return false;
}
const int digit = *p - '0';
const int shifted = number * 10;
if (digit > std::numeric_limits<int>::max() - shifted) {
return false;
}
number = shifted + digit;
} else {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
if (number_out != nullptr) {
*number_out = number * sign;
}
return true;
}
return false;
}
bool ParseFloatNumber(State* state) {
const char* p = state->mangled_cur;
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'a' && *p <= 'f')) {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
return true;
}
return false;
}
bool ParseSeqId(State* state) {
const char* p = state->mangled_cur;
for (; *p != '\0'; ++p) {
if (!IsDigit(*p) && !(*p >= 'A' && *p <= 'Z')) {
break;
}
}
if (p != state->mangled_cur) {
state->mangled_cur = p;
return true;
}
return false;
}
bool ParseIdentifier(State* state, ssize_t length) {
if (length == -1 || !AtLeastNumCharsRemaining(state->mangled_cur, length)) {
return false;
}
if (IdentifierIsAnonymousNamespace(state, length)) {
MaybeAppend(state, "(anonymous namespace)");
} else {
MaybeAppendWithLength(state, state->mangled_cur, length);
}
if (length < 0 ||
static_cast<std::size_t>(length) > StrLen(state->mangled_cur)) {
return false;
}
state->mangled_cur += length;
return true;
}
bool ParseAbiTags(State* state) {
State copy = *state;
DisableAppend(state);
if (OneOrMore(ParseAbiTag, state)) {
RestoreAppend(state, copy.append);
return true;
}
*state = copy;
return false;
}
bool ParseAbiTag(State* state) {
return ParseOneCharToken(state, 'B') && ParseSourceName(state);
}
bool ParseOperatorName(State* state) {
if (!AtLeastNumCharsRemaining(state->mangled_cur, 2)) {
return false;
}
State copy = *state;
if (ParseTwoCharToken(state, "cv") && MaybeAppend(state, "operator ") &&
EnterNestedName(state) && ParseType(state) &&
LeaveNestedName(state, copy.nest_level)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'v') && ParseCharClass(state, "0123456789") &&
ParseSourceName(state)) {
return true;
}
*state = copy;
if (!(IsLower(state->mangled_cur[0]) && IsAlpha(state->mangled_cur[1]))) {
return false;
}
const AbbrevPair* p;
for (p = kOperatorList; p->abbrev != nullptr; ++p) {
if (state->mangled_cur[0] == p->abbrev[0] &&
state->mangled_cur[1] == p->abbrev[1]) {
MaybeAppend(state, "operator");
if (IsLower(*p->real_name)) {
MaybeAppend(state, " ");
}
MaybeAppend(state, p->real_name);
state->mangled_cur += 2;
return true;
}
}
return false;
}
bool ParseSpecialName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "VTIS") &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "Tc") && ParseCallOffset(state) &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GV") && ParseName(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCallOffset(state) &&
ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "TC") && ParseType(state) &&
ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
DisableAppend(state) && ParseType(state)) {
RestoreAppend(state, copy.append);
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "FJ") &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GR") && ParseName(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "GA") && ParseEncoding(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'T') && ParseCharClass(state, "hv") &&
ParseCallOffset(state) && ParseEncoding(state)) {
return true;
}
*state = copy;
return false;
}
bool ParseCallOffset(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'h') && ParseNVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'v') && ParseVOffset(state) &&
ParseOneCharToken(state, '_')) {
return true;
}
*state = copy;
return false;
}
bool ParseNVOffset(State* state) { return ParseNumber(state, nullptr); }
bool ParseVOffset(State* state) {
State copy = *state;
if (ParseNumber(state, nullptr) && ParseOneCharToken(state, '_') &&
ParseNumber(state, nullptr)) {
return true;
}
*state = copy;
return false;
}
bool ParseCtorDtorName(State* state) {
State copy = *state;
if (ParseOneCharToken(state, 'C') && ParseCharClass(state, "123")) {
const char* const prev_name = state->prev_name;
const ssize_t prev_name_length = state->prev_name_length;
MaybeAppendWithLength(state, prev_name, prev_name_length);
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "012")) {
const char* const prev_name = state->prev_name;
const ssize_t prev_name_length = state->prev_name_length;
MaybeAppend(state, "~");
MaybeAppendWithLength(state, prev_name, prev_name_length);
return true;
}
*state = copy;
return false;
}
bool ParseType(State* state) {
State copy = *state;
if (ParseCVQualifiers(state) && ParseType(state)) {
return true;
}
*state = copy;
if (ParseCharClass(state, "OPRCG") && ParseType(state)) {
return true;
}
*state = copy;
if (ParseTwoCharToken(state, "Dp") && ParseType(state)) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'D') && ParseCharClass(state, "tT") &&
ParseExpression(state) && ParseOneCharToken(state, 'E')) {
return true;
}
*state = copy;
if (ParseOneCharToken(state, 'U') && ParseSourceName(state) &&
ParseType(state)) {
return true;
}
*state = copy;
if (ParseBuiltinType(state) || ParseFunctionType(state) ||
ParseClassEnumType(state) || ParseArrayType(state) ||
ParsePointerToMemberType(s | #include "demangle.h"
#include <fstream>
#include <iostream>
#include <string>
#include "config.h"
#include "glog/logging.h"
#include "googletest.h"
#include "utilities.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
GLOG_DEFINE_bool(demangle_filter, false,
"Run demangle_unittest in filter mode");
using namespace std;
using namespace google;
static const char* DemangleIt(const char* const mangled) {
static char demangled[4096];
if (Demangle(mangled, demangled, sizeof(demangled))) {
return demangled;
} else {
return mangled;
}
}
#if defined(GLOG_OS_WINDOWS)
# if defined(HAVE_DBGHELP) && !defined(NDEBUG)
TEST(Demangle, Windows) {
EXPECT_STREQ("public: static void __cdecl Foo::func(int)",
DemangleIt("?func@Foo@@SAXH@Z"));
EXPECT_STREQ("public: static void __cdecl Foo::func(int)",
DemangleIt("@ILT+1105(?func@Foo@@SAXH@Z)"));
EXPECT_STREQ("int __cdecl foobarArray(int * const)",
DemangleIt("?foobarArray@@YAHQAH@Z"));
}
# endif
#else
TEST(Demangle, CornerCases) {
const size_t size = 10;
char tmp[size] = {0};
const char* demangled = "foobar()";
const char* mangled = "_Z6foobarv";
EXPECT_TRUE(Demangle(mangled, tmp, sizeof(tmp)));
EXPECT_STREQ(demangled, tmp);
EXPECT_TRUE(Demangle(mangled, tmp, size - 1));
EXPECT_STREQ(demangled, tmp);
EXPECT_FALSE(Demangle(mangled, tmp, size - 2));
EXPECT_FALSE(Demangle(mangled, tmp, 1));
EXPECT_FALSE(Demangle(mangled, tmp, 0));
EXPECT_FALSE(Demangle(mangled, nullptr, 0));
}
TEST(Demangle, Clones) {
char tmp[20];
EXPECT_TRUE(Demangle("_ZL3Foov", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.clone.3", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.constprop.80", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_TRUE(Demangle("_ZL3Foov.isra.2.constprop.18", tmp, sizeof(tmp)));
EXPECT_STREQ("Foo()", tmp);
EXPECT_FALSE(Demangle("_ZL3Foov.clo", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.clone.foo", tmp, sizeof(tmp)));
EXPECT_FALSE(Demangle("_ZL3Foov.isra.2.constprop.", tmp, sizeof(tmp)));
}
TEST(Demangle, FromFile) {
string test_file = FLAGS_test_srcdir + "/src/demangle_unittest.txt";
ifstream f(test_file.c_str());
EXPECT_FALSE(f.fail());
string line;
while (getline(f, line)) {
if (line.empty() || line[0] == '#') {
continue;
}
string::size_type tab_pos = line.find('\t');
EXPECT_NE(string::npos, tab_pos);
string mangled = line.substr(0, tab_pos);
string demangled = line.substr(tab_pos + 1);
EXPECT_EQ(demangled, DemangleIt(mangled.c_str()));
}
}
#endif
int main(int argc, char** argv) {
InitGoogleTest(&argc, argv);
#ifdef GLOG_USE_GFLAGS
ParseCommandLineFlags(&argc, &argv, true);
#endif
FLAGS_logtostderr = true;
InitGoogleLogging(argv[0]);
if (FLAGS_demangle_filter) {
string line;
while (getline(cin, line, '\n')) {
cout << DemangleIt(line.c_str()) << endl;
}
return 0;
} else if (argc > 1) {
cout << DemangleIt(argv[1]) << endl;
return 0;
} else {
return RUN_ALL_TESTS();
}
} |
123 | #ifndef TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
#define TENSORFLOW_CORE_GRAPPLER_OPTIMIZERS_DATA_NOOP_ELIMINATION_H_
#include "tensorflow/core/grappler/optimizers/data/optimizer_base.h"
namespace tensorflow {
namespace grappler {
class NoOpElimination : public TFDataOptimizerBase {
public:
NoOpElimination() = default;
~NoOpElimination() override = default;
string name() const override { return "noop_elimination"; };
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status OptimizeAndCollectStats(Cluster* cluster, const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) override;
};
}
}
#endif
#include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kIdentity[] = "Identity";
bool IsTakeAll(const NodeDef& take_node, const MutableGraphView& graph) {
if (take_node.op() != "TakeDataset") return false;
const auto& count_node = *graph.GetNode(take_node.input(1));
if (count_node.op() != "Const") return false;
const auto& tensor = count_node.attr().at("value").tensor();
if (tensor.int64_val_size()) return tensor.int64_val(0) < 0;
return false;
}
bool IsConstNodeWithValue(const NodeDef& node, int value) {
if (node.op() != "Const") return false;
const auto& tensor = node.attr().at("value").tensor();
if (tensor.int64_val_size()) return tensor.int64_val(0) == value;
return value == 0;
}
bool IsSkipNone(const NodeDef& skip_node, const MutableGraphView& graph) {
if (skip_node.op() != "SkipDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(skip_node.input(1)), 0);
}
bool IsRepeatOne(const NodeDef& repeat_node, const MutableGraphView& graph) {
if (repeat_node.op() != "RepeatDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(repeat_node.input(1)), 1);
}
bool IsShardOne(const NodeDef& shard_node, const MutableGraphView& graph) {
if (shard_node.op() != "ShardDataset") return false;
return IsConstNodeWithValue(*graph.GetNode(shard_node.input(1)), 1);
}
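// Returns true if `output_arg` of `fdef` is `input_arg` passed through a
// (possibly empty) chain of Identity nodes.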
bool IsOutputIdentityOfInput(const FunctionDef& fdef, const string& output_arg,
const string& input_arg) {
if (!fdef.ret().contains(output_arg)) {
LOG(WARNING)
<< "Malformed FunctionDef: ret dict does not contain output arg key.";
return false;
}
const auto& ret_val = fdef.ret().at(output_arg);
auto input = function_utils::FunctionDefTensorDesc(ret_val);
while (function_utils::ContainsFunctionNodeWithName(input.node_name, fdef)) {
int idx = function_utils::FindFunctionNodeWithName(input.node_name, fdef);
const NodeDef& node = fdef.node_def(idx);
if (node.op() != kIdentity) {
return false;
}
input = function_utils::FunctionDefTensorDesc(node.input(0));
}
return input.node_name == input_arg;
}
bool IsMapIdentity(const NodeDef& map_node, const MutableGraphView& graph,
const FunctionLibraryDefinition& function_library) {
if (map_node.op() != "MapDataset" && map_node.op() != "ParallelMapDataset" &&
map_node.op() != "ParallelMapDatasetV2") {
return false;
}
if (map_node.attr().at("Targuments").list().type_size() != 0) return false;
  const FunctionDef* fdef =
      function_library.Find(map_node.attr().at("f").func().name());
  if (fdef == nullptr) return false;
  if (function_utils::IsFunctionStateful(function_library, *fdef)) {
return false;
}
const auto& sig = fdef->signature();
if (sig.input_arg_size() != sig.output_arg_size()) return false;
for (int i = 0; i < sig.input_arg_size(); ++i) {
if (!IsOutputIdentityOfInput(*fdef, sig.output_arg(i).name(),
sig.input_arg(i).name())) {
return false;
}
}
return true;
}
bool IsNoOp(const NodeDef& node, const MutableGraphView& graph,
const FunctionLibraryDefinition& function_library) {
return IsTakeAll(node, graph) || IsSkipNone(node, graph) ||
IsRepeatOne(node, graph) || IsShardOne(node, graph) ||
IsMapIdentity(node, graph, function_library);
}
}
Status NoOpElimination::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
graph.graph()->library());
for (const NodeDef& node : item.graph.node()) {
if (!IsNoOp(node, graph, function_library)) continue;
NodeDef* const parent = graph_utils::GetInputNode(node, graph);
TF_RETURN_IF_ERROR(graph.UpdateFanouts(node.name(), parent->name()));
nodes_to_delete.insert(node.name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(NoOpElimination, "noop_elimination");
}
} | #include "tensorflow/core/grappler/optimizers/data/noop_elimination.h"
#include <tuple>
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<std::pair<string, AttrValue>> GetCommonAttributes() {
AttrValue shapes_attr, types_attr;
SetAttrValue("output_shapes", &shapes_attr);
SetAttrValue("output_types", &types_attr);
std::vector<std::pair<string, AttrValue>> commonAttributes = {
{"output_shapes", shapes_attr}, {"output_types", types_attr}};
return commonAttributes;
}
NodeDef *MakeNode(StringPiece node_type, std::vector<int> params,
string input_node, MutableGraphView *graph) {
std::vector<NodeDef *> node_params;
for (int param : params) {
node_params.push_back(
graph_utils::AddScalarConstNode<int64_t>(param, graph));
}
std::vector<string> inputs = {input_node};
for (int i = 0; i < node_params.size(); i++) {
inputs.push_back(node_params[i]->name());
}
return graph_utils::AddNode("", node_type, inputs, GetCommonAttributes(),
graph);
}
NodeDef *MakeNonConstNode(StringPiece node_type,
std::vector<DataType> param_dtypes, string input_node,
MutableGraphView *graph) {
std::vector<NodeDef *> node_params;
for (DataType dtype : param_dtypes) {
node_params.push_back(graph_utils::AddScalarPlaceholder(dtype, graph));
}
std::vector<string> inputs = {input_node};
for (int i = 0; i < node_params.size(); i++) {
inputs.push_back(node_params[i]->name());
}
return graph_utils::AddNode("", node_type, inputs, GetCommonAttributes(),
graph);
}
NodeDef *MakeCacheNode(string input_node, MutableGraphView *graph) {
NodeDef *node_filename =
graph_utils::AddScalarConstNode<StringPiece>("", graph);
return graph_utils::AddNode("", "CacheDataset",
{std::move(input_node), node_filename->name()},
GetCommonAttributes(), graph);
}
NodeDef *MakeRangeNode(MutableGraphView *graph) {
auto *start_node = graph_utils::AddScalarConstNode<int64_t>(0, graph);
auto *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, graph);
auto *step_node = graph_utils::AddScalarConstNode<int64_t>(1, graph);
std::vector<string> range_inputs = {start_node->name(), stop_node->name(),
step_node->name()};
return graph_utils::AddNode("", "RangeDataset", range_inputs,
GetCommonAttributes(), graph);
}
struct NoOpLastEliminationTest
: ::testing::TestWithParam<std::tuple<string, std::vector<int>, bool>> {};
TEST_P(NoOpLastEliminationTest, EliminateLastNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
const string &node_type = std::get<0>(GetParam());
const std::vector<int> node_params = std::get<1>(GetParam());
const bool should_keep_node = std::get<2>(GetParam());
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *node = MakeNode(node_type, node_params, range_node->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName(node->name(), output),
should_keep_node);
}
INSTANTIATE_TEST_CASE_P(
BasicRemovalTest, NoOpLastEliminationTest,
::testing::Values(
std::make_tuple("TakeDataset", std::vector<int>({-3}), false),
std::make_tuple("TakeDataset", std::vector<int>({-1}), false),
std::make_tuple("TakeDataset", std::vector<int>({0}), true),
std::make_tuple("TakeDataset", std::vector<int>({3}), true),
std::make_tuple("SkipDataset", std::vector<int>({-1}), true),
std::make_tuple("SkipDataset", std::vector<int>({0}), false),
std::make_tuple("SkipDataset", std::vector<int>({3}), true),
std::make_tuple("RepeatDataset", std::vector<int>({1}), false),
std::make_tuple("RepeatDataset", std::vector<int>({2}), true),
std::make_tuple("ShardDataset", std::vector<int>({1, 0}), false),
std::make_tuple("ShardDataset", std::vector<int>({2, 0}), true)));
struct NoOpMiddleEliminationTest
: ::testing::TestWithParam<std::tuple<string, std::vector<int>, bool>> {};
TEST_P(NoOpMiddleEliminationTest, EliminateMiddleNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
const string &node_type = std::get<0>(GetParam());
const std::vector<int> node_params = std::get<1>(GetParam());
const bool should_keep_node = std::get<2>(GetParam());
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *node = MakeNode(node_type, node_params, range_node->name(), &graph);
NodeDef *cache_node = MakeCacheNode(node->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName(node->name(), output),
should_keep_node);
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName(cache_node->name(), output));
NodeDef cache_node_out = output.node(
graph_utils::FindGraphNodeWithName(cache_node->name(), output));
EXPECT_EQ(cache_node_out.input_size(), 2);
auto last_node_input = (should_keep_node ? node : range_node)->name();
EXPECT_EQ(cache_node_out.input(0), last_node_input);
}
INSTANTIATE_TEST_CASE_P(
BasicRemovalTest, NoOpMiddleEliminationTest,
::testing::Values(
std::make_tuple("TakeDataset", std::vector<int>({-1}), false),
std::make_tuple("TakeDataset", std::vector<int>({-3}), false),
std::make_tuple("TakeDataset", std::vector<int>({0}), true),
std::make_tuple("TakeDataset", std::vector<int>({3}), true),
std::make_tuple("SkipDataset", std::vector<int>({-1}), true),
std::make_tuple("SkipDataset", std::vector<int>({0}), false),
std::make_tuple("SkipDataset", std::vector<int>({3}), true),
std::make_tuple("RepeatDataset", std::vector<int>({1}), false),
std::make_tuple("RepeatDataset", std::vector<int>({2}), true),
std::make_tuple("ShardDataset", std::vector<int>({1, 0}), false),
std::make_tuple("ShardDataset", std::vector<int>({2, 0}), true)));
using NodesTypes = std::tuple<std::pair<string, std::vector<int>>,
std::pair<string, std::vector<int>>>;
struct NoOpMultipleEliminationTest : ::testing::TestWithParam<NodesTypes> {};
TEST_P(NoOpMultipleEliminationTest, EliminateMultipleNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
static_assert(std::tuple_size<NodesTypes>::value == 2,
"Make sure to include everything in the test");
const std::vector<std::pair<string, std::vector<int>>> noop_nodes = {
std::get<0>(GetParam()), std::get<1>(GetParam())};
NodeDef *range_node = MakeRangeNode(&graph);
NodeDef *previous = range_node;
std::vector<string> nodes_to_remove;
nodes_to_remove.reserve(noop_nodes.size());
for (const auto &noop_node : noop_nodes) {
NodeDef *node =
MakeNode(noop_node.first, noop_node.second, previous->name(), &graph);
nodes_to_remove.push_back(node->name());
previous = node;
}
NodeDef *cache_node = MakeCacheNode(previous->name(), &graph);
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
for (const auto &noop_node_name : nodes_to_remove)
EXPECT_FALSE(
graph_utils::ContainsGraphNodeWithName(noop_node_name, output));
EXPECT_TRUE(
graph_utils::ContainsGraphNodeWithName(cache_node->name(), output));
NodeDef cache_node_out = output.node(
graph_utils::FindGraphNodeWithName(cache_node->name(), output));
EXPECT_EQ(cache_node_out.input_size(), 2);
EXPECT_EQ(cache_node_out.input(0), range_node->name());
}
const auto *const kTakeNode =
new std::pair<string, std::vector<int>>{"TakeDataset", {-1}};
const auto *const kSkipNode =
new std::pair<string, std::vector<int>>{"SkipDataset", {0}};
const auto *const kRepeatNode =
new std::pair<string, std::vector<int>>{"RepeatDataset", {1}};
const auto *const kShardNode =
new std::pair<string, std::vector<int>>{"ShardDataset", {1, 0}};
INSTANTIATE_TEST_CASE_P(
BasicRemovalTest, NoOpMultipleEliminationTest,
::testing::Combine(
::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode, *kShardNode),
::testing::Values(*kTakeNode, *kSkipNode, *kRepeatNode, *kShardNode)));
struct NoOpPlaceholdersTest
: ::testing::TestWithParam<
std::tuple<std::pair<string, std::vector<DataType>>,
std::pair<string, std::vector<DataType>>>> {};
TEST_P(NoOpPlaceholdersTest, NonConstNoOpNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
static_assert(std::tuple_size<NodesTypes>::value == 2,
"Make sure to include everything in the test");
const std::vector<std::pair<string, std::vector<DataType>>> noop_nodes = {
std::get<0>(GetParam()), std::get<1>(GetParam())};
NodeDef *range_node = MakeRangeNode(&graph);
std::vector<string> nodes_to_keep;
nodes_to_keep.reserve(noop_nodes.size());
NodeDef *previous = range_node;
for (const auto &noop_node : noop_nodes) {
NodeDef *node = MakeNonConstNode(noop_node.first, noop_node.second,
previous->name(), &graph);
nodes_to_keep.push_back(node->name());
previous = node;
}
NoOpElimination optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
for (const auto &noop_node_name : nodes_to_keep)
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName(noop_node_name, output));
}
const auto *const kNonConstTakeNode =
new std::pair<string, std::vector<DataType>>{"TakeDataset", {DT_INT32}};
const auto *const kNonConstSkipNode =
new std::pair<string, std::vector<DataType>>{"SkipDataset", {DT_INT32}};
const auto *const kNonConstRepeatNode =
new std::pair<string, std::vector<DataType>>{"RepeatDataset", {DT_INT32}};
const auto *const kNonConstShardNode =
new std::pair<string, std::vector<DataType>>{"ShardDataset",
{DT_INT32, DT_INT32}};
INSTANTIATE_TEST_CASE_P(
DoNotRemovePlaceholders, NoOpPlaceholdersTest,
::testing::Combine(::testing::Values(*kNonConstTakeNode, *kNonConstSkipNode,
*kNonConstRepeatNode,
*kNonConstShardNode),
::testing::Values(*kNonConstTakeNode, *kNonConstSkipNode,
*kNonConstRepeatNode,
*kNonConstShardNode)));
}
}
} |
124 | #ifndef TENSORFLOW_CORE_SUMMARY_SCHEMA_H_
#define TENSORFLOW_CORE_SUMMARY_SCHEMA_H_
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/db/sqlite.h"
namespace tensorflow {
constexpr uint32 kTensorboardSqliteApplicationId = 0xfeedabee;
Status SetupTensorboardSqliteDb(Sqlite* db);
}
#endif
#include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
Status Run(Sqlite* db, const char* sql) {
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
return stmt.StepAndReset();
}
}
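// Minimal usage sketch for the helper above (assumes an open Sqlite* db):
//   TF_RETURN_IF_ERROR(Run(db, "CREATE TABLE IF NOT EXISTS T (id INTEGER)"));
// Each statement is prepared once and executed via StepAndReset.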
Status SetupTensorboardSqliteDb(Sqlite* db) {
TF_RETURN_IF_ERROR(
db->PrepareOrDie(strings::StrCat("PRAGMA application_id=",
kTensorboardSqliteApplicationId))
.StepAndReset());
db->PrepareOrDie("PRAGMA user_version=0").StepAndResetOrDie();
Status s;
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Ids (
id INTEGER PRIMARY KEY
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Descriptions (
id INTEGER PRIMARY KEY,
description TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tensors (
rowid INTEGER PRIMARY KEY,
series INTEGER,
step INTEGER,
dtype INTEGER,
computed_time REAL,
shape TEXT,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TensorSeriesStepIndex
ON
Tensors (series, step)
WHERE
series IS NOT NULL
AND step IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS TensorStrings (
rowid INTEGER PRIMARY KEY,
tensor_rowid INTEGER NOT NULL,
idx INTEGER NOT NULL,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TensorStringIndex
ON TensorStrings (tensor_rowid, idx)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tags (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
tag_id INTEGER NOT NULL,
inserted_time DOUBLE,
tag_name TEXT,
display_name TEXT,
plugin_name TEXT,
plugin_data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TagIdIndex
ON Tags (tag_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TagRunNameIndex
ON
Tags (run_id, tag_name)
WHERE
run_id IS NOT NULL
AND tag_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Runs (
rowid INTEGER PRIMARY KEY,
experiment_id INTEGER,
run_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
finished_time REAL,
run_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunIdIndex
ON Runs (run_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunNameIndex
ON Runs (experiment_id, run_name)
WHERE run_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Experiments (
rowid INTEGER PRIMARY KEY,
user_id INTEGER,
experiment_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
is_watching INTEGER,
experiment_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentIdIndex
ON Experiments (experiment_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentNameIndex
ON Experiments (user_id, experiment_name)
WHERE experiment_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Users (
rowid INTEGER PRIMARY KEY,
user_id INTEGER NOT NULL,
inserted_time REAL,
user_name TEXT,
email TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserIdIndex
ON Users (user_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserNameIndex
ON Users (user_name)
WHERE user_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserEmailIndex
ON Users (email)
WHERE email IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Graphs (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
graph_id INTEGER NOT NULL,
inserted_time REAL,
graph_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphIdIndex
ON Graphs (graph_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphRunIndex
ON Graphs (run_id)
WHERE run_id IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Nodes (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
node_name TEXT,
op TEXT,
device TEXT,
node_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeIdIndex
ON Nodes (graph_id, node_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeNameIndex
ON Nodes (graph_id, node_name)
WHERE node_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS NodeInputs (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
idx INTEGER NOT NULL,
input_node_id INTEGER NOT NULL,
input_node_idx INTEGER,
is_control INTEGER
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeInputsIndex
ON NodeInputs (graph_id, node_id, idx)
)sql"));
return s;
}
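// Note on the pattern above: every DDL statement runs unconditionally, and
// Status::Update records only the first failure while the statements that
// follow it still execute, so the schema is created as completely as possible.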
} | #include "tensorflow/core/summary/schema.h"
#include <memory>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(SchemaTest, SmokeTestTensorboardSchema) {
Sqlite* db;
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db));
core::ScopedUnref unref_db(db);
TF_ASSERT_OK(SetupTensorboardSqliteDb(db));
}
}
} |
125 | #ifndef XLA_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_
#define XLA_TSL_FRAMEWORK_TRACKING_ALLOCATOR_H_
#include <unordered_map>
#include "xla/tsl/framework/allocator.h"
#include "tsl/lib/gtl/inlined_vector.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
#include "tsl/platform/types.h"
namespace tsl {
struct AllocRecord {
AllocRecord(int64_t a_bytes, int64_t a_micros)
: alloc_bytes(a_bytes), alloc_micros(a_micros) {}
AllocRecord() : AllocRecord(0, 0) {}
int64_t alloc_bytes;
int64_t alloc_micros;
};
class TrackingAllocator : public Allocator {
public:
explicit TrackingAllocator(Allocator* allocator, bool track_ids);
std::string Name() override { return allocator_->Name(); }
void* AllocateRaw(size_t alignment, size_t num_bytes) override {
return AllocateRaw(alignment, num_bytes, AllocationAttributes());
}
void* AllocateRaw(size_t alignment, size_t num_bytes,
const AllocationAttributes& allocation_attr) override;
void DeallocateRaw(void* ptr) override;
bool TracksAllocationSizes() const override;
size_t RequestedSize(const void* ptr) const override;
size_t AllocatedSize(const void* ptr) const override;
int64_t AllocationId(const void* ptr) const override;
absl::optional<AllocatorStats> GetStats() override;
bool ClearStats() override;
AllocatorMemoryType GetMemoryType() const override {
return allocator_->GetMemoryType();
}
std::tuple<size_t, size_t, size_t> GetSizes();
absl::InlinedVector<AllocRecord, 4UL> GetRecordsAndUnRef();
absl::InlinedVector<AllocRecord, 4UL> GetCurrentRecords();
protected:
~TrackingAllocator() override {}
private:
bool UnRef() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Allocator* allocator_;
mutable mutex mu_;
int ref_ TF_GUARDED_BY(mu_);
size_t allocated_ TF_GUARDED_BY(mu_);
size_t high_watermark_ TF_GUARDED_BY(mu_);
size_t total_bytes_ TF_GUARDED_BY(mu_);
absl::InlinedVector<AllocRecord, 4UL> allocations_ TF_GUARDED_BY(mu_);
const bool track_sizes_locally_;
struct Chunk {
size_t requested_size;
size_t allocated_size;
int64_t allocation_id;
};
std::unordered_map<const void*, Chunk> in_use_ TF_GUARDED_BY(mu_);
int64_t next_allocation_id_ TF_GUARDED_BY(mu_);
};
}
#endif
#include "xla/tsl/framework/tracking_allocator.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
namespace tsl {
TrackingAllocator::TrackingAllocator(Allocator* allocator, bool track_sizes)
: allocator_(allocator),
ref_(1),
allocated_(0),
high_watermark_(0),
total_bytes_(0),
track_sizes_locally_(track_sizes && !allocator_->TracksAllocationSizes()),
next_allocation_id_(0) {}
void* TrackingAllocator::AllocateRaw(
size_t alignment, size_t num_bytes,
const AllocationAttributes& allocation_attr) {
void* ptr = allocator_->AllocateRaw(alignment, num_bytes, allocation_attr);
if (nullptr == ptr) {
return ptr;
}
if (allocator_->TracksAllocationSizes()) {
size_t allocated_bytes = allocator_->AllocatedSize(ptr);
{
mutex_lock lock(mu_);
allocated_ += allocated_bytes;
high_watermark_ = std::max(high_watermark_, allocated_);
total_bytes_ += allocated_bytes;
allocations_.emplace_back(allocated_bytes, Env::Default()->NowMicros());
++ref_;
}
} else if (track_sizes_locally_) {
size_t allocated_bytes = allocator_->AllocatedSizeSlow(ptr);
allocated_bytes = std::max(num_bytes, allocated_bytes);
mutex_lock lock(mu_);
next_allocation_id_ += 1;
Chunk chunk = {num_bytes, allocated_bytes, next_allocation_id_};
in_use_.emplace(std::make_pair(ptr, chunk));
allocated_ += allocated_bytes;
high_watermark_ = std::max(high_watermark_, allocated_);
total_bytes_ += allocated_bytes;
allocations_.emplace_back(allocated_bytes, Env::Default()->NowMicros());
++ref_;
} else {
mutex_lock lock(mu_);
total_bytes_ += num_bytes;
allocations_.emplace_back(num_bytes, Env::Default()->NowMicros());
++ref_;
}
return ptr;
}
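// Three tracking modes are visible above: (1) the wrapped allocator tracks
// sizes, so bytes come from AllocatedSize(); (2) local tracking, where a
// Chunk records requested/allocated size plus an allocation id in in_use_;
// (3) no tracking, where only requested bytes accumulate into total_bytes_.
// In every mode a successful allocation takes a reference on the tracker.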
void TrackingAllocator::DeallocateRaw(void* ptr) {
if (nullptr == ptr) {
return;
}
bool should_delete;
bool tracks_allocation_sizes = allocator_->TracksAllocationSizes();
size_t allocated_bytes = 0;
if (tracks_allocation_sizes) {
allocated_bytes = allocator_->AllocatedSize(ptr);
} else if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto itr = in_use_.find(ptr);
if (itr != in_use_.end()) {
tracks_allocation_sizes = true;
allocated_bytes = (*itr).second.allocated_size;
in_use_.erase(itr);
}
}
Allocator* allocator = allocator_;
{
mutex_lock lock(mu_);
if (tracks_allocation_sizes) {
CHECK_GE(allocated_, allocated_bytes);
allocated_ -= allocated_bytes;
allocations_.emplace_back(-allocated_bytes, Env::Default()->NowMicros());
}
should_delete = UnRef();
}
allocator->DeallocateRaw(ptr);
if (should_delete) {
delete this;
}
}
bool TrackingAllocator::TracksAllocationSizes() const {
return track_sizes_locally_ || allocator_->TracksAllocationSizes();
}
size_t TrackingAllocator::RequestedSize(const void* ptr) const {
if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto it = in_use_.find(ptr);
if (it != in_use_.end()) {
return (*it).second.requested_size;
}
return 0;
} else {
return allocator_->RequestedSize(ptr);
}
}
size_t TrackingAllocator::AllocatedSize(const void* ptr) const {
if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto it = in_use_.find(ptr);
if (it != in_use_.end()) {
return (*it).second.allocated_size;
}
return 0;
} else {
return allocator_->AllocatedSize(ptr);
}
}
int64_t TrackingAllocator::AllocationId(const void* ptr) const {
if (track_sizes_locally_) {
mutex_lock lock(mu_);
auto it = in_use_.find(ptr);
if (it != in_use_.end()) {
return (*it).second.allocation_id;
}
return 0;
} else {
return allocator_->AllocationId(ptr);
}
}
absl::optional<AllocatorStats> TrackingAllocator::GetStats() {
return allocator_->GetStats();
}
bool TrackingAllocator::ClearStats() { return allocator_->ClearStats(); }
std::tuple<size_t, size_t, size_t> TrackingAllocator::GetSizes() {
size_t high_watermark;
size_t total_bytes;
size_t still_live_bytes;
{
mutex_lock lock(mu_);
high_watermark = high_watermark_;
total_bytes = total_bytes_;
still_live_bytes = allocated_;
}
return std::make_tuple(total_bytes, high_watermark, still_live_bytes);
}
absl::InlinedVector<AllocRecord, 4UL> TrackingAllocator::GetRecordsAndUnRef() {
bool should_delete;
absl::InlinedVector<AllocRecord, 4UL> allocations;
{
mutex_lock lock(mu_);
allocations.swap(allocations_);
should_delete = UnRef();
}
if (should_delete) {
delete this;
}
return allocations;
}
absl::InlinedVector<AllocRecord, 4UL> TrackingAllocator::GetCurrentRecords() {
absl::InlinedVector<AllocRecord, 4UL> allocations;
{
mutex_lock lock(mu_);
for (const AllocRecord& alloc : allocations_) {
allocations.push_back(alloc);
}
}
return allocations;
}
bool TrackingAllocator::UnRef() {
CHECK_GE(ref_, 1);
--ref_;
return (ref_ == 0);
}
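// Hedged lifecycle sketch (base is any Allocator*): ref_ starts at 1 for the
// creator, gains one per live allocation, and the tracker deletes itself when
// the final unref happens in DeallocateRaw or GetRecordsAndUnRef, e.g.:
//   TrackingAllocator* ta = new TrackingAllocator(base, /*track_ids=*/true);
//   void* p = ta->AllocateRaw(4, 12);          // ref_ == 2
//   ta->DeallocateRaw(p);                      // ref_ == 1
//   auto records = ta->GetRecordsAndUnRef();   // ref_ == 0, ta deleted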
} | #include "tensorflow/core/framework/tracking_allocator.h"
#include <unordered_map>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class TestableSizeTrackingAllocator : public Allocator {
public:
string Name() override { return "test"; }
void* AllocateRaw(size_t /*alignment*/, size_t num_bytes) override {
void* ptr = port::Malloc(num_bytes);
size_map_[ptr] = num_bytes;
return ptr;
}
void DeallocateRaw(void* ptr) override {
const auto& iter = size_map_.find(ptr);
EXPECT_NE(size_map_.end(), iter);
size_map_.erase(iter);
port::Free(ptr);
}
bool TracksAllocationSizes() const override { return true; }
size_t RequestedSize(const void* ptr) const override {
const auto& iter = size_map_.find(ptr);
EXPECT_NE(size_map_.end(), iter);
return iter->second;
}
absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
private:
std::unordered_map<const void*, size_t> size_map_;
};
class NoMemoryAllocator : public Allocator {
public:
string Name() override { return "test"; }
void* AllocateRaw(size_t /*alignment*/, size_t /*num_bytes*/) override {
return nullptr;
}
void DeallocateRaw(void* ptr) override {}
bool TracksAllocationSizes() const override { return true; }
absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
};
TEST(TrackingAllocatorTest, SimpleNoTracking) {
Allocator* a = cpu_allocator();
EXPECT_FALSE(a->TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(a, false);
void* p1 = ta->AllocateRaw(4, 4);
ta->DeallocateRaw(p1);
void* p2 = ta->AllocateRaw(4, 12);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(16, std::get<0>(sizes));
EXPECT_EQ(0, std::get<1>(sizes));
EXPECT_EQ(0, std::get<2>(sizes));
ta->DeallocateRaw(p2);
auto records = ta->GetRecordsAndUnRef();
EXPECT_EQ(4, records[0].alloc_bytes);
EXPECT_EQ(12, records[1].alloc_bytes);
ta = new TrackingAllocator(a, true);
p1 = ta->AllocateRaw(4, 4);
EXPECT_EQ(4, ta->RequestedSize(p1));
EXPECT_LE(4, ta->AllocatedSize(p1));
EXPECT_EQ(1, ta->AllocationId(p1));
ta->DeallocateRaw(p1);
p2 = ta->AllocateRaw(4, 12);
EXPECT_EQ(12, ta->RequestedSize(p2));
EXPECT_LE(12, ta->AllocatedSize(p2));
EXPECT_EQ(2, ta->AllocationId(p2));
sizes = ta->GetSizes();
EXPECT_LE(16, std::get<0>(sizes));
EXPECT_LE(12, std::get<1>(sizes));
EXPECT_LE(12, std::get<2>(sizes));
ta->DeallocateRaw(p2);
records = ta->GetRecordsAndUnRef();
EXPECT_LE(4, records[0].alloc_bytes);
EXPECT_GE(-4, records[1].alloc_bytes);
EXPECT_LE(12, records[2].alloc_bytes);
EXPECT_GE(-12, records[3].alloc_bytes);
}
TEST(TrackingAllocatorTest, SimpleTracking) {
TestableSizeTrackingAllocator a = TestableSizeTrackingAllocator();
EXPECT_TRUE(a.TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(&a, false);
void* p1 = ta->AllocateRaw(4, 12);
ta->DeallocateRaw(p1);
void* p2 = ta->AllocateRaw(4, 4);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(16, std::get<0>(sizes));
EXPECT_EQ(12, std::get<1>(sizes));
EXPECT_EQ(4, std::get<2>(sizes));
ta->DeallocateRaw(p2);
auto records = ta->GetRecordsAndUnRef();
EXPECT_EQ(12, records[0].alloc_bytes);
EXPECT_EQ(-12, records[1].alloc_bytes);
EXPECT_EQ(4, records[2].alloc_bytes);
EXPECT_EQ(-4, records[3].alloc_bytes);
}
TEST(TrackingAllocatorTest, OutOfMemory) {
NoMemoryAllocator a;
EXPECT_TRUE(a.TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(&a, false);
void* p1 = ta->AllocateRaw(4, 12);
EXPECT_EQ(nullptr, p1);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(0, std::get<0>(sizes));
EXPECT_EQ(0, std::get<1>(sizes));
EXPECT_EQ(0, std::get<2>(sizes));
EXPECT_EQ(0, ta->GetRecordsAndUnRef().size());
}
TEST(TrackingAllocatorTest, FreeNullPtr) {
NoMemoryAllocator a;
EXPECT_TRUE(a.TracksAllocationSizes());
TrackingAllocator* ta = new TrackingAllocator(&a, false);
ta->DeallocateRaw(nullptr);
std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
EXPECT_EQ(0, std::get<0>(sizes));
EXPECT_EQ(0, std::get<1>(sizes));
EXPECT_EQ(0, std::get<2>(sizes));
EXPECT_EQ(0, ta->GetRecordsAndUnRef().size());
}
} |
126 | #ifndef QUICHE_QUIC_TOOLS_QUIC_MEMORY_CACHE_BACKEND_H_
#define QUICHE_QUIC_TOOLS_QUIC_MEMORY_CACHE_BACKEND_H_
#include <list>
#include <map>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/spdy_utils.h"
#include "quiche/quic/platform/api/quic_mutex.h"
#include "quiche/quic/tools/quic_backend_response.h"
#include "quiche/quic/tools/quic_simple_server_backend.h"
#include "quiche/spdy/core/http2_header_block.h"
#include "quiche/spdy/core/spdy_framer.h"
namespace quic {
class QuicMemoryCacheBackend : public QuicSimpleServerBackend {
public:
class ResourceFile {
public:
explicit ResourceFile(const std::string& file_name);
ResourceFile(const ResourceFile&) = delete;
ResourceFile& operator=(const ResourceFile&) = delete;
virtual ~ResourceFile();
void Read();
void SetHostPathFromBase(absl::string_view base);
const std::string& file_name() { return file_name_; }
absl::string_view host() { return host_; }
absl::string_view path() { return path_; }
const spdy::Http2HeaderBlock& spdy_headers() { return spdy_headers_; }
absl::string_view body() { return body_; }
const std::vector<absl::string_view>& push_urls() { return push_urls_; }
private:
void HandleXOriginalUrl();
absl::string_view RemoveScheme(absl::string_view url);
std::string file_name_;
std::string file_contents_;
absl::string_view body_;
spdy::Http2HeaderBlock spdy_headers_;
absl::string_view x_original_url_;
std::vector<absl::string_view> push_urls_;
std::string host_;
std::string path_;
};
QuicMemoryCacheBackend();
QuicMemoryCacheBackend(const QuicMemoryCacheBackend&) = delete;
QuicMemoryCacheBackend& operator=(const QuicMemoryCacheBackend&) = delete;
~QuicMemoryCacheBackend() override;
const QuicBackendResponse* GetResponse(absl::string_view host,
absl::string_view path) const;
void AddSimpleResponse(absl::string_view host, absl::string_view path,
int response_code, absl::string_view body);
void AddResponse(absl::string_view host, absl::string_view path,
spdy::Http2HeaderBlock response_headers,
absl::string_view response_body);
void AddResponse(absl::string_view host, absl::string_view path,
spdy::Http2HeaderBlock response_headers,
absl::string_view response_body,
spdy::Http2HeaderBlock response_trailers);
void AddResponseWithEarlyHints(
absl::string_view host, absl::string_view path,
spdy::Http2HeaderBlock response_headers, absl::string_view response_body,
const std::vector<spdy::Http2HeaderBlock>& early_hints);
void AddSpecialResponse(
absl::string_view host, absl::string_view path,
QuicBackendResponse::SpecialResponseType response_type);
void AddSpecialResponse(
absl::string_view host, absl::string_view path,
spdy::Http2HeaderBlock response_headers, absl::string_view response_body,
QuicBackendResponse::SpecialResponseType response_type);
bool SetResponseDelay(absl::string_view host, absl::string_view path,
QuicTime::Delta delay);
void AddDefaultResponse(QuicBackendResponse* response);
void GenerateDynamicResponses();
void EnableWebTransport();
bool InitializeBackend(const std::string& cache_directory) override;
bool IsBackendInitialized() const override;
void FetchResponseFromBackend(
const spdy::Http2HeaderBlock& request_headers,
const std::string& request_body,
QuicSimpleServerBackend::RequestHandler* quic_stream) override;
void CloseBackendResponseStream(
QuicSimpleServerBackend::RequestHandler* quic_stream) override;
WebTransportResponse ProcessWebTransportRequest(
const spdy::Http2HeaderBlock& request_headers,
WebTransportSession* session) override;
bool SupportsWebTransport() override { return enable_webtransport_; }
private:
void AddResponseImpl(absl::string_view host, absl::string_view path,
QuicBackendResponse::SpecialResponseType response_type,
spdy::Http2HeaderBlock response_headers,
absl::string_view response_body,
spdy::Http2HeaderBlock response_trailers,
const std::vector<spdy::Http2HeaderBlock>& early_hints);
std::string GetKey(absl::string_view host, absl::string_view path) const;
absl::flat_hash_map<std::string, std::unique_ptr<QuicBackendResponse>>
responses_ QUIC_GUARDED_BY(response_mutex_);
std::unique_ptr<QuicBackendResponse> default_response_
QUIC_GUARDED_BY(response_mutex_);
std::unique_ptr<QuicBackendResponse> generate_bytes_response_
QUIC_GUARDED_BY(response_mutex_);
mutable QuicMutex response_mutex_;
bool cache_initialized_;
bool enable_webtransport_ = false;
};
}
#endif
#include "quiche/quic/tools/quic_memory_cache_backend.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/spdy_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/tools/web_transport_test_visitors.h"
#include "quiche/common/platform/api/quiche_file_utils.h"
#include "quiche/common/quiche_text_utils.h"
using spdy::Http2HeaderBlock;
using spdy::kV3LowestPriority;
namespace quic {
QuicMemoryCacheBackend::ResourceFile::ResourceFile(const std::string& file_name)
: file_name_(file_name) {}
QuicMemoryCacheBackend::ResourceFile::~ResourceFile() = default;
void QuicMemoryCacheBackend::ResourceFile::Read() {
std::optional<std::string> maybe_file_contents =
quiche::ReadFileContents(file_name_);
if (!maybe_file_contents) {
QUIC_LOG(DFATAL) << "Failed to read file for the memory cache backend: "
<< file_name_;
return;
}
file_contents_ = *maybe_file_contents;
for (size_t start = 0; start < file_contents_.length();) {
size_t pos = file_contents_.find('\n', start);
if (pos == std::string::npos) {
QUIC_LOG(DFATAL) << "Headers invalid or empty, ignoring: " << file_name_;
return;
}
size_t len = pos - start;
if (file_contents_[pos - 1] == '\r') {
len -= 1;
}
absl::string_view line(file_contents_.data() + start, len);
start = pos + 1;
if (line.empty()) {
body_ = absl::string_view(file_contents_.data() + start,
file_contents_.size() - start);
break;
}
if (line.substr(0, 4) == "HTTP") {
pos = line.find(' ');
if (pos == std::string::npos) {
QUIC_LOG(DFATAL) << "Headers invalid or empty, ignoring: "
<< file_name_;
return;
}
spdy_headers_[":status"] = line.substr(pos + 1, 3);
continue;
}
pos = line.find(": ");
if (pos == std::string::npos) {
QUIC_LOG(DFATAL) << "Headers invalid or empty, ignoring: " << file_name_;
return;
}
spdy_headers_.AppendValueOrAddHeader(
quiche::QuicheTextUtils::ToLower(line.substr(0, pos)),
line.substr(pos + 2));
}
spdy_headers_.erase("connection");
if (auto it = spdy_headers_.find("x-original-url");
it != spdy_headers_.end()) {
x_original_url_ = it->second;
HandleXOriginalUrl();
}
}
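// The parser above expects a cached-response file shaped roughly like this
// (hedged example; CRLF or LF line endings):
//   HTTP/1.1 200 OK
//   content-type: text/html
//   x-original-url: https://test.example.com/index.html
//   <blank line>
//   <body bytes to end of file>
// The status line yields ":status", header names are lower-cased,
// "connection" is dropped, and x-original-url (if present) resets host/path.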
void QuicMemoryCacheBackend::ResourceFile::SetHostPathFromBase(
absl::string_view base) {
QUICHE_DCHECK(base[0] != '/') << base;
size_t path_start = base.find_first_of('/');
if (path_start == absl::string_view::npos) {
host_ = std::string(base);
path_ = "";
return;
}
host_ = std::string(base.substr(0, path_start));
size_t query_start = base.find_first_of(',');
if (query_start > 0) {
path_ = std::string(base.substr(path_start, query_start - 1));
} else {
path_ = std::string(base.substr(path_start));
}
}
absl::string_view QuicMemoryCacheBackend::ResourceFile::RemoveScheme(
absl::string_view url) {
if (absl::StartsWith(url, "https:
url.remove_prefix(8);
} else if (absl::StartsWith(url, "http:
url.remove_prefix(7);
}
return url;
}
void QuicMemoryCacheBackend::ResourceFile::HandleXOriginalUrl() {
absl::string_view url(x_original_url_);
SetHostPathFromBase(RemoveScheme(url));
}
const QuicBackendResponse* QuicMemoryCacheBackend::GetResponse(
absl::string_view host, absl::string_view path) const {
QuicWriterMutexLock lock(&response_mutex_);
auto it = responses_.find(GetKey(host, path));
if (it == responses_.end()) {
uint64_t ignored = 0;
if (generate_bytes_response_) {
if (absl::SimpleAtoi(absl::string_view(path.data() + 1, path.size() - 1),
&ignored)) {
return generate_bytes_response_.get();
}
}
QUIC_DVLOG(1) << "Get response for resource failed: host " << host
<< " path " << path;
if (default_response_) {
return default_response_.get();
}
return nullptr;
}
return it->second.get();
}
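// Lookup order above: an exact host+path entry wins; otherwise, if dynamic
// responses are enabled and the path minus its leading '/' parses as an
// integer (e.g. "/123456"), the GENERATE_BYTES response is served; otherwise
// the default response, if one was registered; otherwise nullptr.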
using SpecialResponseType = QuicBackendResponse::SpecialResponseType;
void QuicMemoryCacheBackend::AddSimpleResponse(absl::string_view host,
absl::string_view path,
int response_code,
absl::string_view body) {
Http2HeaderBlock response_headers;
response_headers[":status"] = absl::StrCat(response_code);
response_headers["content-length"] = absl::StrCat(body.length());
AddResponse(host, path, std::move(response_headers), body);
}
void QuicMemoryCacheBackend::AddDefaultResponse(QuicBackendResponse* response) {
QuicWriterMutexLock lock(&response_mutex_);
default_response_.reset(response);
}
void QuicMemoryCacheBackend::AddResponse(absl::string_view host,
absl::string_view path,
Http2HeaderBlock response_headers,
absl::string_view response_body) {
AddResponseImpl(host, path, QuicBackendResponse::REGULAR_RESPONSE,
std::move(response_headers), response_body,
Http2HeaderBlock(), std::vector<spdy::Http2HeaderBlock>());
}
void QuicMemoryCacheBackend::AddResponse(absl::string_view host,
absl::string_view path,
Http2HeaderBlock response_headers,
absl::string_view response_body,
Http2HeaderBlock response_trailers) {
AddResponseImpl(host, path, QuicBackendResponse::REGULAR_RESPONSE,
std::move(response_headers), response_body,
std::move(response_trailers),
std::vector<spdy::Http2HeaderBlock>());
}
bool QuicMemoryCacheBackend::SetResponseDelay(absl::string_view host,
absl::string_view path,
QuicTime::Delta delay) {
QuicWriterMutexLock lock(&response_mutex_);
auto it = responses_.find(GetKey(host, path));
if (it == responses_.end()) return false;
it->second->set_delay(delay);
return true;
}
void QuicMemoryCacheBackend::AddResponseWithEarlyHints(
absl::string_view host, absl::string_view path,
spdy::Http2HeaderBlock response_headers, absl::string_view response_body,
const std::vector<spdy::Http2HeaderBlock>& early_hints) {
AddResponseImpl(host, path, QuicBackendResponse::REGULAR_RESPONSE,
std::move(response_headers), response_body,
Http2HeaderBlock(), early_hints);
}
void QuicMemoryCacheBackend::AddSpecialResponse(
absl::string_view host, absl::string_view path,
SpecialResponseType response_type) {
AddResponseImpl(host, path, response_type, Http2HeaderBlock(), "",
Http2HeaderBlock(), std::vector<spdy::Http2HeaderBlock>());
}
void QuicMemoryCacheBackend::AddSpecialResponse(
absl::string_view host, absl::string_view path,
spdy::Http2HeaderBlock response_headers, absl::string_view response_body,
SpecialResponseType response_type) {
AddResponseImpl(host, path, response_type, std::move(response_headers),
response_body, Http2HeaderBlock(),
std::vector<spdy::Http2HeaderBlock>());
}
QuicMemoryCacheBackend::QuicMemoryCacheBackend() : cache_initialized_(false) {}
bool QuicMemoryCacheBackend::InitializeBackend(
const std::string& cache_directory) {
if (cache_directory.empty()) {
QUIC_BUG(quic_bug_10932_1) << "cache_directory must not be empty.";
return false;
}
QUIC_LOG(INFO)
<< "Attempting to initialize QuicMemoryCacheBackend from directory: "
<< cache_directory;
std::vector<std::string> files;
if (!quiche::EnumerateDirectoryRecursively(cache_directory, files)) {
QUIC_BUG(QuicMemoryCacheBackend unreadable directory)
<< "Can't read QuicMemoryCacheBackend directory: " << cache_directory;
return false;
}
for (const auto& filename : files) {
std::unique_ptr<ResourceFile> resource_file(new ResourceFile(filename));
std::string base(resource_file->file_name());
for (size_t i = 0; i < base.length(); ++i) {
if (base[i] == '\\') {
base[i] = '/';
}
}
base.erase(0, cache_directory.length());
if (base[0] == '/') {
base.erase(0, 1);
}
resource_file->SetHostPathFromBase(base);
resource_file->Read();
AddResponse(resource_file->host(), resource_file->path(),
resource_file->spdy_headers().Clone(), resource_file->body());
}
cache_initialized_ = true;
return true;
}
void QuicMemoryCacheBackend::GenerateDynamicResponses() {
QuicWriterMutexLock lock(&response_mutex_);
spdy::Http2HeaderBlock response_headers;
response_headers[":status"] = "200";
generate_bytes_response_ = std::make_unique<QuicBackendResponse>();
generate_bytes_response_->set_headers(std::move(response_headers));
generate_bytes_response_->set_response_type(
QuicBackendResponse::GENERATE_BYTES);
}
void QuicMemoryCacheBackend::EnableWebTransport() {
enable_webtransport_ = true;
}
bool QuicMemoryCacheBackend::IsBackendInitialized() const {
return cache_initialized_;
}
void QuicMemoryCacheBackend::FetchResponseFromBackend(
const Http2HeaderBlock& request_headers,
const std::string& ,
QuicSimpleServerBackend::RequestHandler* quic_stream) {
const QuicBackendResponse* quic_response = nullptr;
auto authority = request_headers.find(":authority");
auto path = request_headers.find(":path");
if (authority != request_headers.end() && path != request_headers.end()) {
quic_response = GetResponse(authority->second, path->second);
}
std::string request_url;
if (authority != request_headers.end()) {
request_url = std::string(authority->second);
}
if (path != request_headers.end()) {
request_url += std::string(path->second);
}
QUIC_DVLOG(1)
<< "Fetching QUIC response from backend in-memory cache for url "
<< request_url;
quic_stream->OnResponseBackendComplete(quic_response);
}
void QuicMemoryCacheBackend::CloseBackendResponseStream(
QuicSimpleServerBackend::RequestHandler* /*quic_stream*/) {}
QuicMemoryCacheBackend::WebTransportResponse
QuicMemoryCacheBackend::ProcessWebTransportRequest(
const spdy::Http2HeaderBlock& request_headers,
WebTransportSession* session) {
if (!SupportsWebTransport()) {
return QuicSimpleServerBackend::ProcessWebTransportRequest(request_headers,
session);
}
auto path_it = request_headers.find(":path");
if (path_it == request_headers.end()) {
WebTransportResponse response;
response.response_headers[":status"] = "400";
return response;
}
absl::string_view path = path_it->second;
if (path == "/echo") {
WebTransportResponse response;
response.response_headers[":status"] = "200";
response.visitor =
std::make_unique<EchoWebTransportSessionVisitor>(session);
return response;
}
WebTransportResponse response;
response.response_headers[":status"] = "404";
return response;
}
QuicMemoryCacheBackend::~QuicMemoryCacheBackend() {
{
QuicWriterMutexLock lock(&response_mutex_);
responses_.clear();
}
}
void QuicMemoryCacheBackend::AddResponseImpl(
absl::string_view host, absl::string_view path,
SpecialResponseType response_type, Http2HeaderBlock response_headers,
absl::string_view response_body, Http2HeaderBlock response_trailers,
const std::vector<spdy::Http2HeaderBlock>& early_hints) {
QuicWriterMutexLock lock(&response_mutex_);
QUICHE_DCHECK(!host.empty())
<< "Host must be populated, e.g. \"www.google.com\"";
std::string key = GetKey(host, path);
if (responses_.contains(key)) {
QUIC_BUG(quic_bug_10932_3)
<< "Response for '" << key << "' already exists!";
return;
}
auto new_response = std::make_unique<QuicBackendResponse>();
new_response->set_response_type(response_type);
new_response->set_headers(std::move(response_headers));
new_response->set_body(response_body);
new_response->set_trailers(std::move(response_trailers));
for (auto& headers : early_hints) {
new_response->AddEarlyHints(headers);
}
QUIC_DVLOG(1) << "Add response with key " << key;
responses_[key] = std::move(new_response);
}
std::string QuicMemoryCacheBackend::GetKey(absl::string_view host,
absl::string_view path) const {
std::string host_string = std::string(host);
size_t port = host_string.find(':');
if (port != std::string::npos)
host_string = std::string(host_string.c_str(), port);
return host_string + std::string(path);
}
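// Hedged example of the key scheme: GetKey("www.example.com:443",
// "/index.html") strips the port and yields "www.example.com/index.html",
// so a response added without a port still matches requests that carry one.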
} | #include "quiche/quic/tools/quic_memory_cache_backend.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/tools/quic_backend_response.h"
#include "quiche/common/platform/api/quiche_file_utils.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quic {
namespace test {
namespace {
using Response = QuicBackendResponse;
}
class QuicMemoryCacheBackendTest : public QuicTest {
protected:
void CreateRequest(std::string host, std::string path,
spdy::Http2HeaderBlock* headers) {
(*headers)[":method"] = "GET";
(*headers)[":path"] = path;
(*headers)[":authority"] = host;
(*headers)[":scheme"] = "https";
}
std::string CacheDirectory() {
return quiche::test::QuicheGetTestMemoryCachePath();
}
QuicMemoryCacheBackend cache_;
};
TEST_F(QuicMemoryCacheBackendTest, GetResponseNoMatch) {
const Response* response =
cache_.GetResponse("mail.google.com", "/index.html");
ASSERT_FALSE(response);
}
TEST_F(QuicMemoryCacheBackendTest, AddSimpleResponseGetResponse) {
std::string response_body("hello response");
cache_.AddSimpleResponse("www.google.com", "/", 200, response_body);
spdy::Http2HeaderBlock request_headers;
CreateRequest("www.google.com", "/", &request_headers);
const Response* response = cache_.GetResponse("www.google.com", "/");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("200", response->headers().find(":status")->second);
EXPECT_EQ(response_body.size(), response->body().length());
}
TEST_F(QuicMemoryCacheBackendTest, AddResponse) {
const std::string kRequestHost = "www.foo.com";
const std::string kRequestPath = "/";
const std::string kResponseBody("hello response");
spdy::Http2HeaderBlock response_headers;
response_headers[":status"] = "200";
response_headers["content-length"] = absl::StrCat(kResponseBody.size());
spdy::Http2HeaderBlock response_trailers;
response_trailers["key-1"] = "value-1";
response_trailers["key-2"] = "value-2";
response_trailers["key-3"] = "value-3";
cache_.AddResponse(kRequestHost, "/", response_headers.Clone(), kResponseBody,
response_trailers.Clone());
const Response* response = cache_.GetResponse(kRequestHost, kRequestPath);
EXPECT_EQ(response->headers(), response_headers);
EXPECT_EQ(response->body(), kResponseBody);
EXPECT_EQ(response->trailers(), response_trailers);
}
#if defined(OS_IOS)
#define MAYBE_ReadsCacheDir DISABLED_ReadsCacheDir
#else
#define MAYBE_ReadsCacheDir ReadsCacheDir
#endif
TEST_F(QuicMemoryCacheBackendTest, MAYBE_ReadsCacheDir) {
cache_.InitializeBackend(CacheDirectory());
const Response* response =
cache_.GetResponse("test.example.com", "/index.html");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("200", response->headers().find(":status")->second);
EXPECT_FALSE(response->headers().contains("connection"));
EXPECT_LT(0U, response->body().length());
}
#if defined(OS_IOS)
#define MAYBE_UsesOriginalUrl DISABLED_UsesOriginalUrl
#else
#define MAYBE_UsesOriginalUrl UsesOriginalUrl
#endif
TEST_F(QuicMemoryCacheBackendTest, MAYBE_UsesOriginalUrl) {
cache_.InitializeBackend(CacheDirectory());
const Response* response =
cache_.GetResponse("test.example.com", "/site_map.html");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("200", response->headers().find(":status")->second);
EXPECT_FALSE(response->headers().contains("connection"));
EXPECT_LT(0U, response->body().length());
}
#if defined(OS_IOS)
#define MAYBE_UsesOriginalUrlOnly DISABLED_UsesOriginalUrlOnly
#else
#define MAYBE_UsesOriginalUrlOnly UsesOriginalUrlOnly
#endif
TEST_F(QuicMemoryCacheBackendTest, MAYBE_UsesOriginalUrlOnly) {
std::string dir;
std::string path = "map.html";
std::vector<std::string> files;
ASSERT_TRUE(quiche::EnumerateDirectoryRecursively(CacheDirectory(), files));
for (const std::string& file : files) {
if (absl::EndsWithIgnoreCase(file, "map.html")) {
dir = file;
dir.erase(dir.length() - path.length() - 1);
break;
}
}
ASSERT_NE("", dir);
cache_.InitializeBackend(dir);
const Response* response =
cache_.GetResponse("test.example.com", "/site_map.html");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("200", response->headers().find(":status")->second);
EXPECT_FALSE(response->headers().contains("connection"));
EXPECT_LT(0U, response->body().length());
}
TEST_F(QuicMemoryCacheBackendTest, DefaultResponse) {
const Response* response = cache_.GetResponse("www.google.com", "/");
ASSERT_FALSE(response);
spdy::Http2HeaderBlock response_headers;
response_headers[":status"] = "200";
response_headers["content-length"] = "0";
Response* default_response = new Response;
default_response->set_headers(std::move(response_headers));
cache_.AddDefaultResponse(default_response);
response = cache_.GetResponse("www.google.com", "/");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("200", response->headers().find(":status")->second);
cache_.AddSimpleResponse("www.google.com", "/", 302, "");
response = cache_.GetResponse("www.google.com", "/");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("302", response->headers().find(":status")->second);
response = cache_.GetResponse("www.google.com", "/asd");
ASSERT_TRUE(response);
ASSERT_TRUE(response->headers().contains(":status"));
EXPECT_EQ("200", response->headers().find(":status")->second);
}
}
} |
127 | #ifndef QUICHE_QUIC_CORE_HTTP_SPDY_UTILS_H_
#define QUICHE_QUIC_CORE_HTTP_SPDY_UTILS_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/http/quic_header_list.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/spdy/core/http2_header_block.h"
#include "quiche/spdy/core/spdy_alt_svc_wire_format.h"
namespace quic {
class QUICHE_EXPORT SpdyUtils {
public:
SpdyUtils() = delete;
static bool ExtractContentLengthFromHeaders(int64_t* content_length,
spdy::Http2HeaderBlock* headers);
static bool CopyAndValidateHeaders(const QuicHeaderList& header_list,
int64_t* content_length,
spdy::Http2HeaderBlock* headers);
static bool CopyAndValidateTrailers(const QuicHeaderList& header_list,
bool expect_final_byte_offset,
size_t* final_byte_offset,
spdy::Http2HeaderBlock* trailers);
static bool PopulateHeaderBlockFromUrl(const std::string url,
spdy::Http2HeaderBlock* headers);
static ParsedQuicVersion ExtractQuicVersionFromAltSvcEntry(
const spdy::SpdyAltSvcWireFormat::AlternativeService&
alternative_service_entry,
const ParsedQuicVersionVector& supported_versions);
};
}
#endif
#include "quiche/quic/core/http/spdy_utils.h"
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/quiche_text_utils.h"
#include "quiche/spdy/core/spdy_protocol.h"
using spdy::Http2HeaderBlock;
namespace quic {
bool SpdyUtils::ExtractContentLengthFromHeaders(int64_t* content_length,
Http2HeaderBlock* headers) {
auto it = headers->find("content-length");
if (it == headers->end()) {
return false;
} else {
absl::string_view content_length_header = it->second;
std::vector<absl::string_view> values =
absl::StrSplit(content_length_header, '\0');
for (const absl::string_view& value : values) {
uint64_t new_value;
if (!absl::SimpleAtoi(value, &new_value) ||
!quiche::QuicheTextUtils::IsAllDigits(value)) {
QUIC_DLOG(ERROR)
<< "Content length was either unparseable or negative.";
return false;
}
if (*content_length < 0) {
*content_length = new_value;
continue;
}
if (new_value != static_cast<uint64_t>(*content_length)) {
QUIC_DLOG(ERROR)
<< "Parsed content length " << new_value << " is "
<< "inconsistent with previously detected content length "
<< *content_length;
return false;
}
}
return true;
}
}
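// Illustrative cases for the logic above: a coalesced value "9\09" (two "9"s
// joined by NUL) parses to 9; "9\08" fails as inconsistent; "+123" is
// rejected by the all-digits check even where SimpleAtoi would accept it.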
bool SpdyUtils::CopyAndValidateHeaders(const QuicHeaderList& header_list,
int64_t* content_length,
Http2HeaderBlock* headers) {
for (const auto& p : header_list) {
const std::string& name = p.first;
if (name.empty()) {
QUIC_DLOG(ERROR) << "Header name must not be empty.";
return false;
}
if (quiche::QuicheTextUtils::ContainsUpperCase(name)) {
QUIC_DLOG(ERROR) << "Malformed header: Header name " << name
<< " contains upper-case characters.";
return false;
}
headers->AppendValueOrAddHeader(name, p.second);
}
if (headers->contains("content-length") &&
!ExtractContentLengthFromHeaders(content_length, headers)) {
return false;
}
QUIC_DVLOG(1) << "Successfully parsed headers: " << headers->DebugString();
return true;
}
bool SpdyUtils::CopyAndValidateTrailers(const QuicHeaderList& header_list,
bool expect_final_byte_offset,
size_t* final_byte_offset,
Http2HeaderBlock* trailers) {
bool found_final_byte_offset = false;
for (const auto& p : header_list) {
const std::string& name = p.first;
if (expect_final_byte_offset && !found_final_byte_offset &&
name == kFinalOffsetHeaderKey &&
absl::SimpleAtoi(p.second, final_byte_offset)) {
found_final_byte_offset = true;
continue;
}
if (name.empty() || name[0] == ':') {
QUIC_DLOG(ERROR)
<< "Trailers must not be empty, and must not contain pseudo-"
<< "headers. Found: '" << name << "'";
return false;
}
if (quiche::QuicheTextUtils::ContainsUpperCase(name)) {
QUIC_DLOG(ERROR) << "Malformed header: Header name " << name
<< " contains upper-case characters.";
return false;
}
trailers->AppendValueOrAddHeader(name, p.second);
}
if (expect_final_byte_offset && !found_final_byte_offset) {
QUIC_DLOG(ERROR) << "Required key '" << kFinalOffsetHeaderKey
<< "' not present";
return false;
}
QUIC_DVLOG(1) << "Successfully parsed Trailers: " << trailers->DebugString();
return true;
}
bool SpdyUtils::PopulateHeaderBlockFromUrl(const std::string url,
Http2HeaderBlock* headers) {
(*headers)[":method"] = "GET";
size_t pos = url.find(":
if (pos == std::string::npos) {
return false;
}
(*headers)[":scheme"] = url.substr(0, pos);
size_t start = pos + 3;
pos = url.find('/', start);
if (pos == std::string::npos) {
(*headers)[":authority"] = url.substr(start);
(*headers)[":path"] = "/";
return true;
}
(*headers)[":authority"] = url.substr(start, pos - start);
(*headers)[":path"] = url.substr(pos);
return true;
}
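// Hedged mapping example: "https://www.example.com/index.html" produces
//   :method = GET, :scheme = https, :authority = www.example.com,
//   :path = /index.html
// and a URL with no path component gets ":path" set to "/".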
ParsedQuicVersion SpdyUtils::ExtractQuicVersionFromAltSvcEntry(
const spdy::SpdyAltSvcWireFormat::AlternativeService&
alternative_service_entry,
const ParsedQuicVersionVector& supported_versions) {
for (const ParsedQuicVersion& version : supported_versions) {
if (version.AlpnDeferToRFCv1()) {
continue;
}
if (AlpnForVersion(version) == alternative_service_entry.protocol_id) {
return version;
}
}
return ParsedQuicVersion::Unsupported();
}
} | #include "quiche/quic/core/http/spdy_utils.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_test.h"
using spdy::Http2HeaderBlock;
using testing::Pair;
using testing::UnorderedElementsAre;
namespace quic {
namespace test {
namespace {
const bool kExpectFinalByteOffset = true;
const bool kDoNotExpectFinalByteOffset = false;
static std::unique_ptr<QuicHeaderList> FromList(
const QuicHeaderList::ListType& src) {
std::unique_ptr<QuicHeaderList> headers(new QuicHeaderList);
headers->OnHeaderBlockStart();
for (const auto& p : src) {
headers->OnHeader(p.first, p.second);
}
headers->OnHeaderBlockEnd(0, 0);
return headers;
}
}
using CopyAndValidateHeaders = QuicTest;
TEST_F(CopyAndValidateHeaders, NormalUsage) {
auto headers = FromList({
{"cookie", " part 1"},
{"cookie", "part 2 "},
{"cookie", "part3"},
{"passed-through", std::string("foo\0baz", 7)},
{"joined", "value 1"},
{"joined", "value 2"},
{"empty", ""},
{"empty-joined", ""},
{"empty-joined", "foo"},
{"empty-joined", ""},
{"empty-joined", ""},
{"cookie", " fin!"}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block,
UnorderedElementsAre(
Pair("cookie", " part 1; part 2 ; part3; fin!"),
Pair("passed-through", absl::string_view("foo\0baz", 7)),
Pair("joined", absl::string_view("value 1\0value 2", 15)),
Pair("empty", ""),
Pair("empty-joined", absl::string_view("\0foo\0\0", 6))));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, EmptyName) {
auto headers = FromList({{"foo", "foovalue"}, {"", "barvalue"}, {"baz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, UpperCaseName) {
auto headers =
FromList({{"foo", "foovalue"}, {"bar", "barvalue"}, {"bAz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, MultipleContentLengths) {
auto headers = FromList({{"content-length", "9"},
{"foo", "foovalue"},
{"content-length", "9"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("content-length", absl::string_view("9\09", 3)),
Pair("baz", "")));
EXPECT_EQ(9, content_length);
}
TEST_F(CopyAndValidateHeaders, InconsistentContentLengths) {
auto headers = FromList({{"content-length", "9"},
{"foo", "foovalue"},
{"content-length", "8"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, LargeContentLength) {
auto headers = FromList({{"content-length", "9000000000"},
{"foo", "foovalue"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block,
UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("content-length", absl::string_view("9000000000")),
Pair("baz", "")));
EXPECT_EQ(9000000000, content_length);
}
TEST_F(CopyAndValidateHeaders, NonDigitContentLength) {
auto headers = FromList({{"content-length", "+123"},
{"foo", "foovalue"},
{"bar", "barvalue"},
{"baz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
EXPECT_FALSE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
}
TEST_F(CopyAndValidateHeaders, MultipleValues) {
auto headers = FromList({{"foo", "foovalue"},
{"bar", "barvalue"},
{"baz", ""},
{"foo", "boo"},
{"baz", "buzz"}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", absl::string_view("foovalue\0boo", 12)),
Pair("bar", "barvalue"),
Pair("baz", absl::string_view("\0buzz", 5))));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, MoreThanTwoValues) {
auto headers = FromList({{"set-cookie", "value1"},
{"set-cookie", "value2"},
{"set-cookie", "value3"}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(Pair(
"set-cookie",
absl::string_view("value1\0value2\0value3", 20))));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, Cookie) {
auto headers = FromList({{"foo", "foovalue"},
{"bar", "barvalue"},
{"cookie", "value1"},
{"baz", ""}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("cookie", "value1"), Pair("baz", "")));
EXPECT_EQ(-1, content_length);
}
TEST_F(CopyAndValidateHeaders, MultipleCookies) {
auto headers = FromList({{"foo", "foovalue"},
{"bar", "barvalue"},
{"cookie", "value1"},
{"baz", ""},
{"cookie", "value2"}});
int64_t content_length = -1;
Http2HeaderBlock block;
ASSERT_TRUE(
SpdyUtils::CopyAndValidateHeaders(*headers, &content_length, &block));
EXPECT_THAT(block, UnorderedElementsAre(
Pair("foo", "foovalue"), Pair("bar", "barvalue"),
Pair("cookie", "value1; value2"), Pair("baz", "")));
EXPECT_EQ(-1, content_length);
}
using CopyAndValidateTrailers = QuicTest;
TEST_F(CopyAndValidateTrailers, SimplestValidList) {
auto trailers = FromList({{kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_EQ(1234u, final_byte_offset);
}
TEST_F(CopyAndValidateTrailers, EmptyTrailerListWithFinalByteOffsetExpected) {
QuicHeaderList trailers;
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers,
EmptyTrailerListWithFinalByteOffsetNotExpected) {
QuicHeaderList trailers;
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
trailers, kDoNotExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_TRUE(block.empty());
}
TEST_F(CopyAndValidateTrailers, FinalByteOffsetExpectedButNotPresent) {
auto trailers = FromList({{"key", "value"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, FinalByteOffsetNotExpectedButPresent) {
auto trailers = FromList({{"key", "value"}, {kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kDoNotExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, FinalByteOffsetNotExpectedAndNotPresent) {
auto trailers = FromList({{"key", "value"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kDoNotExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_THAT(block, UnorderedElementsAre(Pair("key", "value")));
}
TEST_F(CopyAndValidateTrailers, EmptyName) {
auto trailers = FromList({{"", "value"}, {kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, PseudoHeaderInTrailers) {
auto trailers =
FromList({{":pseudo_key", "value"}, {kFinalOffsetHeaderKey, "1234"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_FALSE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
}
TEST_F(CopyAndValidateTrailers, DuplicateTrailers) {
auto trailers = FromList({{"key", "value0"},
{"key", "value1"},
{"key", ""},
{"key", ""},
{"key", "value2"},
{"key", ""},
{kFinalOffsetHeaderKey, "1234"},
{"other_key", "value"},
{"key", "non_contiguous_duplicate"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*trailers, kExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_THAT(
block,
UnorderedElementsAre(
Pair("key",
absl::string_view(
"value0\0value1\0\0\0value2\0\0non_contiguous_duplicate",
48)),
Pair("other_key", "value")));
}
TEST_F(CopyAndValidateTrailers, DuplicateCookies) {
auto headers = FromList({{"cookie", " part 1"},
{"cookie", "part 2 "},
{"cookie", "part3"},
{"key", "value"},
{kFinalOffsetHeaderKey, "1234"},
{"cookie", " non_contiguous_cookie!"}});
size_t final_byte_offset = 0;
Http2HeaderBlock block;
EXPECT_TRUE(SpdyUtils::CopyAndValidateTrailers(
*headers, kExpectFinalByteOffset, &final_byte_offset, &block));
EXPECT_THAT(
block,
UnorderedElementsAre(
Pair("cookie", " part 1; part 2 ; part3; non_contiguous_cookie!"),
Pair("key", "value")));
}
using PopulateHeaderBlockFromUrl = QuicTest;
TEST_F(PopulateHeaderBlockFromUrl, NormalUsage) {
std::string url = "https:
Http2HeaderBlock headers;
EXPECT_TRUE(SpdyUtils::PopulateHeaderBlockFromUrl(url, &headers));
EXPECT_EQ("https", headers[":scheme"].as_string());
EXPECT_EQ("www.google.com", headers[":authority"].as_string());
EXPECT_EQ("/index.html", headers[":path"].as_string());
}
TEST_F(PopulateHeaderBlockFromUrl, UrlWithNoPath) {
std::string url = "https:
Http2HeaderBlock headers;
EXPECT_TRUE(SpdyUtils::PopulateHeaderBlockFromUrl(url, &headers));
EXPECT_EQ("https", headers[":scheme"].as_string());
EXPECT_EQ("www.google.com", headers[":authority"].as_string());
EXPECT_EQ("/", headers[":path"].as_string());
}
TEST_F(PopulateHeaderBlockFromUrl, Failure) {
Http2HeaderBlock headers;
EXPECT_FALSE(SpdyUtils::PopulateHeaderBlockFromUrl("/", &headers));
EXPECT_FALSE(SpdyUtils::PopulateHeaderBlockFromUrl("/index.html", &headers));
EXPECT_FALSE(
SpdyUtils::PopulateHeaderBlockFromUrl("www.google.com/", &headers));
}
using ExtractQuicVersionFromAltSvcEntry = QuicTest;
TEST_F(ExtractQuicVersionFromAltSvcEntry, SupportedVersion) {
ParsedQuicVersionVector supported_versions = AllSupportedVersions();
spdy::SpdyAltSvcWireFormat::AlternativeService entry;
for (const ParsedQuicVersion& version : supported_versions) {
entry.protocol_id = AlpnForVersion(version);
ParsedQuicVersion expected_version = version;
if (entry.protocol_id == AlpnForVersion(ParsedQuicVersion::RFCv1()) &&
version != ParsedQuicVersion::RFCv1()) {
expected_version = ParsedQuicVersion::RFCv1();
}
EXPECT_EQ(expected_version, SpdyUtils::ExtractQuicVersionFromAltSvcEntry(
entry, supported_versions))
<< "version: " << version;
}
}
TEST_F(ExtractQuicVersionFromAltSvcEntry, UnsupportedVersion) {
spdy::SpdyAltSvcWireFormat::AlternativeService entry;
entry.protocol_id = "quic";
EXPECT_EQ(ParsedQuicVersion::Unsupported(),
SpdyUtils::ExtractQuicVersionFromAltSvcEntry(
entry, AllSupportedVersions()));
}
}
} |
128 | #ifndef TENSORSTORE_INTERNAL_COMPRESSION_XZ_COMPRESSOR_H_
#define TENSORSTORE_INTERNAL_COMPRESSION_XZ_COMPRESSOR_H_
#include <cstddef>
#include <lzma.h>
#include "tensorstore/internal/compression/json_specified_compressor.h"
namespace tensorstore {
namespace internal {
struct XzOptions {
int level = 6;
bool extreme = false;
::lzma_check check = LZMA_CHECK_CRC64;
};
class XzCompressor : public JsonSpecifiedCompressor, public XzOptions {
public:
std::unique_ptr<riegeli::Writer> GetWriter(
std::unique_ptr<riegeli::Writer> base_writer,
size_t element_bytes) const override;
std::unique_ptr<riegeli::Reader> GetReader(
std::unique_ptr<riegeli::Reader> base_reader,
size_t element_bytes) const override;
};
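// Illustrative usage sketch (not part of the original source; the base
// writer plumbing is assumed):
//
//   XzCompressor compressor;
//   compressor.level = 9;                  // inherited from XzOptions
//   compressor.check = LZMA_CHECK_CRC32;
//   auto writer = compressor.GetWriter(std::move(base_writer),
//                                      /*element_bytes=*/1);
//
// Note that element_bytes is accepted for interface compatibility; the xz
// codec implementation below does not use it.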
}
}
#endif
#include "tensorstore/internal/compression/xz_compressor.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "riegeli/xz/xz_reader.h"
#include "riegeli/xz/xz_writer.h"
namespace tensorstore {
namespace internal {
std::unique_ptr<riegeli::Writer> XzCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
using Writer = riegeli::XzWriter<std::unique_ptr<riegeli::Writer>>;
Writer::Options options;
options.set_container(Writer::Container::kXz);
options.set_check(static_cast<Writer::Check>(check));
options.set_compression_level(level);
options.set_extreme(extreme);
return std::make_unique<Writer>(std::move(base_writer), options);
}
std::unique_ptr<riegeli::Reader> XzCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
using Reader = riegeli::XzReader<std::unique_ptr<riegeli::Reader>>;
Reader::Options options;
options.set_container(Reader::Container::kXzOrLzma);
options.set_concatenate(true);
return std::make_unique<Reader>(std::move(base_reader), options);
}
}
} | #include "tensorstore/internal/compression/xz_compressor.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include <lzma.h>
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::XzCompressor;
TEST(XzCompressorTest, SmallRoundtrip) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
TENSORSTORE_ASSERT_OK(compressor.Decode(
encode_result.Subcord(3, encode_result.size() - 3), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, SmallRoundtripFragmented) {
XzCompressor compressor;
const absl::Cord input = absl::MakeFragmentedCord(
{"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
absl::Cord encode_result("abc"), decode_result("def");
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
std::vector<std::string> encode_result_fragments;
for (size_t i = 3; i < encode_result.size(); ++i) {
encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
}
TENSORSTORE_ASSERT_OK(compressor.Decode(
absl::MakeFragmentedCord(encode_result_fragments), &decode_result, 0));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST(XzCompressorTest, LargeRoundtrip) {
std::string input(100000, '\0');
unsigned char x = 0;
for (auto& v : input) {
v = x;
x += 7;
}
XzCompressor compressor;
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(
compressor.Encode(absl::Cord(input), &encode_result, 0));
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultLevel) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.level = 9;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, NonDefaultCheck) {
XzCompressor compressor;
XzCompressor compressor2;
compressor2.check = LZMA_CHECK_CRC32;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result1, 0));
TENSORSTORE_ASSERT_OK(compressor2.Encode(input, &encode_result2, 0));
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(compressor.Decode(encode_result2, &decode_result, 0));
EXPECT_EQ(input, decode_result);
}
TEST(XzCompressorTest, DecodeCorruptData) {
XzCompressor compressor;
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted[0] = 0;
EXPECT_THAT(compressor.Decode(absl::Cord(corrupted), &decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
{
absl::Cord encode_result, decode_result;
TENSORSTORE_ASSERT_OK(compressor.Encode(input, &encode_result, 0));
ASSERT_GE(encode_result.size(), 1);
EXPECT_THAT(
compressor.Decode(encode_result.Subcord(0, encode_result.size() - 1),
&decode_result, 0),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
} |
129 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_TILE_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_TILE_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewTileNodeShader();
}
}
}
#endif
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class ConvertTile : public OpConverterBase<ConvertTile> {
public:
explicit ConvertTile(const OpConverterParams *params)
: OpConverterBase<ConvertTile>(
params,
{DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}) {}
static constexpr std::array<InputArgSpec, 2> InputSpec() {
return std::array<InputArgSpec, 2>{
InputArgSpec::Create("input_tensor", TrtInputArg::kBoth),
InputArgSpec::Create("weight", TrtInputArg::kBoth)};
}
Status Validate() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
const auto &repl = inputs.at(1);
if (params.use_implicit_batch && repl.is_tensor()) {
return errors::InvalidArgument(
"Conversion for Tile is not implemented for multipliers "
"passed as a tensor in implicit batch mode.");
}
nvinfer1::DataType dtype;
const int *multiplies;
if (repl.is_weights()) {
TFTRT_CHECK_SHAPE_TENSOR(repl.weights().GetTensor());
dtype = repl.weights().TrtDType();
multiplies = repl.weights().GetPointer<int>();
} else {
dtype = repl.tensor()->getType();
multiplies = nullptr;
}
const auto &node = params.node_def;
TF_RETURN_IF_ERROR(check_type(dtype, nvinfer1::DataType::kINT32, node, 1));
const auto dims = inputs.at(0).GetTrtDims();
const auto nb_dims =
dims.nbDims +
(params.use_implicit_batch && inputs.at(0).is_tensor() ? 1 : 0);
if (multiplies) {
const int mult_numb = repl.weights().count();
if (mult_numb != nb_dims) {
return errors::InvalidArgument(
"The length of the replication vector (", mult_numb,
") of the Tile operation in '", node.name(),
"' is expected to be equal to the rank of the input vector (",
nb_dims, ").");
}
if (std::any_of(multiplies, multiplies + nb_dims,
[](int i) { return i <= 0; })) {
const auto &mul = absl::StrJoin(multiplies, multiplies + nb_dims, ", ");
return errors::InvalidArgument(
"All replications of the Tile operation in '", node.name(),
"' should be positive, got (", mul, ").");
}
if (params.use_implicit_batch && multiplies[0] > 1) {
return errors::Unimplemented(
"The Tile operation along the batch dimension in '", node.name(),
"' is not implemented.");
}
} else {
const auto &repl_dims = repl.GetTrtDims();
if (repl_dims.nbDims != 1) {
return errors::InvalidArgument(
"When replications are defined as a tensor, that tensor must be "
"1-dimensional. Got ",
repl_dims.nbDims, "-dimensional tensor.");
}
if (repl_dims.d[0] >= 0 && repl_dims.d[0] != nb_dims) {
return errors::InvalidArgument(
"When replications are defined as a tensor, "
"the number of its elements (",
repl_dims.d[0], ") must be equal to the rank of the input tensor (",
nb_dims, ").");
}
}
return OkStatus();
}
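  // Illustrative note (not from the original source): with a static input of
  // shape [2,3] and weight multipliers {2,1}, Convert() below emits a
  // wrap-mode slice whose output dims are {2*2, 1*3} = {4,3}; when the
  // multipliers arrive as a tensor, the slice's shape input is instead wired
  // to shape(input) * multipliers computed inside the network.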
Status Convert() {
const auto ¶ms = *this->params_;
const auto &inputs = params.inputs;
auto *converter = params.converter;
auto *network = converter->network();
const auto &tensor = inputs.at(0);
const auto &replics = inputs.at(1);
const auto dims = tensor.GetTrtDims();
const auto nb_dims = dims.nbDims;
nvinfer1::Dims output_size{nb_dims, {1}};
bool dynamic_flag = replics.is_tensor() || !HasStaticShape(dims);
if (!dynamic_flag) {
const auto dim_offset =
params.use_implicit_batch && tensor.is_tensor() ? 1 : 0;
const auto *input_size = dims.d;
const int *pReplics = replics.weights().GetPointer<int>() + dim_offset;
for (int i = 0; i < nb_dims; i++)
output_size.d[i] = pReplics[i] * input_size[i];
}
StatusOr<TRTNetworkBuilder> builder;
if (tensor.is_weights() || (dynamic_flag && replics.is_weights())) {
builder =
TRTNetworkBuilder::Create(converter->network(), params.weight_store);
TRT_ENSURE_OK(builder);
}
ITensorProxyPtr input_tensor;
if (tensor.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(tensor.weights().GetTrtWeights(), dims);
TRT_ENSURE_PTR_OK(weights_const);
input_tensor = (*weights_const)->getOutput(0);
} else {
input_tensor = tensor.tensor();
}
auto &input_trt_tensor = *input_tensor->trt_tensor();
nvinfer1::ITensor *target_shape = nullptr;
if (dynamic_flag) {
nvinfer1::ITensor *mult;
if (replics.is_weights()) {
StatusOr<nvinfer1::IConstantLayer *> weights_const =
builder->WeightsToConstant(replics.weights().GetTrtWeights(),
replics.GetTrtDims());
TRT_ENSURE_PTR_OK(weights_const);
mult = (*weights_const)->getOutput(0);
} else {
const ITensorProxyPtr multiplies = replics.tensor()->trt_tensor();
mult = multiplies->trt_tensor();
}
nvinfer1::ITensor *shape =
network->addShape(input_trt_tensor)->getOutput(0);
target_shape = network
->addElementWise(*shape, *mult,
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
}
nvinfer1::Dims start{nb_dims, {}};
DimsAdapter stride(std::vector<int>(nb_dims, 1));
auto layer = network->addSlice(input_trt_tensor, start, output_size,
stride.AsTrtDims());
layer->setMode(nvinfer1::SliceMode::kWRAP);
if (target_shape) layer->setInput(2, *target_shape);
converter->SetLayerName(layer, params.node_def.name(), "to_tile");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (tensor.is_weights() && params.use_implicit_batch) {
DimsAdapter adap(output_tensor->getDimensions());
TF_RETURN_IF_ERROR(adap.RemoveBatchDimension());
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params.converter, TRT_TensorOrWeights(output_tensor),
adap.AsTrtDims(), false, &output_tensor, params.node_def));
}
AddOutput(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
};
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertTile>(), "Tile");
}
}
}
#endif | #include "tensorflow/lite/delegates/gpu/gl/kernels/tile.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(TileTest, ChannelsTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 1, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 1, 6), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f,
4.0f, 5.0f, 6.0f, 4.0f, 5.0f, 6.0f}));
}
TEST(TileTest, WidthTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 2, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 1, 4, 3), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
}
TEST(TileTest, HeightTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 1, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 4, 1, 3), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
}
TEST(TileTest, HWCTiling) {
const TensorRef<BHWC> input = {
.type = DataType::FLOAT32, .shape = BHWC(1, 2, 2, 3), .ref = 0};
const TensorRef<BHWC> output = {
.type = DataType::FLOAT32, .shape = BHWC(1, 4, 4, 6), .ref = 1};
SingleOpModel model({ToString(OperationType::TILE)}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
8.0f, 9.0f, 10.0f, 11.0f, 12.0f}));
ASSERT_OK(model.Invoke(*NewTileNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(
FloatNear(1e-6),
{1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 4.0f,
5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
6.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f, 10.0f, 11.0f, 12.0f, 7.0f, 8.0f, 9.0f, 7.0f,
8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 10.0f, 11.0f, 12.0f, 1.0f, 2.0f,
3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 4.0f, 5.0f, 6.0f,
1.0f, 2.0f, 3.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 4.0f,
5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f,
12.0f, 10.0f, 11.0f, 12.0f, 7.0f, 8.0f, 9.0f, 7.0f, 8.0f, 9.0f,
10.0f, 11.0f, 12.0f, 10.0f, 11.0f, 12.0f}));
}
}
}
}
} |
130 | #ifndef GLOG_INTERNAL_UTILITIES_H
#define GLOG_INTERNAL_UTILITIES_H
#include <cstddef>
#include <cstdio>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#ifdef _LP64
# define __PRIS_PREFIX "z"
#else
# define __PRIS_PREFIX
#endif
#define PRIdS __PRIS_PREFIX "d"
#define PRIxS __PRIS_PREFIX "x"
#define PRIuS __PRIS_PREFIX "u"
#define PRIXS __PRIS_PREFIX "X"
#define PRIoS __PRIS_PREFIX "o"
#include "config.h"
#include "glog/platform.h"
#if defined(GLOG_USE_WINDOWS_PORT)
# include "port.h"
#endif
#if defined(HAVE_UNISTD_H)
# include <unistd.h>
#endif
#if !defined(HAVE_SSIZE_T)
# if defined(GLOG_OS_WINDOWS)
# include <basetsd.h>
using ssize_t = SSIZE_T;
# else
using ssize_t = std::ptrdiff_t;
# endif
#endif
#include "glog/log_severity.h"
#include "glog/types.h"
#ifndef ARRAYSIZE
# define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
#endif
namespace google {
namespace logging {
namespace internal {
struct CrashReason {
CrashReason() = default;
const char* filename{nullptr};
int line_number{0};
const char* message{nullptr};
void* stack[32];
int depth{0};
};
}
}
inline namespace glog_internal_namespace_ {
#if defined(__has_attribute)
# if __has_attribute(noinline)
# define ATTRIBUTE_NOINLINE __attribute__((noinline))
# define HAVE_ATTRIBUTE_NOINLINE
# endif
#endif
#if !defined(HAVE_ATTRIBUTE_NOINLINE)
# if defined(GLOG_OS_WINDOWS)
# define ATTRIBUTE_NOINLINE __declspec(noinline)
# define HAVE_ATTRIBUTE_NOINLINE
# endif
#endif
#if !defined(HAVE_ATTRIBUTE_NOINLINE)
# define ATTRIBUTE_NOINLINE
#endif
void AlsoErrorWrite(LogSeverity severity, const char* tag,
const char* message) noexcept;
const char* ProgramInvocationShortName();
int32 GetMainThreadPid();
bool PidHasChanged();
const std::string& MyUserName();
const char* const_basename(const char* filepath);
void SetCrashReason(const logging::internal::CrashReason* r);
void InitGoogleLoggingUtilities(const char* argv0);
void ShutdownGoogleLoggingUtilities();
template <class Functor>
class ScopedExit final {
public:
template <class F, std::enable_if_t<
std::is_constructible<Functor, F&&>::value>* = nullptr>
constexpr explicit ScopedExit(F&& functor) noexcept(
std::is_nothrow_constructible<Functor, F&&>::value)
: functor_{std::forward<F>(functor)} {}
~ScopedExit() noexcept(noexcept(std::declval<Functor&>()())) { functor_(); }
ScopedExit(const ScopedExit& other) = delete;
ScopedExit& operator=(const ScopedExit& other) = delete;
ScopedExit(ScopedExit&& other) noexcept = delete;
ScopedExit& operator=(ScopedExit&& other) noexcept = delete;
private:
Functor functor_;
};
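// Minimal usage sketch (an assumption, not from the original source):
//
//   std::FILE* f = std::fopen("data.txt", "r");
//   ScopedExit<std::function<void()>> close_file{
//       [f] { if (f != nullptr) std::fclose(f); }};
//   // ... use f; the functor runs once when close_file leaves scope.
//
// Copy and move are deleted above, so the guard cannot outlive its scope.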
class GLOG_NO_EXPORT FileDescriptor final {
static constexpr int InvalidHandle = -1;
public:
constexpr FileDescriptor() noexcept : FileDescriptor{nullptr} {}
constexpr explicit FileDescriptor(int fd) noexcept : fd_{fd} {}
constexpr FileDescriptor(std::nullptr_t) noexcept : fd_{InvalidHandle} {}
FileDescriptor(const FileDescriptor& other) = delete;
FileDescriptor& operator=(const FileDescriptor& other) = delete;
FileDescriptor(FileDescriptor&& other) noexcept : fd_{other.release()} {}
FileDescriptor& operator=(FileDescriptor&& other) noexcept {
reset(other.release());
return *this;
}
constexpr explicit operator bool() const noexcept {
return fd_ != InvalidHandle;
}
constexpr int get() const noexcept { return fd_; }
int release() noexcept { return std::exchange(fd_, InvalidHandle); }
void reset(std::nullptr_t) noexcept { safe_close(); }
void reset() noexcept { reset(nullptr); }
void reset(int fd) noexcept {
reset();
fd_ = fd;
}
int close() noexcept { return unsafe_close(); }
~FileDescriptor() { safe_close(); }
private:
int unsafe_close() noexcept { return ::close(release()); }
void safe_close() noexcept {
if (*this) {
unsafe_close();
}
}
int fd_;
};
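// Minimal sketch of intended use (assumed from the interface above):
//
//   FileDescriptor fd{::open("/dev/null", O_WRONLY)};  // requires <fcntl.h>
//   if (fd) {
//     ::write(fd.get(), "x", 1);
//   }
//   // The destructor closes the handle; release() detaches without closing,
//   // and close() closes eagerly, returning ::close's result.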
constexpr bool operator==(const FileDescriptor& lhs, int rhs) noexcept {
return lhs.get() == rhs;
}
constexpr bool operator==(int lhs, const FileDescriptor& rhs) noexcept {
return rhs == lhs;
}
constexpr bool operator!=(const FileDescriptor& lhs, int rhs) noexcept {
return !(lhs == rhs);
}
constexpr bool operator!=(int lhs, const FileDescriptor& rhs) noexcept {
return !(lhs == rhs);
}
constexpr bool operator==(const FileDescriptor& lhs, std::nullptr_t) noexcept {
return !lhs;
}
constexpr bool operator==(std::nullptr_t, const FileDescriptor& rhs) noexcept {
return !rhs;
}
constexpr bool operator!=(const FileDescriptor& lhs, std::nullptr_t) noexcept {
return static_cast<bool>(lhs);
}
constexpr bool operator!=(std::nullptr_t, const FileDescriptor& rhs) noexcept {
return static_cast<bool>(rhs);
}
}
}
template <>
struct std::default_delete<std::FILE> {
void operator()(FILE* p) const noexcept { fclose(p); }
};
#endif
#define _GNU_SOURCE 1
#include "utilities.h"
#include <atomic>
#include <cerrno>
#include <csignal>
#include <cstdio>
#include <cstdlib>
#include "base/googleinit.h"
#include "config.h"
#include "glog/flags.h"
#include "glog/logging.h"
#include "stacktrace.h"
#include "symbolize.h"
#ifdef GLOG_OS_ANDROID
# include <android/log.h>
#endif
#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif
#if defined(HAVE_SYSCALL_H)
# include <syscall.h>
#elif defined(HAVE_SYS_SYSCALL_H)
# include <sys/syscall.h>
#endif
#ifdef HAVE_SYSLOG_H
# include <syslog.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_PWD_H
# include <pwd.h>
#endif
#if defined(HAVE___PROGNAME)
extern char* __progname;
#endif
using std::string;
namespace google {
static const char* g_program_invocation_short_name = nullptr;
bool IsGoogleLoggingInitialized() {
return g_program_invocation_short_name != nullptr;
}
inline namespace glog_internal_namespace_ {
constexpr int FileDescriptor::InvalidHandle;
void AlsoErrorWrite(LogSeverity severity, const char* tag,
const char* message) noexcept {
#if defined(GLOG_OS_WINDOWS)
(void)severity;
(void)tag;
::OutputDebugStringA(message);
#elif defined(GLOG_OS_ANDROID)
constexpr int android_log_levels[] = {
ANDROID_LOG_INFO,
ANDROID_LOG_WARN,
ANDROID_LOG_ERROR,
ANDROID_LOG_FATAL,
};
__android_log_write(android_log_levels[severity], tag, message);
#else
(void)severity;
(void)tag;
(void)message;
#endif
}
}
}
#ifdef HAVE_STACKTRACE
# include "base/commandlineflags.h"
# include "stacktrace.h"
# include "symbolize.h"
namespace google {
using DebugWriter = void(const char*, void*);
static const int kPrintfPointerFieldWidth = 2 + 2 * sizeof(void*);
static void DebugWriteToStderr(const char* data, void*) {
if (write(fileno(stderr), data, strlen(data)) < 0) {
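    // The result of write() is deliberately ignored; there is no useful
    // recovery path while dumping a stack trace.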
}
AlsoErrorWrite(GLOG_FATAL,
glog_internal_namespace_::ProgramInvocationShortName(), data);
}
static void DebugWriteToString(const char* data, void* arg) {
reinterpret_cast<string*>(arg)->append(data);
}
# ifdef HAVE_SYMBOLIZE
static void DumpPCAndSymbol(DebugWriter* writerfn, void* arg, void* pc,
const char* const prefix) {
char tmp[1024];
const char* symbol = "(unknown)";
if (Symbolize(reinterpret_cast<char*>(pc) - 1, tmp, sizeof(tmp))) {
symbol = tmp;
}
char buf[1024];
std::snprintf(buf, sizeof(buf), "%s@ %*p %s\n", prefix,
kPrintfPointerFieldWidth, pc, symbol);
writerfn(buf, arg);
}
# endif
static void DumpPC(DebugWriter* writerfn, void* arg, void* pc,
const char* const prefix) {
char buf[100];
std::snprintf(buf, sizeof(buf), "%s@ %*p\n", prefix, kPrintfPointerFieldWidth,
pc);
writerfn(buf, arg);
}
static void DumpStackTrace(int skip_count, DebugWriter* writerfn, void* arg) {
void* stack[32];
int depth = GetStackTrace(stack, ARRAYSIZE(stack), skip_count + 1);
for (int i = 0; i < depth; i++) {
# if defined(HAVE_SYMBOLIZE)
if (FLAGS_symbolize_stacktrace) {
DumpPCAndSymbol(writerfn, arg, stack[i], " ");
} else {
DumpPC(writerfn, arg, stack[i], " ");
}
# else
DumpPC(writerfn, arg, stack[i], " ");
# endif
}
}
# ifdef __GNUC__
__attribute__((noreturn))
# endif
static void
DumpStackTraceAndExit() {
DumpStackTrace(1, DebugWriteToStderr, nullptr);
if (IsFailureSignalHandlerInstalled()) {
# ifdef HAVE_SIGACTION
struct sigaction sig_action;
memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL;
sigaction(SIGABRT, &sig_action, nullptr);
# elif defined(GLOG_OS_WINDOWS)
signal(SIGABRT, SIG_DFL);
# endif
}
abort();
}
}
#endif
namespace google {
inline namespace glog_internal_namespace_ {
const char* const_basename(const char* filepath) {
const char* base = strrchr(filepath, '/');
#ifdef GLOG_OS_WINDOWS
if (!base) base = strrchr(filepath, '\\');
#endif
return base ? (base + 1) : filepath;
}
const char* ProgramInvocationShortName() {
if (g_program_invocation_short_name != nullptr) {
return g_program_invocation_short_name;
}
#if defined(HAVE_PROGRAM_INVOCATION_SHORT_NAME)
return program_invocation_short_name;
#elif defined(HAVE_GETPROGNAME)
return getprogname();
#elif defined(HAVE___PROGNAME)
return __progname;
#elif defined(HAVE___ARGV)
return const_basename(__argv[0]);
#else
return "UNKNOWN";
#endif
}
static int32 g_main_thread_pid = getpid();
int32 GetMainThreadPid() { return g_main_thread_pid; }
bool PidHasChanged() {
int32 pid = getpid();
if (g_main_thread_pid == pid) {
return false;
}
g_main_thread_pid = pid;
return true;
}
static string g_my_user_name;
const string& MyUserName() { return g_my_user_name; }
static void MyUserNameInitializer() {
#if defined(GLOG_OS_WINDOWS)
const char* user = getenv("USERNAME");
#else
const char* user = getenv("USER");
#endif
if (user != nullptr) {
g_my_user_name = user;
} else {
#if defined(HAVE_PWD_H) && defined(HAVE_UNISTD_H)
struct passwd pwd;
struct passwd* result = nullptr;
char buffer[1024] = {'\0'};
uid_t uid = geteuid();
int pwuid_res = getpwuid_r(uid, &pwd, buffer, sizeof(buffer), &result);
if (pwuid_res == 0 && result) {
g_my_user_name = pwd.pw_name;
} else {
std::snprintf(buffer, sizeof(buffer), "uid%d", uid);
g_my_user_name = buffer;
}
#endif
if (g_my_user_name.empty()) {
g_my_user_name = "invalid-user";
}
}
}
REGISTER_MODULE_INITIALIZER(utilities, MyUserNameInitializer())
static std::atomic<const logging::internal::CrashReason*> g_reason{nullptr};
void SetCrashReason(const logging::internal::CrashReason* r) {
const logging::internal::CrashReason* expected = nullptr;
g_reason.compare_exchange_strong(expected, r);
}
void InitGoogleLoggingUtilities(const char* argv0) {
CHECK(!IsGoogleLoggingInitialized())
<< "You called InitGoogleLogging() twice!";
g_program_invocation_short_name = const_basename(argv0);
#ifdef HAVE_STACKTRACE
InstallFailureFunction(&DumpStackTraceAndExit);
#endif
}
void ShutdownGoogleLoggingUtilities() {
CHECK(IsGoogleLoggingInitialized())
<< "You called ShutdownGoogleLogging() without calling "
"InitGoogleLogging() first!";
g_program_invocation_short_name = nullptr;
#ifdef HAVE_SYSLOG_H
closelog();
#endif
}
}
#ifdef HAVE_STACKTRACE
std::string GetStackTrace() {
std::string stacktrace;
DumpStackTrace(1, DebugWriteToString, &stacktrace);
return stacktrace;
}
#endif
} | #include "utilities.h"
#include "glog/logging.h"
#include "googletest.h"
#ifdef GLOG_USE_GFLAGS
# include <gflags/gflags.h>
using namespace GFLAGS_NAMESPACE;
#endif
using namespace google;
TEST(utilities, InitGoogleLoggingDeathTest) {
ASSERT_DEATH(InitGoogleLogging("foobar"), "");
}
int main(int argc, char** argv) {
InitGoogleLogging(argv[0]);
InitGoogleTest(&argc, argv);
CHECK_EQ(RUN_ALL_TESTS(), 0);
} |
131 | #ifndef XLA_SERVICE_STABLE_SORT_EXPANDER_H_
#define XLA_SERVICE_STABLE_SORT_EXPANDER_H_
#include <cstdint>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/op_expander_pass.h"
namespace xla {
class StableSortExpander : public OpExpanderPass {
public:
absl::string_view name() const override { return "stable-sort-expander"; }
static int64_t IotaOperandIndexForStableSort(const HloSortInstruction& sort);
private:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/stable_sort_expander.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
int64_t StableSortExpander::IotaOperandIndexForStableSort(
const HloSortInstruction& sort) {
for (const HloInstruction* operand : sort.operands()) {
if (operand->opcode() == HloOpcode::kIota &&
Cast<HloIotaInstruction>(operand)->iota_dimension() ==
sort.sort_dimension() &&
operand->shape().element_type() == S32) {
return sort.operand_index(operand);
}
}
return -1;
}
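// The expansion below turns a stable sort into an unstable sort over an
// extra iota operand whose values break ties. Sketch of the rewritten
// comparator (names are illustrative, not from the source):
//
//   same        = compare(old_lt(a, b), old_lt(b, a), EQ)  // a and b tie
//   tie_breaker = iota_a < iota_b
//   new_root    = select(same, tie_breaker, old_lt(a, b))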
absl::StatusOr<HloInstruction*> StableSortExpander::ExpandInstruction(
HloInstruction* instruction) {
auto* sort = Cast<HloSortInstruction>(instruction);
HloComputation* computation = sort->parent();
HloInstruction* expanded_sort = nullptr;
absl::flat_hash_set<int64_t> used_indices;
int64_t iota_index = IotaOperandIndexForStableSort(*sort);
if (iota_index == -1) {
Shape iota_shape = sort->operand(0)->shape();
if (iota_shape.dimensions(sort->sort_dimension()) >
std::numeric_limits<int32_t>::max()) {
return Unimplemented(
"Stable sorting of more than 2^31-1 elements is not implemented");
}
iota_shape.set_element_type(S32);
auto iota = computation->AddInstruction(
HloInstruction::CreateIota(iota_shape, sort->sort_dimension()));
auto comparator = sort->to_apply();
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
std::vector<std::unique_ptr<HloInstruction>> extra_parameters;
std::vector<HloInstruction*> extra_parameter_ptrs;
Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
extra_parameters.push_back(HloInstruction::CreateParameter(
sort->operand_count() * 2, scalar_shape,
absl::StrCat("p.", sort->operand_count(), ".lhs")));
extra_parameter_ptrs.push_back(extra_parameters.back().get());
extra_parameters.push_back(HloInstruction::CreateParameter(
sort->operand_count() * 2 + 1, scalar_shape,
absl::StrCat("p.", sort->operand_count(), ".rhs")));
extra_parameter_ptrs.push_back(extra_parameters.back().get());
sort->set_to_apply(sort->GetModule()->AddEmbeddedComputation(
comparator->CloneWithReplacements(&replacements,
extra_parameter_ptrs)));
std::vector<HloInstruction*> new_operands(sort->operands().begin(),
sort->operands().end());
new_operands.push_back(iota);
std::vector<Shape> new_shapes = sort->operand_count() == 1
? std::vector<Shape>{sort->shape()}
: sort->shape().tuple_shapes();
new_shapes.push_back(iota_shape);
Shape new_sort_shape = ShapeUtil::MakeTupleShape(new_shapes);
HloInstruction* new_sort = computation->AddInstruction(
sort->CloneWithNewOperands(new_sort_shape, new_operands));
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(sort->operand_count());
for (int64_t i = 0; i < sort->operand_count(); ++i) {
tuple_elements.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
sort->operand(i)->shape(), new_sort, i)));
}
expanded_sort = tuple_elements[0];
if (tuple_elements.size() > 1) {
expanded_sort = computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
sort = Cast<HloSortInstruction>(new_sort);
iota_index = sort->operand_count() - 1;
}
auto comparator = sort->to_apply();
std::vector<HloInstruction*> instructions_postorder =
comparator->MakeInstructionPostOrder();
absl::flat_hash_map<HloInstruction*, HloInstruction*> replacements;
auto replace = [&](HloInstruction* instr) {
auto it = replacements.find(instr);
if (it == replacements.end()) {
return instr;
}
return it->second;
};
HloInstruction* old_root = comparator->root_instruction();
for (int64_t i = 0; i < comparator->num_parameters(); ++i) {
replacements[comparator->parameter_instruction(i)] =
comparator->parameter_instruction(i ^ 1);
}
HloInstruction* cloned_root = nullptr;
for (HloInstruction* inst : instructions_postorder) {
if (inst->operand_count() == 0) {
continue;
}
std::vector<HloInstruction*> new_operands;
new_operands.reserve(inst->operand_count());
for (HloInstruction* operand : inst->operands()) {
new_operands.push_back(replace(operand));
}
auto new_instruction =
inst->CloneWithNewOperands(inst->shape(), new_operands);
replacements[inst] = new_instruction.get();
if (inst == old_root) {
cloned_root = new_instruction.get();
}
comparator->AddInstruction(std::move(new_instruction));
}
CHECK_NE(cloned_root, nullptr);
Shape scalar_pred = ShapeUtil::MakeShape(PRED, {});
HloInstruction* same =
comparator->AddInstruction(HloInstruction::CreateCompare(
scalar_pred, old_root, cloned_root, ComparisonDirection::kEq));
HloInstruction* tie_breaker =
comparator->AddInstruction(HloInstruction::CreateCompare(
scalar_pred, comparator->parameter_instruction(2 * iota_index),
comparator->parameter_instruction(2 * iota_index + 1),
ComparisonDirection::kLt));
HloInstruction* new_root =
comparator->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeShape(PRED, {}), HloOpcode::kSelect, same, tie_breaker,
old_root));
comparator->set_root_instruction(new_root);
return expanded_sort;
}
bool StableSortExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kSort &&
Cast<HloSortInstruction>(instruction)->is_stable();
}
} | #include "xla/service/stable_sort_expander.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = match;
using StableSortExpanderTest = HloTestBase;
bool IsSameComputationExceptParams(const HloInstruction* a,
const HloInstruction* b) {
if (a->opcode() != b->opcode() || a->operand_count() != b->operand_count()) {
return false;
}
if (a->opcode() == HloOpcode::kParameter) {
return a->parameter_number() == (b->parameter_number() ^ 1);
}
if (a->operand_count() == 0) {
return a == b;
}
for (int64_t i = 0; i < a->operand_count(); ++i) {
if (!IsSameComputationExceptParams(a->operand(i), b->operand(i))) {
return false;
}
}
return true;
}
void CheckComputationHasTieBreaker(const HloInstruction* root,
int64_t iota_parameter) {
ASSERT_EQ(root->opcode(), HloOpcode::kSelect);
ASSERT_EQ(root->operand(0)->opcode(), HloOpcode::kCompare);
ASSERT_EQ(root->operand(0)->comparison_direction(), ComparisonDirection::kEq);
EXPECT_THAT(root->operand(1),
GmockMatch(m::Lt(m::Parameter(iota_parameter * 2),
m::Parameter(iota_parameter * 2 + 1))));
EXPECT_EQ(root->operand(2), root->operand(0)->operand(0));
EXPECT_TRUE(IsSameComputationExceptParams(root->operand(0)->operand(0),
root->operand(0)->operand(1)));
}
TEST_F(StableSortExpanderTest, StabilizeSortReuseIotaOperand) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest,
StabilizeSortReuseIotaOperandComplicatedComparison) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
max = u32[] constant(2147483647)
zero = s32[] constant(0)
lhs.signed = s32[] bitcast-convert(p.0.lhs)
lhs.unsigned = u32[] bitcast-convert(p.0.lhs)
lhs.flipped = u32[] subtract(max, lhs.unsigned)
lhs.flipped.signed = s32[] bitcast-convert(lhs.flipped)
lhs.is_negative = pred[] compare(lhs.flipped.signed, zero), direction=LT
lhs.converted = s32[] select(lhs.is_negative, lhs.flipped.signed, lhs.signed)
rhs.signed = s32[] bitcast-convert(p.0.rhs)
rhs.unsigned = u32[] bitcast-convert(p.0.rhs)
rhs.flipped = u32[] subtract(max, rhs.unsigned)
rhs.flipped.signed = s32[] bitcast-convert(rhs.flipped)
rhs.is_negative = pred[] compare(rhs.flipped.signed, zero), direction=LT
rhs.converted = s32[] select(rhs.is_negative, rhs.flipped.signed, rhs.signed)
ROOT lt = pred[] compare(lhs.converted, rhs.converted), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest, StabilizeSortAddIotaOperandAndChangeRoot) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} parameter(1)
ROOT sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(
root, GmockMatch(m::Tuple(
m::GetTupleElement(
m::Sort(m::Parameter(0), m::Parameter(1), m::Iota()), 0),
m::GetTupleElement(
m::Sort(m::Parameter(0), m::Parameter(1), m::Iota()), 1))));
CheckComputationHasTieBreaker(
root->operand(0)->operand(0)->to_apply()->root_instruction(),
2);
}
TEST_F(StableSortExpanderTest, HonorIsStableFlag) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=false
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_FALSE(stabilizer.Run(module.get()).value());
}
TEST_F(StableSortExpanderTest,
StabilizeSortDontReuseIotaOperandWrongDimension) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = s32[] parameter(2)
p.1.rhs = s32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = s32[64,8732]{1,0} iota(), iota_dimension=0
sort = (f32[64,8732]{1,0}, s32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
AlgebraicSimplifier simplifier(AlgebraicSimplifierOptions(
[](const Shape&, const Shape&) { return false; }));
ASSERT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota(), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(),
2);
}
TEST_F(StableSortExpanderTest, StabilizeSortDontReuseIotaOperandWrongType) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY sort_computation {
keys = f32[64,8732]{1,0} parameter(0)
values = f32[64,8732]{1,0} iota(), iota_dimension=1
sort = (f32[64,8732]{1,0}, f32[64,8732]{1,0}) sort(keys, values),
dimensions={1}, to_apply=compare, is_stable=true
ROOT gte = f32[64,8732]{1,0} get-tuple-element(sort), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
AlgebraicSimplifier simplifier(AlgebraicSimplifierOptions(
[](const Shape&, const Shape&) { return false; }));
ASSERT_TRUE(simplifier.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota(), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(),
2);
}
TEST_F(StableSortExpanderTest, StabilizeSortR1) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
mask = s32[] constant(65535)
lhs = s32[] and(p.0.lhs, mask)
rhs = s32[] and(p.0.rhs, mask)
ROOT lt = pred[] compare(lhs, rhs), direction=LT
}
ENTRY sort_computation {
keys = s32[64,8732]{1,0} parameter(0)
ROOT sort = s32[64,8732]{1,0} sort(keys), dimensions={0}, to_apply=compare,
is_stable=true
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0)));
CheckComputationHasTieBreaker(
root->operand(0)->to_apply()->root_instruction(), 1);
}
TEST_F(StableSortExpanderTest, StabilizeSortR1NoRoot) {
const char* hlo_string = R"(
HloModule permutation_sort
compare {
p.0.lhs = s32[] parameter(0)
p.0.rhs = s32[] parameter(1)
mask = s32[] constant(65535)
lhs = s32[] and(p.0.lhs, mask)
rhs = s32[] and(p.0.rhs, mask)
ROOT lt = pred[] compare(lhs, rhs), direction=LT
}
ENTRY sort_computation {
keys = s32[64,8732]{1,0} parameter(0)
sort = s32[64,8732]{1,0} sort(keys), dimensions={0}, to_apply=compare,
is_stable=true
ROOT neg = s32[64,8732]{1,0} negate(sort)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
StableSortExpander stabilizer;
EXPECT_TRUE(stabilizer.Run(module.get()).value());
auto root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Negate(m::GetTupleElement(
m::Sort(m::Parameter(0), m::Iota()), 0))));
CheckComputationHasTieBreaker(
root->operand(0)->operand(0)->to_apply()->root_instruction(),
1);
}
}
} |
132 | #ifndef AROLLA_EXPR_OPERATORS_CASTING_REGISTRY_H_
#define AROLLA_EXPR_OPERATORS_CASTING_REGISTRY_H_
#include <optional>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/qtype/qtype.h"
namespace arolla::expr_operators {
class CastingRegistry {
public:
static CastingRegistry* GetInstance();
absl::StatusOr<expr::ExprNodePtr> GetCast(
expr::ExprNodePtr node, QTypePtr to_qtype, bool implicit_only,
std::optional<expr::ExprNodePtr> shape_for_broadcasting =
std::nullopt) const;
absl::StatusOr<QTypePtr> CommonType(absl::Span<const QTypePtr> arg_types,
bool enable_broadcasting = false) const;
private:
CastingRegistry();
absl::flat_hash_map<QTypePtr, expr::ExprOperatorPtr> cast_to_ops_;
};
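// Illustrative usage sketch (assumed, not from the original source):
//
//   auto* registry = CastingRegistry::GetInstance();
//   auto cast = registry->GetCast(node, GetQType<int64_t>(),
//                                 /*implicit_only=*/true);
//
// On success this wraps `node` in core.to_int64 and, when the target type is
// optional or array-like, additionally in core.to_optional /
// core.const_with_shape, as the implementation below shows.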
}
#endif
#include "arolla/expr/operators/casting_registry.h"
#include <cstdint>
#include <memory>
#include <optional>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/standard_type_properties/common_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/indestructible.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::RegisteredOperator;
CastingRegistry* CastingRegistry::GetInstance() {
static Indestructible<CastingRegistry> instance(
[](auto* self) { new (self) CastingRegistry; });
return instance.get();
}
CastingRegistry::CastingRegistry() {
cast_to_ops_ = {
{GetQType<bool>(), std::make_shared<RegisteredOperator>("core.to_bool")},
{GetQType<int32_t>(),
std::make_shared<RegisteredOperator>("core.to_int32")},
{GetQType<int64_t>(),
std::make_shared<RegisteredOperator>("core.to_int64")},
{GetQType<float>(),
std::make_shared<RegisteredOperator>("core.to_float32")},
{GetQType<double>(),
std::make_shared<RegisteredOperator>("core.to_float64")},
{GetWeakFloatQType(),
std::make_shared<RegisteredOperator>("core._to_weak_float")},
{GetQType<uint64_t>(),
std::make_shared<RegisteredOperator>("core.to_uint64")},
};
}
absl::StatusOr<ExprNodePtr> CastingRegistry::GetCast(
ExprNodePtr node, QTypePtr to_qtype, bool implicit_only,
std::optional<ExprNodePtr> shape_for_broadcasting) const {
const QType* from_qtype = node->qtype();
if (from_qtype == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"cannot cast expression %s with unknown QType", GetDebugSnippet(node)));
}
if (from_qtype == to_qtype) {
return node;
}
if (implicit_only &&
!CanCastImplicitly(
from_qtype, to_qtype,
shape_for_broadcasting.has_value())) {
return absl::InvalidArgumentError(
absl::StrFormat("implicit casting from %s to %s is not allowed",
from_qtype->name(), to_qtype->name()));
}
ASSIGN_OR_RETURN(auto from_scalar_qtype, GetScalarQType(from_qtype));
ASSIGN_OR_RETURN(auto to_scalar_qtype, GetScalarQType(to_qtype));
if (from_scalar_qtype == GetWeakFloatQType() &&
from_scalar_qtype != to_scalar_qtype) {
const auto upcast_op =
std::make_shared<expr::DerivedQTypeUpcastOperator>(node->qtype());
ASSIGN_OR_RETURN(node, CallOp(upcast_op, {node}));
from_scalar_qtype = GetQType<double>();
}
if (from_scalar_qtype != to_scalar_qtype) {
if (!cast_to_ops_.contains(to_scalar_qtype)) {
return absl::InvalidArgumentError(
absl::StrFormat("unable to find a cast from %s to %s",
from_qtype->name(), to_qtype->name()));
}
ASSIGN_OR_RETURN(node, CallOp(cast_to_ops_.at(to_scalar_qtype), {node}));
if (node->qtype() == to_qtype) {
return node;
}
}
if (!IsArrayLikeQType(node->qtype()) && IsArrayLikeQType(to_qtype)) {
if (!shape_for_broadcasting.has_value()) {
return absl::InvalidArgumentError(
absl::StrFormat("unable to cast non-array type %s into an array type "
"%s without shape for broadcasting provided",
from_qtype->name(), to_qtype->name()));
}
ASSIGN_OR_RETURN(
node, CallOp("core.const_with_shape", {*shape_for_broadcasting, node}));
if (node->qtype() == to_qtype) {
return node;
}
}
if (!IsOptionalQType(node->qtype()) && IsOptionalQType(to_qtype)) {
ASSIGN_OR_RETURN(node, CallOp("core.to_optional", {node}));
}
if (node->qtype() == to_qtype) {
return node;
} else {
return absl::InvalidArgumentError(
absl::StrFormat("unable to find a cast from %s to %s",
from_qtype->name(), to_qtype->name()));
}
}
absl::StatusOr<QTypePtr> CastingRegistry::CommonType(
absl::Span<const QTypePtr> arg_types, bool enable_broadcasting) const {
if (arg_types.empty()) {
return absl::InvalidArgumentError(
"empty arg_types list passed to CommonType");
}
const QType* result_qtype = CommonQType(arg_types, enable_broadcasting);
if (result_qtype == nullptr) {
if (enable_broadcasting || !CommonType(arg_types, true).ok()) {
return absl::InvalidArgumentError(
absl::StrCat("no common QType for ", FormatTypeVector(arg_types)));
} else {
return absl::InvalidArgumentError(
absl::StrCat("no common QType without broadcasting for ",
FormatTypeVector(arg_types)));
}
}
return result_qtype;
}
} | #include "arolla/expr/operators/casting_registry.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/operators/bootstrap_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::CallOp;
using ::arolla::expr::Leaf;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::HasSubstr;
class CastingRegistryTest : public ::testing::Test {
protected:
void SetUp() override { ASSERT_OK(InitArolla()); }
};
TEST_F(CastingRegistryTest, CommonType) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<int32_t>()}),
IsOkAndHolds(GetQType<int32_t>()));
EXPECT_THAT(reg->CommonType({GetQType<uint64_t>(), GetQType<uint64_t>()}),
IsOkAndHolds(GetQType<uint64_t>()));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<int32_t>(), GetOptionalQType<int32_t>()}),
IsOkAndHolds(GetOptionalQType<int32_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<uint64_t>(), GetOptionalQType<uint64_t>()}),
IsOkAndHolds(GetOptionalQType<uint64_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<int32_t>(), GetOptionalQType<int64_t>()}),
IsOkAndHolds(GetOptionalQType<int64_t>()));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<Bytes>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common QType for (INT32,BYTES)")));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<uint64_t>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no common QType for (INT32,UINT64)")));
EXPECT_THAT(reg->CommonType({GetQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(
reg->CommonType({GetQType<int32_t>(), GetQType<Bytes>()}).status(),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
reg->CommonType({GetOptionalQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetOptionalQType<int64_t>()));
}
TEST_F(CastingRegistryTest, GetCast) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()));
  EXPECT_THAT(reg->GetCast(x, GetOptionalQType<int64_t>(), true),
IsOkAndHolds(EqualsExpr(
CallOp("core.to_optional", {CallOp("core.to_int64", {x})}))));
}
TEST_F(CastingRegistryTest, GetCastWithBroadcasting) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
GetDenseArrayQType<int64_t>();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<int32_t>()));
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
EXPECT_THAT(
      reg->GetCast(x, GetDenseArrayQType<int64_t>(), true, shape),
IsOkAndHolds(EqualsExpr(CallOp("core.const_with_shape",
{shape, CallOp("core.to_int64", {x})}))));
}
TEST_F(CastingRegistryTest, GetCastFromWeakType) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
expr::ExprOperatorPtr upcast_op =
std::make_shared<expr::DerivedQTypeUpcastOperator>(GetWeakFloatQType());
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetWeakFloatQType()));
    EXPECT_THAT(reg->GetCast(x, GetOptionalQType<double>(), true),
IsOkAndHolds(EqualsExpr(
CallOp("core.to_optional", {CallOp(upcast_op, {x})}))));
}
{
expr::ExprOperatorPtr opt_upcast_op =
std::make_shared<expr::DerivedQTypeUpcastOperator>(
GetOptionalWeakFloatQType());
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetOptionalWeakFloatQType()));
    EXPECT_THAT(reg->GetCast(x, GetOptionalQType<double>(), true),
IsOkAndHolds(EqualsExpr(CallOp(opt_upcast_op, {x}))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetWeakFloatQType()));
    EXPECT_THAT(reg->GetCast(x, GetOptionalWeakFloatQType(), true),
IsOkAndHolds(EqualsExpr(CallOp("core.to_optional", {x}))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetWeakFloatQType()));
ASSERT_OK_AND_ASSIGN(
auto shape,
WithQTypeAnnotation(Leaf("shape"), GetQType<DenseArrayShape>()));
GetDenseArrayQType<float>();
EXPECT_THAT(
        reg->GetCast(x, GetDenseArrayQType<float>(), true, shape),
IsOkAndHolds(EqualsExpr(CallOp(
"core.const_with_shape",
{shape, CallOp("core.to_float32", {CallOp(upcast_op, {x})})}))));
}
}
TEST_F(CastingRegistryTest, GetCastToWeakType) {
const CastingRegistry* reg = CastingRegistry::GetInstance();
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
    EXPECT_THAT(reg->GetCast(x, GetWeakFloatQType(), false),
IsOkAndHolds(EqualsExpr(CoreToWeakFloat(x))));
}
{
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetOptionalQType<float>()));
    EXPECT_THAT(reg->GetCast(x, GetOptionalWeakFloatQType(), false),
IsOkAndHolds(EqualsExpr(CoreToWeakFloat(x))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
    EXPECT_THAT(reg->GetCast(x, GetOptionalWeakFloatQType(), false),
IsOkAndHolds(EqualsExpr(
CallOp("core.to_optional", {CoreToWeakFloat(x)}))));
}
{
GetDenseArrayQType<float>();
GetDenseArrayWeakFloatQType();
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetDenseArrayQType<float>()));
    EXPECT_THAT(reg->GetCast(x, GetDenseArrayWeakFloatQType(), false),
IsOkAndHolds(EqualsExpr(CoreToWeakFloat(x))));
}
{
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
EXPECT_THAT(
        reg->GetCast(x, GetWeakFloatQType(), true),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"implicit casting from FLOAT32 to WEAK_FLOAT is not allowed")));
}
}
}
} |
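A minimal usage sketch (not part of the dataset rows), assuming the same Arolla headers used in the test above and that InitArolla() has already succeeded; CommonTypeSketch is a hypothetical helper exercising the success and error paths of CastingRegistry::CommonType implemented at the top of this section:

#include <cstdint>

#include "arolla/expr/operators/casting_registry.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"

// Hypothetical sketch; not part of the library or its tests.
void CommonTypeSketch() {
  using ::arolla::Bytes;
  using ::arolla::GetQType;
  const auto* reg = ::arolla::expr_operators::CastingRegistry::GetInstance();
  // INT32 and INT64 promote to a common type, so this holds INT64.
  auto promoted = reg->CommonType({GetQType<int32_t>(), GetQType<int64_t>()});
  // INT32 and BYTES share no common type, so this yields an
  // InvalidArgumentError naming the offending type vector.
  auto failure = reg->CommonType({GetQType<int32_t>(), GetQType<Bytes>()});
  (void)promoted;
  (void)failure;
}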
133 | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_MARK_EXPLICIT_OP_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_MARK_EXPLICIT_OP_H_
#include "tensorstore/index_space/dimension_index_buffer.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyChangeImplicitState(
IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool implicit,
bool lower, bool upper, bool domain_only = false);
struct ChangeImplicitStateOp {
static constexpr bool selected_dimensions_are_new = false;
constexpr static DimensionIndex GetNewStaticInputRank(
DimensionIndex input_rank, DimensionIndex num_input_dims) {
return input_rank;
}
constexpr static DimensionIndex GetStaticSelectionRank(
DimensionIndex num_input_dims) {
return num_input_dims;
}
Result<IndexTransform<>> Apply(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) const {
return ApplyChangeImplicitState(std::move(transform), dimensions,
                                    implicit, lower, upper, domain_only);
}
bool implicit;
bool lower;
bool upper;
};
}
}
#endif
#include "tensorstore/index_space/internal/mark_explicit_op.h"
#include "absl/status/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
Result<IndexTransform<>> ApplyChangeImplicitState(
IndexTransform<> transform, DimensionIndexBuffer* dimensions, bool implicit,
bool lower, bool upper, bool domain_only) {
if (!lower && !upper) {
return transform;
}
TransformRep::Ptr<> rep = MutableRep(
TransformAccess::rep_ptr<container>(std::move(transform)), domain_only);
if (implicit) {
for (DimensionIndex output_dim = 0, output_rank = rep->output_rank;
output_dim < output_rank; ++output_dim) {
auto& map = rep->output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::array) continue;
auto& index_array_data = map.index_array_data();
for (DimensionIndex input_dim : *dimensions) {
if (index_array_data.byte_strides[input_dim] != 0) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Cannot mark input dimension ", input_dim,
" as having implicit bounds because it indexes the index array "
"map for output dimension ",
output_dim));
}
}
}
}
for (DimensionIndex input_dim : *dimensions) {
const auto d = rep->input_dimension(input_dim);
if (lower) d.implicit_lower_bound() = implicit;
if (upper) d.implicit_upper_bound() = implicit;
}
if (!implicit && IsDomainExplicitlyEmpty(rep.get())) {
ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
}
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionErrorTransformOnly;
TEST(MarkBoundsExplicitTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(true, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
TestDimExpression(original_transform,
Dims("x", "z").MarkBoundsExplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).MarkBoundsExplicit(),
{0},
IndexTransformBuilder(2, 2)
.input_shape({2, 3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
{});
}
TEST(MarkBoundsExplicitTest, IndexArrayZeroSize) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({0, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).MarkBoundsExplicit(),
{0},
IndexTransformBuilder(2, 2)
.input_shape({0, 3})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({0, 3})
.output_constant(0, 0)
.Finalize()
.value(),
{});
}
TEST(UnsafeMarkBoundsImplicitTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 1})
.implicit_upper_bounds({1, 0, 1})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).UnsafeMarkBoundsImplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
TestDimExpression(
original_transform,
Dims(0, 2).UnsafeMarkBoundsImplicit(true, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
TestDimExpression(original_transform,
Dims("x", "z").UnsafeMarkBoundsImplicit(),
{0, 2},
expected_new_transform,
expected_new_transform,
{},
true,
false);
}
TEST(UnsafeMarkBoundsImplicitTest, IndexArray) {
TestDimExpression(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(0).UnsafeMarkBoundsImplicit(false, true),
{0},
IndexTransformBuilder(2, 2)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_identity_transform()
.Finalize()
.value(),
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.implicit_upper_bounds({1, 0})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
{},
true,
false);
}
TEST(UnsafeMarkBoundsImplicitTest, IndexArrayInvalid) {
TestDimExpressionErrorTransformOnly(
IndexTransformBuilder(2, 1)
.input_shape({2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{1, 2, 3}}))
.Finalize()
.value(),
Dims(1).UnsafeMarkBoundsImplicit(false, true),
absl::StatusCode::kInvalidArgument,
"Cannot mark input dimension 1 as having implicit bounds because it "
"indexes the index array map for output dimension 0",
IndexDomainBuilder(2)
.shape({2, 3})
.implicit_upper_bounds({0, 1})
.Finalize()
.value());
}
TEST(MarkBoundsExplicitTest, LowerOnly) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(true, false),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, UpperOnly) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({0, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(false, true),
{0, 2},
expected_new_transform,
expected_new_transform,
{});
}
TEST(MarkBoundsExplicitTest, None) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 0})
.output_identity_transform()
.Finalize()
.value();
TestDimExpression(original_transform,
Dims(0, 2).MarkBoundsExplicit(false, false),
{0, 2},
original_transform,
original_transform,
{});
}
} |
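A short sketch reproducing the Example test above without the test harness. Assumptions: MarkExplicitSketch is a hypothetical helper, and tensorstore's operator| pipeline applies a DimExpression to a transform, as used elsewhere in its public API:

#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/result.h"

// Hypothetical sketch: makes the bounds of dimensions 0 and 2 explicit.
tensorstore::Result<tensorstore::IndexTransform<>> MarkExplicitSketch() {
  auto transform = tensorstore::IndexTransformBuilder(3, 3)
                       .input_origin({1, 2, 3})
                       .input_shape({3, 4, 2})
                       .implicit_lower_bounds({0, 1, 1})
                       .implicit_upper_bounds({1, 0, 0})
                       .output_identity_transform()
                       .Finalize()
                       .value();
  // Both lower and upper bounds of the selected dimensions become explicit.
  // ApplyChangeImplicitState rejects a change only when implicit=true is
  // requested for a dimension that indexes an index array map.
  return transform | tensorstore::Dims(0, 2).MarkBoundsExplicit();
}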
134 | #ifndef AROLLA_QTYPE_ARRAY_LIKE_FRAME_ITER_H_
#define AROLLA_QTYPE_ARRAY_LIKE_FRAME_ITER_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/barrier.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/threading.h"
namespace arolla {
class FrameIterator {
public:
FrameIterator(const FrameIterator&) = delete;
FrameIterator(FrameIterator&&) = default;
~FrameIterator();
struct Options {
static constexpr Options Default() { return {}; }
std::optional<int64_t> row_count;
int64_t frame_buffer_count = 64;
RawBufferFactory* buffer_factory = nullptr;
};
static absl::StatusOr<FrameIterator> Create(
absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots,
absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
const FrameLayout* scalar_layout, Options options = Options::Default());
template <class Fn>
void CustomFrameInitialization(Fn&& fn) {
for (auto& frame : frames_) fn(frame);
}
int64_t row_count() const { return row_count_; }
template <typename Fn>
void ForEachFrame(Fn&& fn) {
for (int64_t offset = 0; offset < row_count_; offset += frames_.size()) {
int64_t count = std::min<int64_t>(frames_.size(), row_count_ - offset);
PreloadFrames(count);
for (int64_t i = 0; i < count; ++i) {
fn(frames_[i]);
}
SaveOutputsOfProcessedFrames(count);
}
}
template <typename Fn>
void ForEachFrame(Fn&& fn, ThreadingInterface& threading, int thread_count) {
DCHECK_GE(thread_count, 1);
const int frames_per_worker =
(frames_.size() + thread_count - 1) / thread_count;
auto barrier1 = std::make_unique<absl::Barrier>(thread_count);
auto barrier2 = std::make_unique<absl::Barrier>(thread_count);
auto BarrierSync = [thread_count](std::unique_ptr<absl::Barrier>& b) {
if (b->Block()) {
b = std::make_unique<absl::Barrier>(thread_count);
}
};
auto worker_fn = [&](int worker_id) {
for (int64_t offset = 0; offset < row_count_; offset += frames_.size()) {
int64_t count = std::min<int64_t>(frames_.size(), row_count_ - offset);
if (worker_id == 0) {
PreloadFrames(count);
}
BarrierSync(barrier1);
for (int64_t i = worker_id * frames_per_worker;
i < std::min<int64_t>(count, (worker_id + 1) * frames_per_worker);
++i) {
fn(frames_[i]);
}
BarrierSync(barrier2);
if (worker_id == 0) {
SaveOutputsOfProcessedFrames(count);
}
}
};
threading.WithThreading([&] {
std::vector<std::function<void()>> join_fns;
join_fns.reserve(thread_count - 1);
for (int i = 1; i < thread_count; ++i) {
join_fns.push_back(
threading.StartThread([&worker_fn, i] { worker_fn(i); }));
}
worker_fn(0);
for (auto& join : join_fns) join();
});
}
absl::Status StoreOutput(FramePtr output_frame);
private:
void* GetAllocByIndex(size_t index) {
return buffer_.data() + index * dense_scalar_layout_size_;
}
void PreloadFrames(size_t frames_count);
void SaveOutputsOfProcessedFrames(size_t frames_count);
FrameIterator(
std::vector<std::unique_ptr<BatchToFramesCopier>>&& input_copiers,
std::vector<std::unique_ptr<BatchFromFramesCopier>>&& output_copiers,
size_t row_count, size_t frame_buffer_count,
const FrameLayout* scalar_layout);
int64_t row_count_;
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers_;
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers_;
std::vector<FramePtr> frames_;
std::vector<ConstFramePtr> const_frames_;
std::vector<char> buffer_;
const FrameLayout* scalar_layout_;
size_t dense_scalar_layout_size_;
};
}
#endif
#include "arolla/qtype/array_like/frame_iter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
absl::StatusOr<std::vector<std::unique_ptr<BatchToFramesCopier>>>
CreateInputCopiers(absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots) {
if (input_arrays.size() != input_scalar_slots.size()) {
return absl::InvalidArgumentError(
absl::StrFormat("size of input_arrays and input_scalar_slots should be "
"the same: %d vs %d",
input_arrays.size(), input_scalar_slots.size()));
}
absl::flat_hash_map<QTypePtr, std::unique_ptr<BatchToFramesCopier>>
input_copiers;
for (size_t i = 0; i < input_arrays.size(); ++i) {
QTypePtr array_type = input_arrays[i].GetType();
if (!input_copiers.contains(array_type)) {
ASSIGN_OR_RETURN(input_copiers[array_type],
CreateBatchToFramesCopier(array_type));
}
RETURN_IF_ERROR(input_copiers[array_type]->AddMapping(
input_arrays[i], input_scalar_slots[i]));
}
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers_vector;
for (auto& [_, v] : input_copiers)
input_copiers_vector.push_back(std::move(v));
return input_copiers_vector;
}
absl::StatusOr<std::vector<std::unique_ptr<BatchFromFramesCopier>>>
CreateOutputCopiers(absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
RawBufferFactory* buffer_factory) {
if (output_array_slots.size() != output_scalar_slots.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"size of output_array_slots and output_scalar_slots should be "
"the same: %d vs %d",
output_array_slots.size(), output_scalar_slots.size()));
}
absl::flat_hash_map<QTypePtr, std::unique_ptr<BatchFromFramesCopier>>
output_copiers;
for (size_t i = 0; i < output_array_slots.size(); ++i) {
QTypePtr array_type = output_array_slots[i].GetType();
if (!output_copiers.contains(array_type)) {
ASSIGN_OR_RETURN(output_copiers[array_type],
CreateBatchFromFramesCopier(array_type, buffer_factory));
}
RETURN_IF_ERROR(output_copiers[array_type]->AddMapping(
output_scalar_slots[i], output_array_slots[i]));
}
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers_vector;
for (auto& [_, v] : output_copiers)
output_copiers_vector.push_back(std::move(v));
return output_copiers_vector;
}
}
absl::StatusOr<FrameIterator> FrameIterator::Create(
absl::Span<const TypedRef> input_arrays,
absl::Span<const TypedSlot> input_scalar_slots,
absl::Span<const TypedSlot> output_array_slots,
absl::Span<const TypedSlot> output_scalar_slots,
const FrameLayout* scalar_layout, FrameIterator::Options options) {
ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<BatchToFramesCopier>> input_copiers,
CreateInputCopiers(input_arrays, input_scalar_slots));
RawBufferFactory* buf_factory = options.buffer_factory;
if (!buf_factory) buf_factory = GetHeapBufferFactory();
ASSIGN_OR_RETURN(
std::vector<std::unique_ptr<BatchFromFramesCopier>> output_copiers,
CreateOutputCopiers(output_array_slots, output_scalar_slots,
buf_factory));
std::optional<int64_t> row_count = std::nullopt;
for (const auto& copier : input_copiers) {
if (!copier->row_count() ||
(row_count && *row_count != *copier->row_count())) {
return absl::InvalidArgumentError(
absl::StrFormat("input arrays have different sizes: %d vs %d",
*row_count, *copier->row_count()));
}
row_count = copier->row_count();
}
if (!row_count.has_value()) {
if (!options.row_count.has_value()) {
return absl::InvalidArgumentError(
"options.row_count can not be missed if there is no input arrays");
}
row_count = options.row_count;
} else if (options.row_count.has_value() &&
*options.row_count != *row_count) {
return absl::InvalidArgumentError(
absl::StrFormat("sizes of input arrays don't correspond "
"to options.row_count: %d vs %d",
*row_count, *options.row_count));
}
return FrameIterator(std::move(input_copiers), std::move(output_copiers),
*row_count, options.frame_buffer_count, scalar_layout);
}
FrameIterator::FrameIterator(
std::vector<std::unique_ptr<BatchToFramesCopier>>&& input_copiers,
std::vector<std::unique_ptr<BatchFromFramesCopier>>&& output_copiers,
size_t row_count, size_t frame_buffer_count,
const FrameLayout* scalar_layout)
: row_count_(row_count),
input_copiers_(std::move(input_copiers)),
output_copiers_(std::move(output_copiers)),
scalar_layout_(scalar_layout) {
frame_buffer_count = std::min(row_count, frame_buffer_count);
dense_scalar_layout_size_ = (scalar_layout_->AllocSize() + 7) & ~7;
buffer_.resize(dense_scalar_layout_size_ * frame_buffer_count);
for (size_t i = 0; i < frame_buffer_count; ++i) {
void* alloc_ptr = GetAllocByIndex(i);
scalar_layout->InitializeAlignedAlloc(alloc_ptr);
frames_.emplace_back(alloc_ptr, scalar_layout);
const_frames_.emplace_back(alloc_ptr, scalar_layout);
}
for (auto& copier : input_copiers_) copier->Start();
for (auto& copier : output_copiers_) copier->Start(row_count);
}
FrameIterator::~FrameIterator() {
for (size_t i = 0; i < frames_.size(); ++i) {
scalar_layout_->DestroyAlloc(GetAllocByIndex(i));
}
}
absl::Status FrameIterator::StoreOutput(FramePtr output_frame) {
for (std::unique_ptr<BatchFromFramesCopier>& copier : output_copiers_) {
RETURN_IF_ERROR(copier->Finalize(output_frame));
}
return absl::OkStatus();
}
void FrameIterator::PreloadFrames(size_t frames_count) {
for (auto& copier : input_copiers_) {
copier->CopyNextBatch({frames_.data(), frames_count});
}
}
void FrameIterator::SaveOutputsOfProcessedFrames(size_t frames_count) {
for (auto& copier : output_copiers_) {
absl::Status status =
copier->CopyNextBatch({const_frames_.data(), frames_count});
DCHECK_OK(status);
}
}
} | #include "arolla/qtype/array_like/frame_iter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/threading.h"
namespace arolla {
namespace {
using ::arolla::testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Test;
TEST(FrameIterator, Iterate) {
FrameLayout::Builder scalar_bldr;
auto scalar_f_slot1 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_i_slot1 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_i_slot2 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_f_slot2 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {
TypedSlot::FromSlot(scalar_f_slot1), TypedSlot::FromSlot(scalar_i_slot1),
TypedSlot::FromSlot(scalar_i_slot2), TypedSlot::FromSlot(scalar_f_slot2)};
DenseArray<float> arr_f1 =
CreateDenseArray<float>({1.5, std::nullopt, 2.5, 3.5});
DenseArray<int64_t> arr_i1 = CreateDenseArray<int64_t>({3, 4, 5, 6});
DenseArray<int64_t> arr_i2 =
CreateDenseArray<int64_t>({2, std::nullopt, 0, std::nullopt});
DenseArray<float> arr_f2 =
CreateDenseArray<float>({3.2, 2.2, std::nullopt, 1.2});
FrameLayout::Builder vector_bldr;
auto arr_output_f1 = vector_bldr.AddSlot<DenseArray<float>>();
auto arr_output_i1 = vector_bldr.AddSlot<DenseArray<int64_t>>();
auto arr_output_i2 = vector_bldr.AddSlot<DenseArray<int64_t>>();
auto arr_output_f2 = vector_bldr.AddSlot<DenseArray<float>>();
auto output_vector_layout = std::move(vector_bldr).Build();
std::vector<TypedRef> input_refs = {
TypedRef::FromValue(arr_f1), TypedRef::FromValue(arr_i1),
TypedRef::FromValue(arr_i2), TypedRef::FromValue(arr_f2)};
std::vector<TypedSlot> output_slots = {
TypedSlot::FromSlot(arr_output_f1), TypedSlot::FromSlot(arr_output_i1),
TypedSlot::FromSlot(arr_output_i2), TypedSlot::FromSlot(arr_output_f2)};
auto scalar_processing_fn = [&](FramePtr frame) {
OptionalValue<float> f1 = frame.Get(scalar_f_slot1);
OptionalValue<float> f2 = frame.Get(scalar_f_slot2);
if (f1.present) frame.Set(scalar_f_slot1, f1.value + 1.0);
if (f2.present) frame.Set(scalar_f_slot2, f2.value + 2.0);
OptionalValue<int64_t> i1 = frame.Get(scalar_i_slot1);
OptionalValue<int64_t> i2 = frame.Get(scalar_i_slot2);
if (i1.present) frame.Set(scalar_i_slot1, i1.value + 3);
if (i2.present) frame.Set(scalar_i_slot2, i2.value + 4);
};
auto check_output_fn = [&](FrameIterator& frame_iterator) {
MemoryAllocation alloc(&output_vector_layout);
FramePtr output_frame = alloc.frame();
EXPECT_OK(frame_iterator.StoreOutput(output_frame));
EXPECT_THAT(output_frame.Get(arr_output_f1),
ElementsAre(2.5, std::nullopt, 3.5, 4.5));
EXPECT_THAT(output_frame.Get(arr_output_f2),
ElementsAre(5.2, 4.2, std::nullopt, 3.2));
EXPECT_THAT(output_frame.Get(arr_output_i1), ElementsAre(6, 7, 8, 9));
EXPECT_THAT(output_frame.Get(arr_output_i2),
ElementsAre(6, std::nullopt, 4, std::nullopt));
};
{
ASSERT_OK_AND_ASSIGN(
auto frame_iterator,
FrameIterator::Create(input_refs, scalar_slots, output_slots,
scalar_slots, &scalar_layout,
{.frame_buffer_count = 2}));
frame_iterator.ForEachFrame(scalar_processing_fn);
check_output_fn(frame_iterator);
}
StdThreading threading(4);
for (int threads = 1; threads <= 4; ++threads) {
ASSERT_OK_AND_ASSIGN(
auto frame_iterator,
FrameIterator::Create(input_refs, scalar_slots, output_slots,
scalar_slots, &scalar_layout,
{.frame_buffer_count = 3}));
frame_iterator.ForEachFrame(scalar_processing_fn, threading, threads);
check_output_fn(frame_iterator);
}
}
TEST(FrameIterator, EmptyArrays) {
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
FrameLayout::Builder arrays_layout_bldr;
auto arr_output = arrays_layout_bldr.AddSlot<DenseArray<float>>();
auto output_arrays_layout = std::move(arrays_layout_bldr).Build();
DenseArray<float> arr;
std::vector<TypedRef> input_refs = {TypedRef::FromValue(arr)};
std::vector<TypedSlot> output_slots = {TypedSlot::FromSlot(arr_output)};
auto scalar_processing_fn = [&](FramePtr frame) { ADD_FAILURE(); };
ASSERT_OK_AND_ASSIGN(auto frame_iterator,
FrameIterator::Create(
input_refs, scalar_slots, output_slots, scalar_slots,
&scalar_layout, {.frame_buffer_count = 2}));
frame_iterator.ForEachFrame(scalar_processing_fn);
MemoryAllocation alloc(&output_arrays_layout);
FramePtr output_frame = alloc.frame();
EXPECT_OK(frame_iterator.StoreOutput(output_frame));
EXPECT_EQ(output_frame.Get(arr_output).size(), 0);
}
TEST(FrameIterator, EmptyInputAndOutput) {
FrameLayout::Builder scalar_bldr;
auto scalar_layout = std::move(scalar_bldr).Build();
{
auto frame_iterator_or_status =
FrameIterator::Create({}, {}, {}, {}, &scalar_layout);
EXPECT_THAT(
frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("options.row_count can not be missed if there "
"is no input arrays")));
}
{
ASSERT_OK_AND_ASSIGN(auto frame_iterator,
FrameIterator::Create({}, {}, {}, {}, &scalar_layout,
{.row_count = 4}));
EXPECT_EQ(frame_iterator.row_count(), 4);
}
}
TEST(FrameIterator, IncorrectInputType) {
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<float>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
DenseArray<int64_t> arr = CreateDenseArray<int64_t>({1, std::nullopt, 2, 3});
auto frame_iterator_or_status = FrameIterator::Create(
{TypedRef::FromValue(arr)}, scalar_slots, {}, {}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match")));
}
TEST(FrameIterator, IncorrectOutputType) {
FrameLayout::Builder vector_bldr;
auto vector_slot = vector_bldr.AddSlot<DenseArray<float>>();
auto vector_layout = std::move(vector_bldr).Build();
FrameLayout::Builder scalar_bldr;
auto scalar_slot = scalar_bldr.AddSlot<int64_t>();
auto scalar_layout = std::move(scalar_bldr).Build();
auto frame_iterator_or_status =
FrameIterator::Create({}, {}, {TypedSlot::FromSlot(vector_slot)},
{TypedSlot::FromSlot(scalar_slot)}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("slot type does not match")));
}
TEST(FrameIterator, WrongSize) {
FrameLayout::Builder scalar_bldr;
auto scalar_f_slot1 = scalar_bldr.AddSlot<OptionalValue<float>>();
auto scalar_i_slot1 = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
auto scalar_layout = std::move(scalar_bldr).Build();
std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_f_slot1),
TypedSlot::FromSlot(scalar_i_slot1)};
DenseArray<float> arr_f1 =
CreateDenseArray<float>({1.5, std::nullopt, 2.5, 3.5});
DenseArray<int64_t> arr_i1 = CreateDenseArray<int64_t>({3, 4, 5});
auto frame_iterator_or_status = FrameIterator::Create(
{TypedRef::FromValue(arr_f1), TypedRef::FromValue(arr_i1)}, scalar_slots,
{}, {}, &scalar_layout);
EXPECT_THAT(frame_iterator_or_status,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("input arrays have different sizes")));
}
}
} |
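A compact sketch of the pointwise-processing pattern the tests above exercise. Assumptions: DoubleArraySketch is a hypothetical helper, and it pairs a DenseArray<int64_t> input with an OptionalValue<int64_t> scalar slot, as in the Iterate test:

#include <cstdint>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/array_like/frame_iter.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"

namespace arolla {

// Hypothetical sketch: doubles each present element of `input`.
absl::Status DoubleArraySketch(const DenseArray<int64_t>& input,
                               DenseArray<int64_t>* output) {
  FrameLayout::Builder scalar_bldr;
  auto scalar_slot = scalar_bldr.AddSlot<OptionalValue<int64_t>>();
  auto scalar_layout = std::move(scalar_bldr).Build();
  std::vector<TypedSlot> scalar_slots = {TypedSlot::FromSlot(scalar_slot)};
  FrameLayout::Builder out_bldr;
  auto out_slot = out_bldr.AddSlot<DenseArray<int64_t>>();
  auto out_layout = std::move(out_bldr).Build();
  ASSIGN_OR_RETURN(
      auto iter,
      FrameIterator::Create({TypedRef::FromValue(input)}, scalar_slots,
                            {TypedSlot::FromSlot(out_slot)}, scalar_slots,
                            &scalar_layout));
  // The same scalar slot serves as both input and output mapping, so
  // mutating it in place rewrites the row before it is copied back out.
  iter.ForEachFrame([&](FramePtr frame) {
    OptionalValue<int64_t> v = frame.Get(scalar_slot);
    if (v.present) frame.Set(scalar_slot, v.value * 2);
  });
  MemoryAllocation alloc(&out_layout);
  RETURN_IF_ERROR(iter.StoreOutput(alloc.frame()));
  *output = alloc.frame().Get(out_slot);
  return absl::OkStatus();
}

}  // namespace arolla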
135 | #ifndef QUICHE_QUIC_LOAD_BALANCER_LOAD_BALANCER_ENCODER_H_
#define QUICHE_QUIC_LOAD_BALANCER_LOAD_BALANCER_ENCODER_H_
#include <algorithm>
#include <cstdint>
#include <optional>
#include "absl/numeric/int128.h"
#include "quiche/quic/core/connection_id_generator.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/load_balancer/load_balancer_config.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
namespace quic {
namespace test {
class LoadBalancerEncoderPeer;
}
inline constexpr uint8_t kLoadBalancerUnroutableLen = 8;
constexpr uint8_t kLoadBalancerLengthMask = (1 << kConnectionIdLengthBits) - 1;
constexpr uint8_t kLoadBalancerConfigIdMask = ~kLoadBalancerLengthMask;
constexpr uint8_t kLoadBalancerUnroutableConfigId = kNumLoadBalancerConfigs;
constexpr uint8_t kLoadBalancerUnroutablePrefix =
kLoadBalancerUnroutableConfigId << kConnectionIdLengthBits;
class QUIC_EXPORT_PRIVATE LoadBalancerEncoderVisitorInterface {
public:
virtual ~LoadBalancerEncoderVisitorInterface() {}
virtual void OnConfigAdded(uint8_t config_id) = 0;
virtual void OnConfigChanged(uint8_t old_config_id,
uint8_t new_config_id) = 0;
virtual void OnConfigDeleted(uint8_t config_id) = 0;
};
class QUIC_EXPORT_PRIVATE LoadBalancerEncoder
: public ConnectionIdGeneratorInterface {
public:
LoadBalancerEncoder(QuicRandom& random,
LoadBalancerEncoderVisitorInterface* const visitor,
const bool len_self_encoded)
: LoadBalancerEncoder(random, visitor, len_self_encoded,
kLoadBalancerUnroutableLen) {}
~LoadBalancerEncoder() override {}
static std::optional<LoadBalancerEncoder> Create(
QuicRandom& random, LoadBalancerEncoderVisitorInterface* visitor,
bool len_self_encoded,
uint8_t unroutable_connection_id_len = kLoadBalancerUnroutableLen);
bool UpdateConfig(const LoadBalancerConfig& config,
LoadBalancerServerId server_id);
virtual void DeleteConfig();
absl::uint128 num_nonces_left() const { return num_nonces_left_; }
virtual bool IsEncoding() const { return config_.has_value(); }
virtual bool IsEncrypted() const {
return config_.has_value() && config_->IsEncrypted();
}
virtual bool len_self_encoded() const { return len_self_encoded_; }
QuicConnectionId GenerateConnectionId();
std::optional<QuicConnectionId> GenerateNextConnectionId(
const QuicConnectionId& original) override;
std::optional<QuicConnectionId> MaybeReplaceConnectionId(
const QuicConnectionId& original,
const ParsedQuicVersion& version) override;
uint8_t ConnectionIdLength(uint8_t first_byte) const override;
protected:
LoadBalancerEncoder(QuicRandom& random,
LoadBalancerEncoderVisitorInterface* const visitor,
const bool len_self_encoded,
const uint8_t unroutable_connection_id_len)
: random_(random),
len_self_encoded_(len_self_encoded),
visitor_(visitor) {
std::fill_n(connection_id_lengths_, kNumLoadBalancerConfigs + 1,
unroutable_connection_id_len);
}
private:
friend class test::LoadBalancerEncoderPeer;
QuicConnectionId MakeUnroutableConnectionId(uint8_t first_byte);
QuicRandom& random_;
const bool len_self_encoded_;
LoadBalancerEncoderVisitorInterface* const visitor_;
std::optional<LoadBalancerConfig> config_;
absl::uint128 seed_, num_nonces_left_ = 0;
std::optional<LoadBalancerServerId> server_id_;
uint8_t connection_id_lengths_[kNumLoadBalancerConfigs + 1];
};
}
#endif
#include "quiche/quic/load_balancer/load_balancer_encoder.h"
#include <cstdint>
#include <cstring>
#include <optional>
#include "absl/cleanup/cleanup.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/load_balancer/load_balancer_config.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
namespace {
absl::uint128 NumberOfNonces(uint8_t nonce_len) {
return (static_cast<absl::uint128>(1) << (nonce_len * 8));
}
bool WriteUint128(const absl::uint128 in, uint8_t size, QuicDataWriter &out) {
if (out.remaining() < size) {
QUIC_BUG(quic_bug_435375038_05)
<< "Call to WriteUint128() does not have enough space in |out|";
return false;
}
uint64_t num64 = absl::Uint128Low64(in);
if (size <= sizeof(num64)) {
out.WriteBytes(&num64, size);
} else {
out.WriteBytes(&num64, sizeof(num64));
num64 = absl::Uint128High64(in);
out.WriteBytes(&num64, size - sizeof(num64));
}
return true;
}
}
std::optional<LoadBalancerEncoder> LoadBalancerEncoder::Create(
QuicRandom &random, LoadBalancerEncoderVisitorInterface *const visitor,
const bool len_self_encoded, const uint8_t unroutable_connection_id_len) {
if (unroutable_connection_id_len == 0 ||
unroutable_connection_id_len >
kQuicMaxConnectionIdWithLengthPrefixLength) {
QUIC_BUG(quic_bug_435375038_01)
<< "Invalid unroutable_connection_id_len = "
<< static_cast<int>(unroutable_connection_id_len);
return std::optional<LoadBalancerEncoder>();
}
return LoadBalancerEncoder(random, visitor, len_self_encoded,
unroutable_connection_id_len);
}
bool LoadBalancerEncoder::UpdateConfig(const LoadBalancerConfig &config,
const LoadBalancerServerId server_id) {
if (config_.has_value() && config_->config_id() == config.config_id()) {
QUIC_BUG(quic_bug_435375038_02)
<< "Attempting to change config with same ID";
return false;
}
if (server_id.length() != config.server_id_len()) {
QUIC_BUG(quic_bug_435375038_03)
<< "Server ID length " << static_cast<int>(server_id.length())
<< " does not match configured value of "
<< static_cast<int>(config.server_id_len());
return false;
}
if (visitor_ != nullptr) {
if (config_.has_value()) {
visitor_->OnConfigChanged(config_->config_id(), config.config_id());
} else {
visitor_->OnConfigAdded(config.config_id());
}
}
config_ = config;
server_id_ = server_id;
seed_ = absl::MakeUint128(random_.RandUint64(), random_.RandUint64()) %
NumberOfNonces(config.nonce_len());
num_nonces_left_ = NumberOfNonces(config.nonce_len());
connection_id_lengths_[config.config_id()] = config.total_len();
return true;
}
void LoadBalancerEncoder::DeleteConfig() {
if (visitor_ != nullptr && config_.has_value()) {
visitor_->OnConfigDeleted(config_->config_id());
}
config_.reset();
server_id_.reset();
num_nonces_left_ = 0;
}
QuicConnectionId LoadBalancerEncoder::GenerateConnectionId() {
absl::Cleanup cleanup = [&] {
if (num_nonces_left_ == 0) {
DeleteConfig();
}
};
uint8_t config_id = config_.has_value() ? config_->config_id()
: kLoadBalancerUnroutableConfigId;
uint8_t shifted_config_id = config_id << kConnectionIdLengthBits;
uint8_t length = connection_id_lengths_[config_id];
if (config_.has_value() != server_id_.has_value()) {
QUIC_BUG(quic_bug_435375038_04)
<< "Existence of config and server_id are out of sync";
return QuicConnectionId();
}
uint8_t first_byte;
if (len_self_encoded_) {
first_byte = shifted_config_id | (length - 1);
} else {
random_.RandBytes(static_cast<void *>(&first_byte), 1);
first_byte = shifted_config_id | (first_byte & kLoadBalancerLengthMask);
}
if (!config_.has_value()) {
return MakeUnroutableConnectionId(first_byte);
}
uint8_t result[kQuicMaxConnectionIdWithLengthPrefixLength];
QuicDataWriter writer(length, reinterpret_cast<char *>(result),
quiche::HOST_BYTE_ORDER);
writer.WriteUInt8(first_byte);
absl::uint128 next_nonce =
(seed_ + num_nonces_left_--) % NumberOfNonces(config_->nonce_len());
writer.WriteBytes(server_id_->data().data(), server_id_->length());
if (!WriteUint128(next_nonce, config_->nonce_len(), writer)) {
return QuicConnectionId();
}
if (!config_->IsEncrypted()) {
absl::uint128 nonce_hash = QuicUtils::FNV1a_128_Hash(absl::string_view(
reinterpret_cast<char *>(result), config_->total_len()));
const uint64_t lo = absl::Uint128Low64(nonce_hash);
if (config_->nonce_len() <= sizeof(uint64_t)) {
memcpy(&result[1 + config_->server_id_len()], &lo, config_->nonce_len());
return QuicConnectionId(reinterpret_cast<char *>(result),
config_->total_len());
}
memcpy(&result[1 + config_->server_id_len()], &lo, sizeof(uint64_t));
const uint64_t hi = absl::Uint128High64(nonce_hash);
memcpy(&result[1 + config_->server_id_len() + sizeof(uint64_t)], &hi,
config_->nonce_len() - sizeof(uint64_t));
return QuicConnectionId(reinterpret_cast<char *>(result),
config_->total_len());
}
if (config_->plaintext_len() == kLoadBalancerBlockSize) {
if (!config_->BlockEncrypt(&result[1], &result[1])) {
return QuicConnectionId();
}
return (QuicConnectionId(reinterpret_cast<char *>(result),
config_->total_len()));
}
return config_->FourPassEncrypt(
absl::Span<uint8_t>(result, config_->total_len()));
}
std::optional<QuicConnectionId> LoadBalancerEncoder::GenerateNextConnectionId(
[[maybe_unused]] const QuicConnectionId &original) {
return (IsEncoding() && !IsEncrypted()) ? std::optional<QuicConnectionId>()
: GenerateConnectionId();
}
std::optional<QuicConnectionId> LoadBalancerEncoder::MaybeReplaceConnectionId(
const QuicConnectionId &original, const ParsedQuicVersion &version) {
uint8_t needed_length = config_.has_value()
? config_->total_len()
: connection_id_lengths_[kNumLoadBalancerConfigs];
return (!version.HasIetfQuicFrames() && original.length() == needed_length)
? std::optional<QuicConnectionId>()
: GenerateConnectionId();
}
uint8_t LoadBalancerEncoder::ConnectionIdLength(uint8_t first_byte) const {
if (len_self_encoded()) {
    return (first_byte & kLoadBalancerLengthMask) + 1;
}
return connection_id_lengths_[first_byte >> kConnectionIdLengthBits];
}
QuicConnectionId LoadBalancerEncoder::MakeUnroutableConnectionId(
uint8_t first_byte) {
QuicConnectionId id;
uint8_t target_length =
connection_id_lengths_[kLoadBalancerUnroutableConfigId];
id.set_length(target_length);
id.mutable_data()[0] = first_byte;
random_.RandBytes(&id.mutable_data()[1], target_length - 1);
return id;
}
} | #include "quiche/quic/load_balancer/load_balancer_encoder.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
#include <queue>
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/load_balancer/load_balancer_config.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
class LoadBalancerEncoderPeer {
public:
static void SetNumNoncesLeft(LoadBalancerEncoder &encoder,
uint64_t nonces_remaining) {
encoder.num_nonces_left_ = absl::uint128(nonces_remaining);
}
};
namespace {
class TestLoadBalancerEncoderVisitor
: public LoadBalancerEncoderVisitorInterface {
public:
~TestLoadBalancerEncoderVisitor() override {}
void OnConfigAdded(const uint8_t config_id) override {
num_adds_++;
current_config_id_ = config_id;
}
void OnConfigChanged(const uint8_t old_config_id,
const uint8_t new_config_id) override {
num_adds_++;
num_deletes_++;
EXPECT_EQ(old_config_id, current_config_id_);
current_config_id_ = new_config_id;
}
void OnConfigDeleted(const uint8_t config_id) override {
EXPECT_EQ(config_id, current_config_id_);
current_config_id_.reset();
num_deletes_++;
}
uint32_t num_adds() const { return num_adds_; }
uint32_t num_deletes() const { return num_deletes_; }
private:
uint32_t num_adds_ = 0, num_deletes_ = 0;
std::optional<uint8_t> current_config_id_ = std::optional<uint8_t>();
};
class TestRandom : public QuicRandom {
public:
uint64_t RandUint64() override {
if (next_values_.empty()) {
return base_;
}
uint64_t value = next_values_.front();
next_values_.pop();
return value;
}
void RandBytes(void *data, size_t len) override {
size_t written = 0;
uint8_t *ptr = static_cast<uint8_t *>(data);
while (written < len) {
uint64_t result = RandUint64();
size_t to_write = (len - written > sizeof(uint64_t)) ? sizeof(uint64_t)
: (len - written);
memcpy(ptr + written, &result, to_write);
written += to_write;
}
}
void InsecureRandBytes(void *data, size_t len) override {
RandBytes(data, len);
}
uint64_t InsecureRandUint64() override { return RandUint64(); }
void AddNextValues(uint64_t hi, uint64_t lo) {
next_values_.push(hi);
next_values_.push(lo);
}
private:
std::queue<uint64_t> next_values_;
uint64_t base_ = 0xDEADBEEFDEADBEEF;
};
class LoadBalancerEncoderTest : public QuicTest {
public:
TestRandom random_;
};
LoadBalancerServerId MakeServerId(const uint8_t array[], const uint8_t length) {
return LoadBalancerServerId(absl::Span<const uint8_t>(array, length));
}
constexpr char kRawKey[] = {0x8f, 0x95, 0xf0, 0x92, 0x45, 0x76, 0x5f, 0x80,
0x25, 0x69, 0x34, 0xe5, 0x0c, 0x66, 0x20, 0x7f};
constexpr absl::string_view kKey(kRawKey, kLoadBalancerKeyLen);
constexpr uint64_t kNonceLow = 0xe5d1c048bf0d08ee;
constexpr uint64_t kNonceHigh = 0x9321e7e34dde525d;
constexpr uint8_t kServerId[] = {0xed, 0x79, 0x3a, 0x51, 0xd4, 0x9b, 0x8f, 0x5f,
0xab, 0x65, 0xba, 0x04, 0xc3, 0x33, 0x0a};
TEST_F(LoadBalancerEncoderTest, BadUnroutableLength) {
EXPECT_QUIC_BUG(
EXPECT_FALSE(
LoadBalancerEncoder::Create(random_, nullptr, false, 0).has_value()),
"Invalid unroutable_connection_id_len = 0");
EXPECT_QUIC_BUG(
EXPECT_FALSE(
LoadBalancerEncoder::Create(random_, nullptr, false, 21).has_value()),
"Invalid unroutable_connection_id_len = 21");
}
TEST_F(LoadBalancerEncoderTest, BadServerIdLength) {
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true);
ASSERT_TRUE(encoder.has_value());
auto config = LoadBalancerConfig::CreateUnencrypted(1, 3, 4);
ASSERT_TRUE(config.has_value());
EXPECT_QUIC_BUG(
EXPECT_FALSE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 4))),
"Server ID length 4 does not match configured value of 3");
EXPECT_FALSE(encoder->IsEncoding());
}
TEST_F(LoadBalancerEncoderTest, FailToUpdateConfigWithSameId) {
TestLoadBalancerEncoderVisitor visitor;
auto encoder = LoadBalancerEncoder::Create(random_, &visitor, true);
ASSERT_TRUE(encoder.has_value());
auto config = LoadBalancerConfig::CreateUnencrypted(1, 3, 4);
ASSERT_TRUE(config.has_value());
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3)));
EXPECT_EQ(visitor.num_adds(), 1u);
EXPECT_QUIC_BUG(
EXPECT_FALSE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3))),
"Attempting to change config with same ID");
EXPECT_EQ(visitor.num_adds(), 1u);
}
struct LoadBalancerEncoderTestCase {
LoadBalancerConfig config;
QuicConnectionId connection_id;
LoadBalancerServerId server_id;
};
TEST_F(LoadBalancerEncoderTest, UnencryptedConnectionIdTestVectors) {
const struct LoadBalancerEncoderTestCase test_vectors[2] = {
{
*LoadBalancerConfig::CreateUnencrypted(0, 3, 4),
QuicConnectionId({0x07, 0xed, 0x79, 0x3a, 0x80, 0x49, 0x71, 0x8a}),
MakeServerId(kServerId, 3),
},
{
*LoadBalancerConfig::CreateUnencrypted(1, 8, 5),
QuicConnectionId({0x2d, 0xed, 0x79, 0x3a, 0x51, 0xd4, 0x9b, 0x8f,
0x5f, 0x8e, 0x98, 0x53, 0xfe, 0x93}),
MakeServerId(kServerId, 8),
},
};
for (const auto &test : test_vectors) {
random_.AddNextValues(kNonceHigh, kNonceLow);
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true, 8);
EXPECT_TRUE(encoder->UpdateConfig(test.config, test.server_id));
absl::uint128 nonces_left = encoder->num_nonces_left();
EXPECT_EQ(encoder->GenerateConnectionId(), test.connection_id);
EXPECT_EQ(encoder->num_nonces_left(), nonces_left - 1);
}
}
TEST_F(LoadBalancerEncoderTest, FollowSpecExample) {
const uint8_t config_id = 0, server_id_len = 3, nonce_len = 4;
const uint8_t raw_server_id[] = {
0x31,
0x44,
0x1a,
};
const char raw_key[] = {
0xfd, 0xf7, 0x26, 0xa9, 0x89, 0x3e, 0xc0, 0x5c,
0x06, 0x32, 0xd3, 0x95, 0x66, 0x80, 0xba, 0xf0,
};
random_.AddNextValues(0, 0x75c2699c);
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true, 8);
ASSERT_TRUE(encoder.has_value());
auto config = LoadBalancerConfig::Create(config_id, server_id_len, nonce_len,
absl::string_view(raw_key));
ASSERT_TRUE(config.has_value());
EXPECT_TRUE(
encoder->UpdateConfig(*config, LoadBalancerServerId(raw_server_id)));
EXPECT_TRUE(encoder->IsEncoding());
const char raw_connection_id[] = {0x07, 0x67, 0x94, 0x7d,
0x29, 0xbe, 0x05, 0x4a};
auto expected =
QuicConnectionId(raw_connection_id, 1 + server_id_len + nonce_len);
EXPECT_EQ(encoder->GenerateConnectionId(), expected);
}
TEST_F(LoadBalancerEncoderTest, EncoderTestVectors) {
const LoadBalancerEncoderTestCase test_vectors[4] = {
{
*LoadBalancerConfig::Create(0, 3, 4, kKey),
QuicConnectionId({0x07, 0x20, 0xb1, 0xd0, 0x7b, 0x35, 0x9d, 0x3c}),
MakeServerId(kServerId, 3),
},
{
*LoadBalancerConfig::Create(1, 10, 5, kKey),
QuicConnectionId({0x2f, 0xcc, 0x38, 0x1b, 0xc7, 0x4c, 0xb4, 0xfb,
0xad, 0x28, 0x23, 0xa3, 0xd1, 0xf8, 0xfe, 0xd2}),
MakeServerId(kServerId, 10),
},
{
*LoadBalancerConfig::Create(2, 8, 8, kKey),
QuicConnectionId({0x50, 0x4d, 0xd2, 0xd0, 0x5a, 0x7b, 0x0d, 0xe9,
0xb2, 0xb9, 0x90, 0x7a, 0xfb, 0x5e, 0xcf, 0x8c,
0xc3}),
MakeServerId(kServerId, 8),
},
{
*LoadBalancerConfig::Create(0, 9, 9, kKey),
QuicConnectionId({0x12, 0x57, 0x79, 0xc9, 0xcc, 0x86, 0xbe, 0xb3,
0xa3, 0xa4, 0xa3, 0xca, 0x96, 0xfc, 0xe4, 0xbf,
0xe0, 0xcd, 0xbc}),
MakeServerId(kServerId, 9),
},
};
for (const auto &test : test_vectors) {
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true, 8);
ASSERT_TRUE(encoder.has_value());
random_.AddNextValues(kNonceHigh, kNonceLow);
EXPECT_TRUE(encoder->UpdateConfig(test.config, test.server_id));
EXPECT_EQ(encoder->GenerateConnectionId(), test.connection_id);
}
}
TEST_F(LoadBalancerEncoderTest, RunOutOfNonces) {
const uint8_t server_id_len = 3;
TestLoadBalancerEncoderVisitor visitor;
auto encoder = LoadBalancerEncoder::Create(random_, &visitor, true, 8);
ASSERT_TRUE(encoder.has_value());
auto config = LoadBalancerConfig::Create(0, server_id_len, 4, kKey);
ASSERT_TRUE(config.has_value());
EXPECT_TRUE(
encoder->UpdateConfig(*config, MakeServerId(kServerId, server_id_len)));
EXPECT_EQ(visitor.num_adds(), 1u);
LoadBalancerEncoderPeer::SetNumNoncesLeft(*encoder, 2);
EXPECT_EQ(encoder->num_nonces_left(), 2);
EXPECT_EQ(encoder->GenerateConnectionId(),
QuicConnectionId({0x07, 0x29, 0xd8, 0xc2, 0x17, 0xce, 0x2d, 0x92}));
EXPECT_EQ(encoder->num_nonces_left(), 1);
encoder->GenerateConnectionId();
EXPECT_EQ(encoder->IsEncoding(), false);
EXPECT_EQ(visitor.num_deletes(), 1u);
}
TEST_F(LoadBalancerEncoderTest, UnroutableConnectionId) {
random_.AddNextValues(0x83, kNonceHigh);
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, false);
ASSERT_TRUE(encoder.has_value());
EXPECT_EQ(encoder->num_nonces_left(), 0);
auto connection_id = encoder->GenerateConnectionId();
QuicConnectionId expected({0xe3, 0x5d, 0x52, 0xde, 0x4d, 0xe3, 0xe7, 0x21});
EXPECT_EQ(expected, connection_id);
}
TEST_F(LoadBalancerEncoderTest, NonDefaultUnroutableConnectionIdLength) {
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true, 9);
ASSERT_TRUE(encoder.has_value());
QuicConnectionId connection_id = encoder->GenerateConnectionId();
EXPECT_EQ(connection_id.length(), 9);
}
TEST_F(LoadBalancerEncoderTest, DeleteConfigWhenNoConfigExists) {
TestLoadBalancerEncoderVisitor visitor;
auto encoder = LoadBalancerEncoder::Create(random_, &visitor, true);
ASSERT_TRUE(encoder.has_value());
encoder->DeleteConfig();
EXPECT_EQ(visitor.num_deletes(), 0u);
}
TEST_F(LoadBalancerEncoderTest, AddConfig) {
auto config = LoadBalancerConfig::CreateUnencrypted(0, 3, 4);
ASSERT_TRUE(config.has_value());
TestLoadBalancerEncoderVisitor visitor;
auto encoder = LoadBalancerEncoder::Create(random_, &visitor, true);
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3)));
EXPECT_EQ(visitor.num_adds(), 1u);
absl::uint128 left = encoder->num_nonces_left();
EXPECT_EQ(left, (0x1ull << 32));
EXPECT_TRUE(encoder->IsEncoding());
EXPECT_FALSE(encoder->IsEncrypted());
encoder->GenerateConnectionId();
EXPECT_EQ(encoder->num_nonces_left(), left - 1);
EXPECT_EQ(visitor.num_deletes(), 0u);
}
TEST_F(LoadBalancerEncoderTest, UpdateConfig) {
auto config = LoadBalancerConfig::CreateUnencrypted(0, 3, 4);
ASSERT_TRUE(config.has_value());
TestLoadBalancerEncoderVisitor visitor;
auto encoder = LoadBalancerEncoder::Create(random_, &visitor, true);
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3)));
config = LoadBalancerConfig::Create(1, 4, 4, kKey);
ASSERT_TRUE(config.has_value());
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 4)));
EXPECT_EQ(visitor.num_adds(), 2u);
EXPECT_EQ(visitor.num_deletes(), 1u);
EXPECT_TRUE(encoder->IsEncoding());
EXPECT_TRUE(encoder->IsEncrypted());
}
TEST_F(LoadBalancerEncoderTest, DeleteConfig) {
auto config = LoadBalancerConfig::CreateUnencrypted(0, 3, 4);
ASSERT_TRUE(config.has_value());
TestLoadBalancerEncoderVisitor visitor;
auto encoder = LoadBalancerEncoder::Create(random_, &visitor, true);
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3)));
encoder->DeleteConfig();
EXPECT_EQ(visitor.num_adds(), 1u);
EXPECT_EQ(visitor.num_deletes(), 1u);
EXPECT_FALSE(encoder->IsEncoding());
EXPECT_FALSE(encoder->IsEncrypted());
EXPECT_EQ(encoder->num_nonces_left(), 0);
}
TEST_F(LoadBalancerEncoderTest, DeleteConfigNoVisitor) {
auto config = LoadBalancerConfig::CreateUnencrypted(0, 3, 4);
ASSERT_TRUE(config.has_value());
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true);
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3)));
encoder->DeleteConfig();
EXPECT_FALSE(encoder->IsEncoding());
EXPECT_FALSE(encoder->IsEncrypted());
EXPECT_EQ(encoder->num_nonces_left(), 0);
}
TEST_F(LoadBalancerEncoderTest, MaybeReplaceConnectionIdReturnsNoChange) {
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, false);
ASSERT_TRUE(encoder.has_value());
EXPECT_EQ(encoder->MaybeReplaceConnectionId(TestConnectionId(1),
ParsedQuicVersion::Q046()),
std::nullopt);
}
TEST_F(LoadBalancerEncoderTest, MaybeReplaceConnectionIdReturnsChange) {
random_.AddNextValues(0x83, kNonceHigh);
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, false);
ASSERT_TRUE(encoder.has_value());
QuicConnectionId expected({0xe3, 0x5d, 0x52, 0xde, 0x4d, 0xe3, 0xe7, 0x21});
EXPECT_EQ(*encoder->MaybeReplaceConnectionId(TestConnectionId(1),
ParsedQuicVersion::RFCv1()),
expected);
}
TEST_F(LoadBalancerEncoderTest, GenerateNextConnectionIdReturnsNoChange) {
auto config = LoadBalancerConfig::CreateUnencrypted(0, 3, 4);
ASSERT_TRUE(config.has_value());
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, true);
EXPECT_TRUE(encoder->UpdateConfig(*config, MakeServerId(kServerId, 3)));
EXPECT_EQ(encoder->GenerateNextConnectionId(TestConnectionId(1)),
std::nullopt);
}
TEST_F(LoadBalancerEncoderTest, GenerateNextConnectionIdReturnsChange) {
random_.AddNextValues(0x83, kNonceHigh);
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, false);
ASSERT_TRUE(encoder.has_value());
QuicConnectionId expected({0xe3, 0x5d, 0x52, 0xde, 0x4d, 0xe3, 0xe7, 0x21});
EXPECT_EQ(*encoder->GenerateNextConnectionId(TestConnectionId(1)), expected);
}
TEST_F(LoadBalancerEncoderTest, ConnectionIdLengthsEncoded) {
auto len_encoder = LoadBalancerEncoder::Create(random_, nullptr, true);
ASSERT_TRUE(len_encoder.has_value());
EXPECT_EQ(len_encoder->ConnectionIdLength(0xe8), 9);
EXPECT_EQ(len_encoder->ConnectionIdLength(0x4a), 11);
EXPECT_EQ(len_encoder->ConnectionIdLength(0x09), 10);
auto encoder = LoadBalancerEncoder::Create(random_, nullptr, false);
ASSERT_TRUE(encoder.has_value());
EXPECT_EQ(encoder->ConnectionIdLength(0xe8), kQuicDefaultConnectionIdLength);
EXPECT_EQ(encoder->ConnectionIdLength(0x4a), kQuicDefaultConnectionIdLength);
EXPECT_EQ(encoder->ConnectionIdLength(0x09), kQuicDefaultConnectionIdLength);
uint8_t config_id = 0;
uint8_t server_id_len = 3;
uint8_t nonce_len = 6;
uint8_t config_0_len = server_id_len + nonce_len + 1;
auto config0 = LoadBalancerConfig::CreateUnencrypted(config_id, server_id_len,
nonce_len);
ASSERT_TRUE(config0.has_value());
EXPECT_TRUE(
encoder->UpdateConfig(*config0, MakeServerId(kServerId, server_id_len)));
EXPECT_EQ(encoder->ConnectionIdLength(0xe8), kQuicDefaultConnectionIdLength);
EXPECT_EQ(encoder->ConnectionIdLength(0x4a), kQuicDefaultConnectionIdLength);
EXPECT_EQ(encoder->ConnectionIdLength(0x09), config_0_len);
config_id = 1;
nonce_len++;
uint8_t config_1_len = server_id_len + nonce_len + 1;
auto config1 = LoadBalancerConfig::CreateUnencrypted(config_id, server_id_len,
nonce_len);
ASSERT_TRUE(config1.has_value());
EXPECT_TRUE(
encoder->UpdateConfig(*config1, MakeServerId(kServerId, server_id_len)));
EXPECT_EQ(encoder->ConnectionIdLength(0xe8), kQuicDefaultConnectionIdLength);
EXPECT_EQ(encoder->ConnectionIdLength(0x2a), config_1_len);
EXPECT_EQ(encoder->ConnectionIdLength(0x09), config_0_len);
encoder->DeleteConfig();
EXPECT_EQ(encoder->ConnectionIdLength(0xe8), kQuicDefaultConnectionIdLength);
EXPECT_EQ(encoder->ConnectionIdLength(0x2a), config_1_len);
EXPECT_EQ(encoder->ConnectionIdLength(0x09), config_0_len);
}
}
}
} |
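A sketch of the configure-then-encode flow the tests above cover, using the production QuicRandom singleton instead of the deterministic TestRandom. EncodeSketch is a hypothetical helper; the caller must supply a server ID whose length matches server_id_len (3 here):

#include <optional>

#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/load_balancer/load_balancer_config.h"
#include "quiche/quic/load_balancer/load_balancer_encoder.h"
#include "quiche/quic/load_balancer/load_balancer_server_id.h"

namespace quic {

// Hypothetical sketch: emits one routable connection ID, or nullopt on
// any configuration failure.
std::optional<QuicConnectionId> EncodeSketch(
    const LoadBalancerServerId& server_id) {
  auto encoder = LoadBalancerEncoder::Create(*QuicRandom::GetInstance(),
                                             /*visitor=*/nullptr,
                                             /*len_self_encoded=*/true);
  if (!encoder.has_value()) return std::nullopt;
  auto config = LoadBalancerConfig::CreateUnencrypted(
      /*config_id=*/0, /*server_id_len=*/3, /*nonce_len=*/4);
  if (!config.has_value() ||
      !encoder->UpdateConfig(*config, server_id)) {
    return std::nullopt;
  }
  return encoder->GenerateConnectionId();
}

}  // namespace quic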
136 | #ifndef TENSORFLOW_LITE_KERNELS_TEST_DELEGATE_PROVIDERS_H_
#define TENSORFLOW_LITE_KERNELS_TEST_DELEGATE_PROVIDERS_H_
#include <vector>
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
class KernelTestDelegateProviders {
public:
static KernelTestDelegateProviders* Get();
KernelTestDelegateProviders();
bool InitFromCmdlineArgs(int* argc, const char** argv);
tools::ToolParams* MutableParams() { return ¶ms_; }
const tools::ToolParams& ConstParams() const { return params_; }
std::vector<tools::ProvidedDelegateList::ProvidedDelegate> CreateAllDelegates(
const tools::ToolParams& params) const {
tools::ProvidedDelegateList util;
return util.CreateAllRankedDelegates(params);
}
std::vector<tools::ProvidedDelegateList::ProvidedDelegate>
CreateAllDelegates() const {
return delegate_list_util_.CreateAllRankedDelegates();
}
static constexpr char kUseSimpleAllocator[] = "use_simple_allocator";
static constexpr char kAccelerationTestConfigPath[] =
"acceleration_test_config_path";
private:
tools::ToolParams params_;
tools::ProvidedDelegateList delegate_list_util_;
};
}
#endif
#include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <string>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
constexpr char KernelTestDelegateProviders::kAccelerationTestConfigPath[];
constexpr char KernelTestDelegateProviders::kUseSimpleAllocator[];
KernelTestDelegateProviders* KernelTestDelegateProviders::Get() {
static KernelTestDelegateProviders* const providers =
new KernelTestDelegateProviders();
return providers;
}
KernelTestDelegateProviders::KernelTestDelegateProviders()
    : delegate_list_util_(&params_) {
delegate_list_util_.AddAllDelegateParams();
params_.AddParam(kAccelerationTestConfigPath,
tools::ToolParam::Create<std::string>(""));
params_.AddParam(kUseSimpleAllocator, tools::ToolParam::Create<bool>(false));
}
bool KernelTestDelegateProviders::InitFromCmdlineArgs(int* argc,
const char** argv) {
std::vector<tflite::Flag> flags = {
Flag(
kAccelerationTestConfigPath,
[this](const std::string& val, int argv_position) {
this->params_.Set<std::string>(kAccelerationTestConfigPath, val,
argv_position);
},
"", "Acceleration test config file for SingleOpModel",
Flag::kOptional),
Flag(
kUseSimpleAllocator,
[this](const bool& val, int argv_position) {
this->params_.Set<bool>(kUseSimpleAllocator, val, argv_position);
},
false, "Use Simple Memory Allocator for SingleOpModel",
Flag::kOptional)};
delegate_list_util_.AppendCmdlineFlags(flags);
bool parse_result = tflite::Flags::Parse(argc, argv, flags);
if (!parse_result || params_.Get<bool>("help")) {
std::string usage = Flags::Usage(argv[0], flags);
TFLITE_LOG(ERROR) << usage;
parse_result = false;
}
return parse_result;
}
} | #include "tensorflow/lite/kernels/test_delegate_providers.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(KernelTestDelegateProvidersTest, DelegateProvidersParams) {
KernelTestDelegateProviders providers;
const auto& params = providers.ConstParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
EXPECT_TRUE(params.HasParam("use_nnapi"));
int argc = 3;
const char* argv[] = {"program_name", "--use_nnapi=true",
"--other_undefined_flag=1"};
EXPECT_TRUE(providers.InitFromCmdlineArgs(&argc, argv));
EXPECT_TRUE(params.Get<bool>("use_nnapi"));
EXPECT_EQ(2, argc);
EXPECT_EQ("--other_undefined_flag=1", argv[1]);
}
TEST(KernelTestDelegateProvidersTest, CreateTfLiteDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
KernelTestDelegateProviders providers;
providers.MutableParams()->Set<bool>("use_xnnpack", true);
EXPECT_GE(providers.CreateAllDelegates().size(), 1);
tools::ToolParams local_params;
local_params.Merge(providers.ConstParams());
local_params.Set<bool>("use_xnnpack", false);
EXPECT_TRUE(providers.CreateAllDelegates(local_params).empty());
#endif
}
}
} |
137 | #ifndef TENSORFLOW_CORE_FRAMEWORK_GRAPH_DEF_UTIL_H_
#define TENSORFLOW_CORE_FRAMEWORK_GRAPH_DEF_UTIL_H_
#include <set>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class GraphDef;
class NodeDef;
string SummarizeGraphDef(const GraphDef& graph_def);
Status ValidateExternalGraphDefSyntax(const GraphDef& graph_def);
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset);
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset, bool skip_unknown_ops);
Status RemoveNewDefaultAttrsFromGraphDef(
GraphDef* graph_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed);
void StripDefaultAttributes(const OpRegistryInterface& op_registry,
protobuf::RepeatedPtrField<NodeDef>* nodes);
void OpsUsedByGraph(const GraphDef& graph_def,
std::set<string>* ops_used_in_graph);
Status StrippedOpListForGraph(const GraphDef& graph_def,
const OpRegistryInterface& op_registry,
OpList* stripped_op_list);
}
#endif
#include "tensorflow/core/framework/graph_def_util.h"
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def_util.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
string SummarizeGraphDef(const GraphDef& graph_def) {
string ret;
strings::StrAppend(
&ret, "versions = ", graph_def.versions().ShortDebugString(), ";\n");
for (const NodeDef& node : graph_def.node()) {
strings::StrAppend(&ret, SummarizeNodeDef(node), ";\n");
}
return ret;
}
Status ValidateExternalGraphDefSyntax(const GraphDef& graph_def) {
for (const NodeDef& node : graph_def.node()) {
TF_RETURN_IF_ERROR(ValidateExternalNodeDefSyntax(node));
}
return OkStatus();
}
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset) {
return AddDefaultAttrsToGraphDef(graph_def, op_registry, node_offset, false);
}
Status AddDefaultAttrsToGraphDef(GraphDef* graph_def,
const OpRegistryInterface& op_registry,
int node_offset, bool skip_unknown_ops) {
if (node_offset > graph_def->node_size()) {
return errors::InvalidArgument(
"Tried to add default attrs to GraphDef "
"starting at offset ",
node_offset, " with total nodes in graph: ", graph_def->node_size());
}
for (int i = node_offset; i < graph_def->node_size(); ++i) {
NodeDef* node_def = graph_def->mutable_node(i);
const OpDef* op_def;
Status s = op_registry.LookUpOpDef(node_def->op(), &op_def);
if (s.ok()) {
AddDefaultsToNodeDef(*op_def, node_def);
} else if (!skip_unknown_ops) {
return s;
}
}
return OkStatus();
}
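// Removes from `node_def` any attr that the consumer's OpDef does not know
// about but whose value matches the default declared in the producer's OpDef,
// keeping graphs emitted against a newer op registry loadable by an older
// consumer. Attrs whose names start with '_' are internal and left alone;
// removals are recorded in `op_attr_removed` as (op name, attr name) pairs.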
static Status RemoveNewDefaultAttrsFromNodeDef(
NodeDef* node_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed) {
const OpDef* producer_op_def;
const OpDef* consumer_op_def;
TF_RETURN_IF_ERROR(
producer_op_registry.LookUpOpDef(node_def->op(), &producer_op_def));
TF_RETURN_IF_ERROR(
consumer_op_registry.LookUpOpDef(node_def->op(), &consumer_op_def));
std::vector<string> to_remove;
for (const auto& attr : node_def->attr()) {
if (!absl::StartsWith(attr.first, "_") &&
FindAttr(attr.first, *consumer_op_def) == nullptr) {
const OpDef::AttrDef* producer_attr_def =
FindAttr(attr.first, *producer_op_def);
if (producer_attr_def == nullptr) {
return errors::InvalidArgument(
"Attr '", attr.first,
"' missing in producer's OpDef: ", SummarizeOpDef(*producer_op_def),
" but found in node: ", FormatNodeDefForError(*node_def));
}
if (producer_attr_def->has_default_value() &&
AreAttrValuesEqual(producer_attr_def->default_value(), attr.second)) {
to_remove.emplace_back(attr.first);
}
}
}
for (const string& attr_name : to_remove) {
node_def->mutable_attr()->erase(attr_name);
if (op_attr_removed != nullptr) {
op_attr_removed->insert(std::make_pair(node_def->op(), attr_name));
}
}
return OkStatus();
}
static bool IsFunction(const GraphDef& graph_def, const string& op_name) {
for (const auto& func_def : graph_def.library().function()) {
if (op_name == func_def.signature().name()) return true;
}
return false;
}
Status RemoveNewDefaultAttrsFromGraphDef(
GraphDef* graph_def, const OpRegistryInterface& consumer_op_registry,
const OpRegistryInterface& producer_op_registry,
std::set<std::pair<string, string>>* op_attr_removed) {
for (int n = 0; n < graph_def->node_size(); ++n) {
NodeDef* node_def = graph_def->mutable_node(n);
if (!IsFunction(*graph_def, node_def->op())) {
TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromNodeDef(
node_def, consumer_op_registry, producer_op_registry,
op_attr_removed));
}
}
for (int f = 0; f < graph_def->library().function_size(); ++f) {
FunctionDef* func_def = graph_def->mutable_library()->mutable_function(f);
for (int n = 0; n < func_def->node_def_size(); ++n) {
NodeDef* node_def = func_def->mutable_node_def(n);
if (!IsFunction(*graph_def, node_def->op())) {
TF_RETURN_IF_ERROR(RemoveNewDefaultAttrsFromNodeDef(
node_def, consumer_op_registry, producer_op_registry,
op_attr_removed));
}
}
}
return OkStatus();
}
void StripDefaultAttributes(const OpRegistryInterface& op_registry,
protobuf::RepeatedPtrField<NodeDef>* nodes) {
for (int i = 0; i < nodes->size(); ++i) {
NodeDef* node = nodes->Mutable(i);
const OpDef* op_def;
const OpRegistrationData* op_reg_data = nullptr;
Status s = op_registry.LookUp(node->op(), &op_reg_data);
if (!s.ok()) {
VLOG(1) << "Ignoring encountered unknown operation "
<< SummarizeNodeDef(*node)
<< " when stripping default attributes. It is likely a function, "
"in which case ignoring it is fine";
continue;
}
op_def = &op_reg_data->op_def;
for (const OpDef::AttrDef& attr_def : op_def->attr()) {
if (attr_def.has_default_value()) {
AttrValueMap* attrs = node->mutable_attr();
const string& name = attr_def.name();
auto iter = attrs->find(name);
if (iter != attrs->end()) {
const AttrValue& default_value = attr_def.default_value();
          if (AreAttrValuesEqual(iter->second, default_value,
                                 /*allow_false_negatives=*/true)) {
attrs->erase(name);
}
}
}
}
}
}
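// Computes the transitive set of ops reachable from the graph's nodes,
// expanding function-library calls with a worklist, then reports only the
// names that are real ops rather than functions.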
void OpsUsedByGraph(const GraphDef& graph_def,
std::set<string>* ops_used_in_graph) {
std::unordered_map<string, const FunctionDef*> name_to_function;
for (const auto& function : graph_def.library().function()) {
name_to_function.insert(
std::make_pair(function.signature().name(), &function));
}
std::set<string> used_ops;
std::vector<const FunctionDef*> functions_to_process;
const auto mark_op_as_used = [&used_ops, &functions_to_process,
&name_to_function](const string& op) {
if (used_ops.insert(op).second) {
const auto it = name_to_function.find(op);
if (it != name_to_function.end()) {
functions_to_process.push_back(it->second);
}
}
};
for (const auto& node : graph_def.node()) {
mark_op_as_used(node.op());
}
while (!functions_to_process.empty()) {
const FunctionDef* fun = functions_to_process.back();
functions_to_process.pop_back();
for (const auto& node : fun->node_def()) {
mark_op_as_used(node.op());
}
}
ops_used_in_graph->clear();
for (const string& op_name : used_ops) {
if (name_to_function.find(op_name) == name_to_function.end()) {
ops_used_in_graph->insert(op_name);
}
}
}
Status StrippedOpListForGraph(const GraphDef& graph_def,
const OpRegistryInterface& op_registry,
OpList* stripped_op_list) {
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
stripped_op_list->clear_op();
for (const string& op_name : used_ops) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(op_registry.LookUpOpDef(op_name, &op_def));
OpDef* stripped_op = stripped_op_list->add_op();
stripped_op->CopyFrom(*op_def);
RemoveDescriptionsFromOpDef(stripped_op);
}
return OkStatus();
}
} | #include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
Status FinalizeOpDef(const OpDefBuilder& b, OpDef* op_def) {
OpRegistrationData op_reg_data;
const Status s = b.Finalize(&op_reg_data);
*op_def = op_reg_data.op_def;
return s;
}
TEST(AddToGraphTest, MakeGraphDefWithNamespacedOpName) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("Project>SomeOp"), op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("node", "Project>SomeOp", ®istry)
.Finalize(graph_def.add_node()));
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, NoChangeWithDefault) {
OpList op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("NoChangeWithDefault").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("ncwd", "NoChangeWithDefault", ®istry)
.Finalize(graph_def.add_node()));
GraphDef expected_graph_def = graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(RemoveNewDefaultAttrsFromGraphDef(&graph_def, registry, registry,
&op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, NoChangeNoDefault) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("NoChangeNoDefault").Attr("a: int"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("ncnd", "NoChangeNoDefault", ®istry)
.Attr("a", 42)
.Finalize(graph_def.add_node()));
GraphDef expected_graph_def = graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(RemoveNewDefaultAttrsFromGraphDef(&graph_def, registry, registry,
&op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, UsesDefault) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("UsesDefault"), consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("UsesDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("uses_default", "UsesDefault", &producer_registry)
.Finalize(produced_graph_def.add_node()));
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
GraphDef expected_graph_def;
TF_ASSERT_OK(NodeDefBuilder("uses_default", "UsesDefault", &consumer_registry)
.Finalize(expected_graph_def.add_node()));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
std::set<std::pair<string, string>> expected_removed({{"UsesDefault", "a"}});
EXPECT_EQ(expected_removed, op_attr_removed);
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, ChangedFromDefault) {
OpList consumer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("ChangedFromDefault"),
consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("ChangedFromDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("changed_from_default", "ChangedFromDefault",
&producer_registry)
.Attr("a", 9)
.Finalize(produced_graph_def.add_node()));
GraphDef expected_graph_def = produced_graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_TRUE(op_attr_removed.empty());
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, UnderscoreAttrs) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("Underscore"), consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("Underscore"), producer_op_list.add_op()));
OpDef::AttrDef* attr = producer_op_list.mutable_op(0)->add_attr();
attr->set_name("_underscore");
attr->set_type("int");
attr->mutable_default_value()->set_i(17);
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
TF_ASSERT_OK(NodeDefBuilder("node", "Underscore", &producer_registry)
.Attr("_underscore", 17)
.Finalize(produced_graph_def.add_node()));
GraphDef expected_graph_def = produced_graph_def;
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_EQ(op_attr_removed.size(), 0);
}
TEST(RemoveNewDefaultAttrsFromGraphDefTest, HasFunction) {
OpList consumer_op_list;
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("UsesDefault"), consumer_op_list.add_op()));
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("ChangedFromDefault"),
consumer_op_list.add_op()));
OpListOpRegistry consumer_registry(&consumer_op_list);
OpList producer_op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("UsesDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
TF_ASSERT_OK(
FinalizeOpDef(OpDefBuilder("ChangedFromDefault").Attr("a: int = 17"),
producer_op_list.add_op()));
OpListOpRegistry producer_registry(&producer_op_list);
GraphDef produced_graph_def;
*produced_graph_def.mutable_library()->add_function() =
FunctionDefHelper::Create(
"my_func", {}, {}, {},
{{{"x"}, "UsesDefault", {}, {{"a", 17}}},
{{"y"}, "ChangedFromDefault", {}, {{"a", 99}}}},
{});
OpList function_op_list;
*function_op_list.add_op() =
produced_graph_def.library().function(0).signature();
OpListOpRegistry function_registry(&function_op_list);
TF_ASSERT_OK(NodeDefBuilder("call_func", "my_func", &function_registry)
.Finalize(produced_graph_def.add_node()));
std::set<std::pair<string, string>> op_attr_removed;
TF_ASSERT_OK(
RemoveNewDefaultAttrsFromGraphDef(&produced_graph_def, consumer_registry,
producer_registry, &op_attr_removed));
GraphDef expected_graph_def;
*expected_graph_def.mutable_library()->add_function() =
FunctionDefHelper::Create(
"my_func", {}, {}, {},
{{{"x"}, "UsesDefault", {}, {}},
{{"y"}, "ChangedFromDefault", {}, {{"a", 99}}}},
{});
TF_ASSERT_OK(NodeDefBuilder("call_func", "my_func", &function_registry)
.Finalize(expected_graph_def.add_node()));
TF_EXPECT_GRAPH_EQ(expected_graph_def, produced_graph_def);
EXPECT_EQ(expected_graph_def.library().DebugString(),
produced_graph_def.library().DebugString());
std::set<std::pair<string, string>> expected_removed({{"UsesDefault", "a"}});
EXPECT_EQ(expected_removed, op_attr_removed);
}
TEST(StripDefaultAttributesTest, DefaultStripped) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("OpName1").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("op1", "OpName1", ®istry)
.Finalize(graph_def.add_node()));
ASSERT_EQ(1, graph_def.node(0).attr_size());
ASSERT_EQ(12, graph_def.node(0).attr().at("a").i());
StripDefaultAttributes(registry, graph_def.mutable_node());
ASSERT_EQ(1, graph_def.node_size());
ASSERT_EQ(0, graph_def.node(0).attr_size());
}
TEST(StripDefaultAttributesTest, NonDefaultNotStripped) {
OpList op_list;
TF_ASSERT_OK(FinalizeOpDef(OpDefBuilder("OpName1").Attr("a: int = 12"),
op_list.add_op()));
OpListOpRegistry registry(&op_list);
GraphDef graph_def;
TF_ASSERT_OK(NodeDefBuilder("op1", "OpName1", ®istry)
.Attr("a", 9)
.Finalize(graph_def.add_node()));
GraphDef expected = graph_def;
StripDefaultAttributes(registry, graph_def.mutable_node());
TF_EXPECT_GRAPH_EQ(expected, graph_def);
}
TEST(StrippedOpListForGraphTest, FlatTest) {
OpList op_list;
for (const string& op : {"A", "B", "C", "D"}) {
OpDef* op_def = op_list.add_op();
op_def->set_name(op);
op_def->set_summary("summary");
op_def->set_description("description");
op_def->set_is_commutative(op == "B");
}
const string graph_ops[4][3] = {
{"C", "B", "B"}, {"B", "C", "B"}, {"B", "B", "C"}, {"C", "C", "B"}};
for (const bool use_function : {false, true}) {
for (int order = 0; order < 4; order++) {
GraphDef graph_def;
if (use_function) {
FunctionDef* function_def = graph_def.mutable_library()->add_function();
function_def->mutable_signature()->set_name("F");
for (const string& op : graph_ops[order]) {
function_def->add_node_def()->set_op(op);
}
graph_def.add_node()->set_op("F");
} else {
for (const string& op : graph_ops[order]) {
string name = strings::StrCat("name", graph_def.node_size());
NodeDef* node = graph_def.add_node();
node->set_name(name);
node->set_op(op);
}
}
OpList stripped_op_list;
TF_ASSERT_OK(StrippedOpListForGraph(graph_def, OpListOpRegistry(&op_list),
&stripped_op_list));
ASSERT_EQ(stripped_op_list.op_size(), 2);
for (int i = 0; i < 2; i++) {
const OpDef& op = stripped_op_list.op(i);
EXPECT_EQ(op.name(), i ? "C" : "B");
EXPECT_EQ(op.summary(), "");
EXPECT_EQ(op.description(), "");
EXPECT_EQ(op.is_commutative(), !i);
}
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
ASSERT_EQ(std::set<string>({"B", "C"}), used_ops);
}
}
}
TEST(StrippedOpListForGraphTest, NestedFunctionTest) {
OpList op_list;
op_list.add_op()->set_name("A");
for (const bool recursive : {false, true}) {
GraphDef graph_def;
FunctionDef* b = graph_def.mutable_library()->add_function();
FunctionDef* c = graph_def.mutable_library()->add_function();
b->mutable_signature()->set_name("B");
c->mutable_signature()->set_name("C");
b->add_node_def()->set_op("A");
c->add_node_def()->set_op("B");
if (recursive) {
b->add_node_def()->set_op("B");
c->add_node_def()->set_op("C");
}
graph_def.add_node()->set_op("C");
OpList stripped_op_list;
TF_ASSERT_OK(StrippedOpListForGraph(graph_def, OpListOpRegistry(&op_list),
&stripped_op_list));
ASSERT_EQ(stripped_op_list.op_size(), 1);
ASSERT_EQ(stripped_op_list.op(0).name(), "A");
std::set<string> used_ops;
OpsUsedByGraph(graph_def, &used_ops);
ASSERT_EQ(std::set<string>({"A"}), used_ops);
}
}
}
} |
138 | #ifndef TENSORFLOW_LITE_TOOLS_OPTIMIZE_MODEL_UTILS_H_
#define TENSORFLOW_LITE_TOOLS_OPTIMIZE_MODEL_UTILS_H_
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace optimize {
namespace utils {
void MakeDequantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output);
void MakeQuantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output);
void MakeTensor(const string& name, const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, std::unique_ptr<TensorT>* tensor);
void MakeTensorWithQuantParam(const string& name,
const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, float scale,
int64_t zero_point,
std::unique_ptr<TensorT>* tensor);
bool QuantizationParametersExist(const TensorT* tensor);
bool HasBuffer(const ModelT* model, const SubGraphT* subgraph,
int tensor_index);
bool HasMinMax(const TensorT* tensor);
void SetOperatorCodeVersion(ModelT* model);
void WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes);
std::unique_ptr<flatbuffers::FlatBufferBuilder> FinishModel(
const tflite::ModelT* model);
std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
const string& model_filepath);
}
}
}
#endif
#include "tensorflow/lite/tools/optimize/model_utils.h"
#include <fstream>
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
#include "tensorflow/lite/tools/optimize/operator_property.h"
namespace tflite {
namespace optimize {
namespace utils {
namespace {
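// Returns the index of `op_code` in the model's operator_codes table,
// appending a new entry (with both the builtin code and its deprecated
// counterpart populated) when the op code is not present yet.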
int32_t GetOrInsertOpCodeIndex(ModelT* model, const BuiltinOperator& op_code,
int32_t version) {
for (size_t i = 0; i < model->operator_codes.size(); ++i) {
if (GetBuiltinCode(model->operator_codes[i].get()) == op_code) {
return i;
}
}
model->operator_codes.push_back(std::make_unique<OperatorCodeT>());
int op_code_idx = model->operator_codes.size() - 1;
model->operator_codes[op_code_idx]->builtin_code = op_code;
model->operator_codes[op_code_idx]->deprecated_builtin_code =
ConvertBuiltinCodeToDeprecatedBuiltinCode(op_code);
model->operator_codes[op_code_idx]->version = version;
return op_code_idx;
}
}
void MakeDequantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output) {
OperatorT* op_raw = new OperatorT;
op_raw->opcode_index =
GetOrInsertOpCodeIndex(model, BuiltinOperator_DEQUANTIZE, 2);
op_raw->inputs = {input};
op_raw->outputs = {output};
op->reset(op_raw);
}
void MakeQuantizeOperator(ModelT* model, std::unique_ptr<OperatorT>* op,
int32_t input, int32_t output) {
OperatorT* op_raw = new OperatorT;
op_raw->opcode_index =
GetOrInsertOpCodeIndex(model, BuiltinOperator_QUANTIZE, 1);
op_raw->inputs = {input};
op_raw->outputs = {output};
op->reset(op_raw);
}
void MakeTensor(const string& name, const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = shape;
if (!shape_signature.empty()) {
tensor_raw->shape_signature = shape_signature;
}
tensor_raw->type = type;
tensor->reset(tensor_raw);
}
void MakeTensorWithQuantParam(const string& name,
const std::vector<int32_t>& shape,
const std::vector<int32_t>& shape_signature,
const TensorType& type, float scale,
int64_t zero_point,
std::unique_ptr<TensorT>* tensor) {
MakeTensor(name, shape, shape_signature, type, tensor);
(*tensor)->quantization = std::make_unique<QuantizationParametersT>();
(*tensor)->quantization->scale.push_back(scale);
(*tensor)->quantization->zero_point.push_back(zero_point);
}
bool QuantizationParametersExist(const TensorT* tensor) {
return tensor->quantization != nullptr &&
!tensor->quantization->scale.empty() &&
!tensor->quantization->zero_point.empty();
}
bool HasBuffer(const ModelT* model, const SubGraphT* subgraph,
int tensor_index) {
const int buffer_index = subgraph->tensors[tensor_index]->buffer;
BufferT* buffer = model->buffers[buffer_index].get();
if (buffer == nullptr || buffer->data.empty()) {
return false;
}
return true;
}
bool HasMinMax(const TensorT* tensor) {
return tensor->quantization && !tensor->quantization->min.empty() &&
!tensor->quantization->max.empty();
}
void SetOperatorCodeVersion(ModelT* model) {
for (int subgraph_idx = 0, end = model->subgraphs.size(); subgraph_idx < end;
subgraph_idx++) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (int op_idx = subgraph->operators.size() - 1; op_idx >= 0; op_idx--) {
OperatorT* op = subgraph->operators[op_idx].get();
OperatorCodeT* op_code = model->operator_codes[op->opcode_index].get();
operator_property::OperatorProperty property =
operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);
if (property.quantizable && op_code->version < property.version) {
op_code->version = property.version;
}
}
}
}
void WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes) {
std::fstream stream(out_file, std::ios::binary | std::ios::out);
for (size_t i = 0; i < num_bytes; i++) {
stream << bytes[i];
}
TFLITE_DCHECK(!stream.bad() && !stream.fail());
}
std::unique_ptr<flatbuffers::FlatBufferBuilder> FinishModel(
const tflite::ModelT* model) {
std::unique_ptr<flatbuffers::FlatBufferBuilder> builder(
new flatbuffers::FlatBufferBuilder());
auto packed_model = tflite::Model::Pack(*builder, model);
tflite::FinishModelBuffer(*builder, packed_model);
return builder;
}
std::unique_ptr<tflite::ModelT> CreateMutableModelFromFile(
const string& model_filepath) {
auto fb_model =
tflite::FlatBufferModel::BuildFromFile(model_filepath.c_str());
auto tflite_model = fb_model->GetModel();
auto copied_model = std::make_unique<tflite::ModelT>();
tflite_model->UnPackTo(copied_model.get(), nullptr);
return copied_model;
}
}
}
} | #include "tensorflow/lite/tools/optimize/model_utils.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace optimize {
namespace utils {
namespace {
TEST(ModelUtilsTest, QuantizationParametersExist) {
TensorT tensor;
tensor.quantization = std::make_unique<QuantizationParametersT>();
tensor.quantization->scale.push_back(0.5);
tensor.quantization->scale.push_back(1.5);
EXPECT_FALSE(QuantizationParametersExist(&tensor));
tensor.quantization->zero_point.push_back(1);
tensor.quantization->zero_point.push_back(-1);
EXPECT_TRUE(QuantizationParametersExist(&tensor));
}
TEST(ModelUtilsTest, HasBuffer) {
tflite::ModelT model;
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto tensor = std::make_unique<tflite::TensorT>();
tensor->buffer = 0;
subgraph->tensors.push_back(std::move(tensor));
model.subgraphs.push_back(std::move(subgraph));
auto buffer = std::make_unique<tflite::BufferT>();
model.buffers.push_back(std::move(buffer));
EXPECT_FALSE(HasBuffer(&model, model.subgraphs[0].get(), 0));
model.buffers[0]->data = {0, 1, 2, 3};
EXPECT_TRUE(HasBuffer(&model, model.subgraphs[0].get(), 0));
}
TEST(ModelUtilsTest, HasMinMax) {
TensorT tensor;
tensor.quantization = std::make_unique<QuantizationParametersT>();
tensor.quantization->min.push_back(0.5);
EXPECT_FALSE(HasMinMax(&tensor));
tensor.quantization->max.push_back(1.5);
EXPECT_TRUE(HasMinMax(&tensor));
}
}
}
}
} |
139 | #ifndef ABSL_LOG_INTERNAL_GLOBALS_H_
#define ABSL_LOG_INTERNAL_GLOBALS_H_
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
bool IsInitialized();
void SetInitialized();
void WriteToStderr(absl::string_view message, absl::LogSeverity severity);
void SetTimeZone(absl::TimeZone tz);
const absl::TimeZone* TimeZone();
bool ShouldSymbolizeLogStackTrace();
void EnableSymbolizeLogStackTrace(bool on_off);
int MaxFramesInLogStackTrace();
void SetMaxFramesInLogStackTrace(int max_num_frames);
bool ExitOnDFatal();
void SetExitOnDFatal(bool on_off);
bool SuppressSigabortTrace();
bool SetSuppressSigabortTrace(bool on_off);
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/log/internal/globals.h"
#include <atomic>
#include <cstdio>
#if defined(__EMSCRIPTEN__)
#include <emscripten/console.h>
#endif
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
namespace {
ABSL_CONST_INIT std::atomic<bool> logging_initialized(false);
ABSL_CONST_INIT std::atomic<absl::TimeZone*> timezone_ptr{nullptr};
ABSL_CONST_INIT std::atomic<bool> symbolize_stack_trace(true);
ABSL_CONST_INIT std::atomic<int> max_frames_in_stack_trace(64);
ABSL_CONST_INIT std::atomic<bool> exit_on_dfatal(true);
ABSL_CONST_INIT std::atomic<bool> suppress_sigabort_trace(false);
}
bool IsInitialized() {
return logging_initialized.load(std::memory_order_acquire);
}
void SetInitialized() {
logging_initialized.store(true, std::memory_order_release);
}
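// Writes a message directly to stderr. Under Emscripten the message is routed
// to the JavaScript console instead (minus the trailing newline the console
// re-adds); on Windows, stderr is additionally flushed for warnings and above.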
void WriteToStderr(absl::string_view message, absl::LogSeverity severity) {
if (message.empty()) return;
#if defined(__EMSCRIPTEN__)
const auto message_minus_newline = absl::StripSuffix(message, "\n");
#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
emscripten_errn(message_minus_newline.data(), message_minus_newline.size());
#else
std::string null_terminated_message(message_minus_newline);
_emscripten_err(null_terminated_message.c_str());
#endif
#else
std::fwrite(message.data(), message.size(), 1, stderr);
#endif
#if defined(_WIN64) || defined(_WIN32) || defined(_WIN16)
if (severity >= absl::LogSeverity::kWarning) {
std::fflush(stderr);
}
#else
(void)severity;
#endif
}
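// Publishes the process-wide time zone exactly once: the compare-exchange
// from nullptr makes a second call fail with a fatal raw log instead of
// silently replacing the value.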
void SetTimeZone(absl::TimeZone tz) {
absl::TimeZone* expected = nullptr;
absl::TimeZone* new_tz = new absl::TimeZone(tz);
if (!timezone_ptr.compare_exchange_strong(expected, new_tz,
std::memory_order_release,
std::memory_order_relaxed)) {
ABSL_RAW_LOG(FATAL,
"absl::log_internal::SetTimeZone() has already been called");
}
}
const absl::TimeZone* TimeZone() {
return timezone_ptr.load(std::memory_order_acquire);
}
bool ShouldSymbolizeLogStackTrace() {
return symbolize_stack_trace.load(std::memory_order_acquire);
}
void EnableSymbolizeLogStackTrace(bool on_off) {
symbolize_stack_trace.store(on_off, std::memory_order_release);
}
int MaxFramesInLogStackTrace() {
return max_frames_in_stack_trace.load(std::memory_order_acquire);
}
void SetMaxFramesInLogStackTrace(int max_num_frames) {
max_frames_in_stack_trace.store(max_num_frames, std::memory_order_release);
}
bool ExitOnDFatal() { return exit_on_dfatal.load(std::memory_order_acquire); }
void SetExitOnDFatal(bool on_off) {
exit_on_dfatal.store(on_off, std::memory_order_release);
}
bool SuppressSigabortTrace() {
return suppress_sigabort_trace.load(std::memory_order_acquire);
}
bool SetSuppressSigabortTrace(bool on_off) {
return suppress_sigabort_trace.exchange(on_off);
}
}
ABSL_NAMESPACE_END
} | #include "absl/log/globals.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/log_severity.h"
#include "absl/log/internal/globals.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
namespace {
using ::testing::_;
using ::testing::StrEq;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
constexpr static absl::LogSeverityAtLeast DefaultMinLogLevel() {
return absl::LogSeverityAtLeast::kInfo;
}
constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {
return absl::LogSeverityAtLeast::kError;
}
TEST(TestGlobals, MinLogLevel) {
EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
absl::SetMinLogLevel(DefaultMinLogLevel());
}
TEST(TestGlobals, ScopedMinLogLevel) {
EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
{
absl::log_internal::ScopedMinLogLevel scoped_stderr_threshold(
absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
}
EXPECT_EQ(absl::MinLogLevel(), DefaultMinLogLevel());
}
TEST(TestGlobals, StderrThreshold) {
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
absl::SetStderrThreshold(absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
absl::SetStderrThreshold(DefaultStderrThreshold());
}
TEST(TestGlobals, ScopedStderrThreshold) {
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
{
absl::ScopedStderrThreshold scoped_stderr_threshold(
absl::LogSeverityAtLeast::kError);
EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
}
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
}
TEST(TestGlobals, LogBacktraceAt) {
EXPECT_FALSE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111));
absl::SetLogBacktraceLocation("some_file.cc", 111);
EXPECT_TRUE(absl::log_internal::ShouldLogBacktraceAt("some_file.cc", 111));
EXPECT_FALSE(
absl::log_internal::ShouldLogBacktraceAt("another_file.cc", 222));
}
TEST(TestGlobals, LogPrefix) {
EXPECT_TRUE(absl::ShouldPrependLogPrefix());
absl::EnableLogPrefix(false);
EXPECT_FALSE(absl::ShouldPrependLogPrefix());
absl::EnableLogPrefix(true);
EXPECT_TRUE(absl::ShouldPrependLogPrefix());
}
TEST(TestGlobals, SetGlobalVLogLevel) {
EXPECT_EQ(absl::SetGlobalVLogLevel(42), 0);
EXPECT_EQ(absl::SetGlobalVLogLevel(1337), 42);
EXPECT_EQ(absl::SetGlobalVLogLevel(0), 1337);
}
TEST(TestGlobals, SetVLogLevel) {
EXPECT_EQ(absl::SetVLogLevel("setvloglevel", 42), 0);
EXPECT_EQ(absl::SetVLogLevel("setvloglevel", 1337), 42);
EXPECT_EQ(absl::SetVLogLevel("othersetvloglevel", 50), 0);
}
TEST(TestGlobals, AndroidLogTag) {
EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag(nullptr), ".*");
EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("native"));
absl::SetAndroidNativeTag("test_tag");
EXPECT_THAT(absl::log_internal::GetAndroidNativeTag(), StrEq("test_tag"));
EXPECT_DEATH_IF_SUPPORTED(absl::SetAndroidNativeTag("test_tag_fail"), ".*");
}
TEST(TestExitOnDFatal, OffTest) {
absl::log_internal::SetExitOnDFatal(false);
EXPECT_FALSE(absl::log_internal::ExitOnDFatal());
{
absl::ScopedMockLog log(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(log, Log(absl::kLogDebugFatal, _, "This should not be fatal"));
log.StartCapturingLogs();
LOG(DFATAL) << "This should not be fatal";
}
}
#if GTEST_HAS_DEATH_TEST
TEST(TestDeathWhileExitOnDFatal, OnTest) {
absl::log_internal::SetExitOnDFatal(true);
EXPECT_TRUE(absl::log_internal::ExitOnDFatal());
EXPECT_DEBUG_DEATH({ LOG(DFATAL) << "This should be fatal in debug mode"; },
"This should be fatal in debug mode");
}
#endif
} |
140 | #ifndef TENSORFLOW_CORE_KERNELS_MFCC_MEL_FILTERBANK_H_
#define TENSORFLOW_CORE_KERNELS_MFCC_MEL_FILTERBANK_H_
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
namespace tensorflow {
class MfccMelFilterbank {
public:
MfccMelFilterbank();
bool Initialize(int input_length,
double input_sample_rate, int output_channel_count,
double lower_frequency_limit, double upper_frequency_limit);
void Compute(const std::vector<double>& input,
std::vector<double>* output) const;
private:
double FreqToMel(double freq) const;
bool initialized_;
int num_channels_;
double sample_rate_;
int input_length_;
std::vector<double> center_frequencies_;
std::vector<double> weights_;
std::vector<int> band_mapper_;
int start_index_;
int end_index_;
MfccMelFilterbank(const MfccMelFilterbank&) = delete;
void operator=(const MfccMelFilterbank&) = delete;
};
}
#endif
#include "tensorflow/core/kernels/mfcc_mel_filterbank.h"
#include <math.h>
#include <limits>
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
MfccMelFilterbank::MfccMelFilterbank() : initialized_(false) {}
bool MfccMelFilterbank::Initialize(int input_length, double input_sample_rate,
int output_channel_count,
double lower_frequency_limit,
double upper_frequency_limit) {
num_channels_ = output_channel_count;
sample_rate_ = input_sample_rate;
input_length_ = input_length;
if (num_channels_ < 1) {
LOG(ERROR) << "Number of filterbank channels must be positive.";
return false;
}
if (sample_rate_ <= 0) {
LOG(ERROR) << "Sample rate must be positive.";
return false;
}
if (input_length < 2) {
LOG(ERROR) << "Input length must greater than 1.";
return false;
}
if (lower_frequency_limit < 0) {
LOG(ERROR) << "Lower frequency limit must be nonnegative.";
return false;
}
if (upper_frequency_limit <= lower_frequency_limit) {
LOG(ERROR) << "Upper frequency limit must be greater than "
<< "lower frequency limit.";
return false;
}
std::size_t center_frequencies_size = std::size_t(num_channels_) + 1;
if (center_frequencies_size >= std::numeric_limits<int>::max() ||
center_frequencies_size > center_frequencies_.max_size()) {
LOG(ERROR) << "Number of filterbank channels must be less than "
<< std::numeric_limits<int>::max()
<< " and less than or equal to "
<< center_frequencies_.max_size();
return false;
}
center_frequencies_.resize(center_frequencies_size);
const double mel_low = FreqToMel(lower_frequency_limit);
const double mel_hi = FreqToMel(upper_frequency_limit);
const double mel_span = mel_hi - mel_low;
const double mel_spacing = mel_span / static_cast<double>(num_channels_ + 1);
for (int i = 0; i < num_channels_ + 1; ++i) {
center_frequencies_[i] = mel_low + (mel_spacing * (i + 1));
}
const double hz_per_sbin =
0.5 * sample_rate_ / static_cast<double>(input_length_ - 1);
start_index_ = static_cast<int>(1.5 + (lower_frequency_limit / hz_per_sbin));
end_index_ = static_cast<int>(upper_frequency_limit / hz_per_sbin);
band_mapper_.resize(input_length_);
int channel = 0;
for (int i = 0; i < input_length_; ++i) {
double melf = FreqToMel(i * hz_per_sbin);
if ((i < start_index_) || (i > end_index_)) {
band_mapper_[i] = -2;
} else {
while ((channel < num_channels_) &&
(center_frequencies_[channel] < melf)) {
++channel;
}
band_mapper_[i] = channel - 1;
}
}
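  // weights_[i] is the fraction of bin i's energy assigned to the lower of
  // the two overlapping triangular filters (band_mapper_[i]); the remainder,
  // 1 - weights_[i], belongs to the channel above it, as applied in Compute().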
weights_.resize(input_length_);
for (int i = 0; i < input_length_; ++i) {
channel = band_mapper_[i];
if ((i < start_index_) || (i > end_index_)) {
weights_[i] = 0.0;
} else {
if (channel >= 0) {
weights_[i] =
(center_frequencies_[channel + 1] - FreqToMel(i * hz_per_sbin)) /
(center_frequencies_[channel + 1] - center_frequencies_[channel]);
} else {
weights_[i] = (center_frequencies_[0] - FreqToMel(i * hz_per_sbin)) /
(center_frequencies_[0] - mel_low);
}
}
}
std::vector<int> bad_channels;
for (int c = 0; c < num_channels_; ++c) {
float band_weights_sum = 0.0;
for (int i = 0; i < input_length_; ++i) {
if (band_mapper_[i] == c - 1) {
band_weights_sum += (1.0 - weights_[i]);
} else if (band_mapper_[i] == c) {
band_weights_sum += weights_[i];
}
}
if (band_weights_sum < 0.5) {
bad_channels.push_back(c);
}
}
if (!bad_channels.empty()) {
LOG(ERROR) << "Missing " << bad_channels.size() << " bands "
<< " starting at " << bad_channels[0]
<< " in mel-frequency design. "
<< "Perhaps too many channels or "
<< "not enough frequency resolution in spectrum. ("
<< "input_length: " << input_length
<< " input_sample_rate: " << input_sample_rate
<< " output_channel_count: " << output_channel_count
<< " lower_frequency_limit: " << lower_frequency_limit
<< " upper_frequency_limit: " << upper_frequency_limit;
}
initialized_ = true;
return true;
}
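// Applies the filterbank. The input is expected to be a squared-magnitude
// (power) spectrum: sqrt() is taken per bin, and each bin's value is split
// between two adjacent channels according to the precomputed weights.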
void MfccMelFilterbank::Compute(const std::vector<double> &input,
std::vector<double> *output) const {
if (!initialized_) {
LOG(ERROR) << "Mel Filterbank not initialized.";
return;
}
  if (input.size() <= static_cast<size_t>(end_index_)) {
LOG(ERROR) << "Input too short to compute filterbank";
return;
}
output->assign(num_channels_, 0.0);
for (int i = start_index_; i <= end_index_; i++) {
double spec_val = sqrt(input[i]);
double weighted = spec_val * weights_[i];
int channel = band_mapper_[i];
if (channel >= 0)
(*output)[channel] += weighted;
channel++;
if (channel < num_channels_)
(*output)[channel] += spec_val - weighted;
}
}
double MfccMelFilterbank::FreqToMel(double freq) const {
return 1127.0 * log1p(freq / 700.0);
}
} | #include "tensorflow/core/kernels/mfcc_mel_filterbank.h"
#include <limits>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MfccMelFilterbankTest, AgreesWithPythonGoldenValues) {
MfccMelFilterbank filterbank;
std::vector<double> input;
const int kSampleCount = 513;
input.reserve(kSampleCount);
for (int i = 0; i < kSampleCount; ++i) {
input.push_back(i + 1);
}
const int kChannelCount = 20;
  filterbank.Initialize(input.size(), /*input_sample_rate=*/22050,
                        /*output_channel_count=*/kChannelCount,
                        /*lower_frequency_limit=*/20.0,
                        /*upper_frequency_limit=*/4000.0);
std::vector<double> output;
filterbank.Compute(input, &output);
std::vector<double> expected = {
7.38894574, 10.30330648, 13.72703292, 17.24158686, 21.35253118,
25.77781089, 31.30624108, 37.05877236, 43.9436536, 51.80306637,
60.79867148, 71.14363376, 82.90910141, 96.50069158, 112.08428368,
129.96721968, 150.4277597, 173.74997634, 200.86037462, 231.59802942};
ASSERT_EQ(output.size(), kChannelCount);
for (int i = 0; i < kChannelCount; ++i) {
EXPECT_NEAR(output[i], expected[i], 1e-04);
}
}
TEST(MfccMelFilterbankTest, IgnoresExistingContentOfOutputVector) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::vector<double> input;
std::vector<double> output;
  filterbank.Initialize(kSampleCount, /*input_sample_rate=*/22050,
                        /*output_channel_count=*/20,
                        /*lower_frequency_limit=*/20.0,
                        /*upper_frequency_limit=*/4000.0);
input.assign(kSampleCount, 1.0);
filterbank.Compute(input, &output);
for (const double value : output) {
EXPECT_LE(0.0, value);
}
input.assign(kSampleCount, 0.0);
filterbank.Compute(input, &output);
for (const double value : output) {
EXPECT_EQ(0.0, value);
}
}
TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxIntValue) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::size_t num_channels = std::numeric_limits<int>::max();
  bool initialized = filterbank.Initialize(
      kSampleCount, /*input_sample_rate=*/2,
      /*output_channel_count=*/num_channels,
      /*lower_frequency_limit=*/1.0, /*upper_frequency_limit=*/5.0);
EXPECT_FALSE(initialized);
}
TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxSize) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::size_t num_channels = std::vector<double>().max_size() + 1;
  bool initialized = filterbank.Initialize(
      kSampleCount, /*input_sample_rate=*/2,
      /*output_channel_count=*/num_channels,
      /*lower_frequency_limit=*/1.0, /*upper_frequency_limit=*/5.0);
EXPECT_FALSE(initialized);
}
} |
141 | #ifndef QUICHE_COMMON_QUICHE_RANDOM_H_
#define QUICHE_COMMON_QUICHE_RANDOM_H_
#include <cstddef>
#include <cstdint>
#include "quiche/common/platform/api/quiche_export.h"
namespace quiche {
class QUICHE_EXPORT QuicheRandom {
public:
virtual ~QuicheRandom() {}
static QuicheRandom* GetInstance();
virtual void RandBytes(void* data, size_t len) = 0;
virtual uint64_t RandUint64() = 0;
virtual void InsecureRandBytes(void* data, size_t len) = 0;
virtual uint64_t InsecureRandUint64() = 0;
};
}
#endif
#include "quiche/common/quiche_random.h"
#include <cstdint>
#include <cstring>
#include "openssl/rand.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
inline uint64_t Xoshiro256InitializeRngStateMember() {
uint64_t result;
RAND_bytes(reinterpret_cast<uint8_t*>(&result), sizeof(result));
return result;
}
inline uint64_t Xoshiro256PlusPlusRotLeft(uint64_t x, int k) {
return (x << k) | (x >> (64 - k));
}
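// One step of the xoshiro256++ generator, with per-thread state lazily seeded
// from RAND_bytes(). Fast but not cryptographically secure; it backs only the
// Insecure* methods below.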
uint64_t Xoshiro256PlusPlus() {
static thread_local uint64_t rng_state[4] = {
Xoshiro256InitializeRngStateMember(),
Xoshiro256InitializeRngStateMember(),
Xoshiro256InitializeRngStateMember(),
Xoshiro256InitializeRngStateMember()};
const uint64_t result =
Xoshiro256PlusPlusRotLeft(rng_state[0] + rng_state[3], 23) + rng_state[0];
const uint64_t t = rng_state[1] << 17;
rng_state[2] ^= rng_state[0];
rng_state[3] ^= rng_state[1];
rng_state[1] ^= rng_state[2];
rng_state[0] ^= rng_state[3];
rng_state[2] ^= t;
rng_state[3] = Xoshiro256PlusPlusRotLeft(rng_state[3], 45);
return result;
}
class DefaultQuicheRandom : public QuicheRandom {
public:
DefaultQuicheRandom() {}
DefaultQuicheRandom(const DefaultQuicheRandom&) = delete;
DefaultQuicheRandom& operator=(const DefaultQuicheRandom&) = delete;
~DefaultQuicheRandom() override {}
void RandBytes(void* data, size_t len) override;
uint64_t RandUint64() override;
void InsecureRandBytes(void* data, size_t len) override;
uint64_t InsecureRandUint64() override;
};
void DefaultQuicheRandom::RandBytes(void* data, size_t len) {
RAND_bytes(reinterpret_cast<uint8_t*>(data), len);
}
uint64_t DefaultQuicheRandom::RandUint64() {
uint64_t value;
RandBytes(&value, sizeof(value));
return value;
}
void DefaultQuicheRandom::InsecureRandBytes(void* data, size_t len) {
while (len >= sizeof(uint64_t)) {
uint64_t random_bytes64 = Xoshiro256PlusPlus();
memcpy(data, &random_bytes64, sizeof(uint64_t));
data = reinterpret_cast<char*>(data) + sizeof(uint64_t);
len -= sizeof(uint64_t);
}
if (len > 0) {
QUICHE_DCHECK_LT(len, sizeof(uint64_t));
uint64_t random_bytes64 = Xoshiro256PlusPlus();
memcpy(data, &random_bytes64, len);
}
}
uint64_t DefaultQuicheRandom::InsecureRandUint64() {
return Xoshiro256PlusPlus();
}
}
QuicheRandom* QuicheRandom::GetInstance() {
static DefaultQuicheRandom* random = new DefaultQuicheRandom();
return random;
}
} | #include "quiche/common/quiche_random.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
TEST(QuicheRandom, RandBytes) {
unsigned char buf1[16];
unsigned char buf2[16];
memset(buf1, 0xaf, sizeof(buf1));
memset(buf2, 0xaf, sizeof(buf2));
ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
auto rng = QuicheRandom::GetInstance();
rng->RandBytes(buf1, sizeof(buf1));
EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
TEST(QuicheRandom, RandUint64) {
auto rng = QuicheRandom::GetInstance();
uint64_t value1 = rng->RandUint64();
uint64_t value2 = rng->RandUint64();
EXPECT_NE(value1, value2);
}
TEST(QuicheRandom, InsecureRandBytes) {
unsigned char buf1[16];
unsigned char buf2[16];
memset(buf1, 0xaf, sizeof(buf1));
memset(buf2, 0xaf, sizeof(buf2));
ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
auto rng = QuicheRandom::GetInstance();
rng->InsecureRandBytes(buf1, sizeof(buf1));
EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
TEST(QuicheRandom, InsecureRandUint64) {
auto rng = QuicheRandom::GetInstance();
uint64_t value1 = rng->InsecureRandUint64();
uint64_t value2 = rng->InsecureRandUint64();
EXPECT_NE(value1, value2);
}
}
} |
142 | #ifndef QUICHE_QUIC_CORE_CRYPTO_AES_128_GCM_DECRYPTER_H_
#define QUICHE_QUIC_CORE_CRYPTO_AES_128_GCM_DECRYPTER_H_
#include <cstdint>
#include "quiche/quic/core/crypto/aes_base_decrypter.h"
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
class QUICHE_EXPORT Aes128GcmDecrypter : public AesBaseDecrypter {
public:
enum {
kAuthTagSize = 16,
};
Aes128GcmDecrypter();
Aes128GcmDecrypter(const Aes128GcmDecrypter&) = delete;
Aes128GcmDecrypter& operator=(const Aes128GcmDecrypter&) = delete;
~Aes128GcmDecrypter() override;
uint32_t cipher_id() const override;
};
}
#endif
#include "quiche/quic/core/crypto/aes_128_gcm_decrypter.h"
#include "openssl/aead.h"
#include "openssl/tls1.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
namespace {
const size_t kKeySize = 16;
const size_t kNonceSize = 12;
}
Aes128GcmDecrypter::Aes128GcmDecrypter()
: AesBaseDecrypter(EVP_aead_aes_128_gcm, kKeySize, kAuthTagSize, kNonceSize,
true) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
Aes128GcmDecrypter::~Aes128GcmDecrypter() {}
uint32_t Aes128GcmDecrypter::cipher_id() const {
return TLS1_CK_AES_128_GCM_SHA256;
}
} | #include "quiche/quic/core/crypto/aes_128_gcm_decrypter.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestGroupInfo {
size_t key_len;
size_t iv_len;
size_t pt_len;
size_t aad_len;
size_t tag_len;
};
struct TestVector {
const char* key;
const char* iv;
const char* ct;
const char* aad;
const char* tag;
const char* pt;
};
const TestGroupInfo test_group_info[] = {
{128, 96, 0, 0, 128}, {128, 96, 0, 128, 128}, {128, 96, 128, 0, 128},
{128, 96, 408, 160, 128}, {128, 96, 408, 720, 128}, {128, 96, 104, 0, 128},
};
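// Decryption test vectors in the style of the NIST CAVP AES-GCM response
// files. A null |pt| marks a vector whose tag should fail authentication,
// and an all-null row terminates each group.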
const TestVector test_group_0[] = {
{"cf063a34d4a9a76c2c86787d3f96db71", "113b9785971864c83b01c787", "", "",
"72ac8493e3a5228b5d130a69d2510e42", ""},
{
"a49a5e26a2f8cb63d05546c2a62f5343", "907763b19b9b4ab6bd4f0281", "", "",
"a2be08210d8c470a8df6e8fbd79ec5cf",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_1[] = {
{
"d1f6af919cde85661208bdce0c27cb22", "898c6929b435017bf031c3c5", "",
"7c5faa40e636bbc91107e68010c92b9f", "ae45f11777540a2caeb128be8092468a",
nullptr
},
{"2370e320d4344208e0ff5683f243b213", "04dbb82f044d30831c441228", "",
"d43a8e5089eea0d026c03a85178b27da", "2a049c049d25aa95969b451d93c31c6e",
""},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_2[] = {
{"e98b72a9881a84ca6b76e0f43e68647a", "8b23299fde174053f3d652ba",
"5a3c1cf1985dbb8bed818036fdd5ab42", "", "23c7ab0f952b7091cd324835043b5eb5",
"28286a321293253c3e0aa2704a278032"},
{"33240636cd3236165f1a553b773e728e", "17c4d61493ecdc8f31700b12",
"47bb7e23f7bdfe05a8091ac90e4f8b2e", "", "b723c70e931d9785f40fd4ab1d612dc9",
"95695a5b12f2870b9cc5fdc8f218a97d"},
{
"5164df856f1e9cac04a79b808dc5be39", "e76925d5355e0584ce871b2b",
"0216c899c88d6e32c958c7e553daa5bc", "",
"a145319896329c96df291f64efbe0e3a",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_3[] = {
{"af57f42c60c0fc5a09adb81ab86ca1c3", "a2dc01871f37025dc0fc9a79",
"b9a535864f48ea7b6b1367914978f9bfa087d854bb0e269bed8d279d2eea1210e48947"
"338b22f9bad09093276a331e9c79c7f4",
"41dc38988945fcb44faf2ef72d0061289ef8efd8",
"4f71e72bde0018f555c5adcce062e005",
"3803a0727eeb0ade441e0ec107161ded2d425ec0d102f21f51bf2cf9947c7ec4aa7279"
"5b2f69b041596e8817d0a3c16f8fadeb"},
{"ebc753e5422b377d3cb64b58ffa41b61", "2e1821efaced9acf1f241c9b",
"069567190554e9ab2b50a4e1fbf9c147340a5025fdbd201929834eaf6532325899ccb9"
"f401823e04b05817243d2142a3589878",
"b9673412fd4f88ba0e920f46dd6438ff791d8eef",
"534d9234d2351cf30e565de47baece0b",
"39077edb35e9c5a4b1e4c2a6b9bb1fce77f00f5023af40333d6d699014c2bcf4209c18"
"353a18017f5b36bfc00b1f6dcb7ed485"},
{
"52bdbbf9cf477f187ec010589cb39d58", "d3be36d3393134951d324b31",
"700188da144fa692cf46e4a8499510a53d90903c967f7f13e8a1bd8151a74adc4fe63e"
"32b992760b3a5f99e9a47838867000a9",
"93c4fc6a4135f54d640b0c976bf755a06a292c33",
"8ca4e38aa3dfa6b1d0297021ccf3ea5f",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_4[] = {
{"da2bb7d581493d692380c77105590201", "44aa3e7856ca279d2eb020c6",
"9290d430c9e89c37f0446dbd620c9a6b34b1274aeb6f911f75867efcf95b6feda69f1a"
"f4ee16c761b3c9aeac3da03aa9889c88",
"4cd171b23bddb3a53cdf959d5c1710b481eb3785a90eb20a2345ee00d0bb7868c367ab"
"12e6f4dd1dee72af4eee1d197777d1d6499cc541f34edbf45cda6ef90b3c024f9272d7"
"2ec1909fb8fba7db88a4d6f7d3d925980f9f9f72",
"9e3ac938d3eb0cadd6f5c9e35d22ba38",
"9bbf4c1a2742f6ac80cb4e8a052e4a8f4f07c43602361355b717381edf9fabd4cb7e3a"
"d65dbd1378b196ac270588dd0621f642"},
{"d74e4958717a9d5c0e235b76a926cae8", "0b7471141e0c70b1995fd7b1",
"e701c57d2330bf066f9ff8cf3ca4343cafe4894651cd199bdaaa681ba486b4a65c5a22"
"b0f1420be29ea547d42c713bc6af66aa",
"4a42b7aae8c245c6f1598a395316e4b8484dbd6e64648d5e302021b1d3fa0a38f46e22"
"bd9c8080b863dc0016482538a8562a4bd0ba84edbe2697c76fd039527ac179ec5506cf"
"34a6039312774cedebf4961f3978b14a26509f96",
"e192c23cb036f0b31592989119eed55d",
"840d9fb95e32559fb3602e48590280a172ca36d9b49ab69510f5bd552bfab7a306f85f"
"f0a34bc305b88b804c60b90add594a17"},
{
"1986310c725ac94ecfe6422e75fc3ee7", "93ec4214fa8e6dc4e3afc775",
"b178ec72f85a311ac4168f42a4b2c23113fbea4b85f4b9dabb74e143eb1b8b0a361e02"
"43edfd365b90d5b325950df0ada058f9",
"e80b88e62c49c958b5e0b8b54f532d9ff6aa84c8a40132e93e55b59fc24e8decf28463"
"139f155d1e8ce4ee76aaeefcd245baa0fc519f83a5fb9ad9aa40c4b21126013f576c42"
"72c2cb136c8fd091cc4539877a5d1e72d607f960",
"8b347853f11d75e81e8a95010be81f17",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_5[] = {
{"387218b246c1a8257748b56980e50c94", "dd7e014198672be39f95b69d",
"cdba9e73eaf3d38eceb2b04a8d", "", "ecf90f4a47c9c626d6fb2c765d201556",
"48f5b426baca03064554cc2b30"},
{"294de463721e359863887c820524b3d4", "3338b35c9d57a5d28190e8c9",
"2f46634e74b8e4c89812ac83b9", "", "dabd506764e68b82a7e720aa18da0abe",
"46a2e55c8e264df211bd112685"},
{"28ead7fd2179e0d12aa6d5d88c58c2dc", "5055347f18b4d5add0ae5c41",
"142d8210c3fb84774cdbd0447a", "", "5fd321d9cdb01952dc85f034736c2a7d",
"3b95b981086ee73cc4d0cc1422"},
{
"7d7b6c988137b8d470c57bf674a09c87", "9edf2aa970d016ac962e1fd8",
"a85b66c3cb5eab91d5bdc8bc0e", "", "dc054efc01f3afd21d9c2484819f569a",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector* const test_group_array[] = {
test_group_0, test_group_1, test_group_2,
test_group_3, test_group_4, test_group_5,
};
}
namespace quic {
namespace test {
QuicData* DecryptWithNonce(Aes128GcmDecrypter* decrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view ciphertext) {
decrypter->SetIV(nonce);
std::unique_ptr<char[]> output(new char[ciphertext.length()]);
size_t output_length = 0;
const bool success =
decrypter->DecryptPacket(0, associated_data, ciphertext, output.get(),
&output_length, ciphertext.length());
if (!success) {
return nullptr;
}
return new QuicData(output.release(), output_length, true);
}
class Aes128GcmDecrypterTest : public QuicTest {};
TEST_F(Aes128GcmDecrypterTest, Decrypt) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(test_group_array); i++) {
SCOPED_TRACE(i);
const TestVector* test_vectors = test_group_array[i];
const TestGroupInfo& test_info = test_group_info[i];
for (size_t j = 0; test_vectors[j].key != nullptr; j++) {
bool has_pt = test_vectors[j].pt;
std::string key;
std::string iv;
std::string ct;
std::string aad;
std::string tag;
std::string pt;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].ct, &ct));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].tag, &tag));
if (has_pt) {
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].pt, &pt));
}
EXPECT_EQ(test_info.key_len, key.length() * 8);
EXPECT_EQ(test_info.iv_len, iv.length() * 8);
EXPECT_EQ(test_info.pt_len, ct.length() * 8);
EXPECT_EQ(test_info.aad_len, aad.length() * 8);
EXPECT_EQ(test_info.tag_len, tag.length() * 8);
if (has_pt) {
EXPECT_EQ(test_info.pt_len, pt.length() * 8);
}
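      // The decrypter consumes the ciphertext with the tag appended.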
std::string ciphertext = ct + tag;
Aes128GcmDecrypter decrypter;
ASSERT_TRUE(decrypter.SetKey(key));
std::unique_ptr<QuicData> decrypted(DecryptWithNonce(
&decrypter, iv,
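          // Pass a null string_view (rather than an empty one) when the
          // vector has no AAD, to exercise the empty-AAD code path.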
aad.length() ? aad : absl::string_view(), ciphertext));
if (!decrypted) {
EXPECT_FALSE(has_pt);
continue;
}
EXPECT_TRUE(has_pt);
ASSERT_EQ(pt.length(), decrypted->length());
quiche::test::CompareCharArraysWithHexError(
"plaintext", decrypted->data(), pt.length(), pt.data(), pt.length());
}
}
}
TEST_F(Aes128GcmDecrypterTest, GenerateHeaderProtectionMask) {
Aes128GcmDecrypter decrypter;
std::string key;
std::string sample;
std::string expected_mask;
ASSERT_TRUE(absl::HexStringToBytes("d9132370cb18476ab833649cf080d970", &key));
ASSERT_TRUE(
absl::HexStringToBytes("d1d7998068517adb769b48b924a32c47", &sample));
ASSERT_TRUE(absl::HexStringToBytes("b132c37d6164da4ea4dc9b763aceec27",
&expected_mask));
QuicDataReader sample_reader(sample.data(), sample.size());
ASSERT_TRUE(decrypter.SetHeaderProtectionKey(key));
std::string mask = decrypter.GenerateHeaderProtectionMask(&sample_reader);
quiche::test::CompareCharArraysWithHexError(
"header protection mask", mask.data(), mask.size(), expected_mask.data(),
expected_mask.size());
}
}
} |
143 | #ifndef AROLLA_SERIALIZATION_CONTAINER_PROTO_H_
#define AROLLA_SERIALIZATION_CONTAINER_PROTO_H_
#include <cstdint>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/serialization_base/base.pb.h"
#include "arolla/serialization_base/container.h"
namespace arolla::serialization_base {
class ContainerProtoBuilder final : public ContainerBuilder {
public:
static constexpr int kContainerProtoVersion = 1;
absl::StatusOr<uint64_t> Add(DecodingStepProto&& decoding_step_proto) final;
ContainerProto Finish() &&;
private:
ContainerProto result_;
};
absl::Status ProcessContainerProto(const ContainerProto& container_proto,
ContainerProcessor& container_processor);
}
#endif
#include "arolla/serialization_base/container_proto.h"
#include <cstdint>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/serialization_base/base.pb.h"
#include "arolla/serialization_base/container.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::serialization_base {
absl::StatusOr<uint64_t> ContainerProtoBuilder::Add(
DecodingStepProto&& decoding_step_proto) {
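  // Codecs and output indices are stored in dedicated repeated fields; every
  // other step kind is appended to decoding_steps. The returned index is
  // relative to the field the step landed in.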
switch (decoding_step_proto.type_case()) {
case DecodingStepProto::kCodec:
*result_.add_codecs() = std::move(*decoding_step_proto.mutable_codec());
return result_.codecs_size() - 1;
case DecodingStepProto::kOutputValueIndex:
result_.add_output_value_indices(
decoding_step_proto.output_value_index());
return result_.output_value_indices_size() - 1;
case DecodingStepProto::kOutputExprIndex:
result_.add_output_expr_indices(decoding_step_proto.output_expr_index());
return result_.output_expr_indices_size() - 1;
default:
*result_.add_decoding_steps() = std::move(decoding_step_proto);
return result_.decoding_steps_size() - 1;
}
}
ContainerProto ContainerProtoBuilder::Finish() && {
result_.set_version(kContainerProtoVersion);
return std::move(result_);
}
absl::Status ProcessContainerProto(const ContainerProto& container_proto,
ContainerProcessor& container_processor) {
constexpr int kContainerProtoOldVersion = 1;
constexpr int kContainerProtoNewVersion = 2;
if (!container_proto.has_version()) {
return absl::InvalidArgumentError("missing container.version");
}
if (container_proto.version() != kContainerProtoOldVersion &&
container_proto.version() != kContainerProtoNewVersion) {
return absl::InvalidArgumentError(
absl::StrFormat("expected container.version to be %d or %d, got %d",
kContainerProtoOldVersion, kContainerProtoNewVersion,
container_proto.version()));
}
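  // Replay the container to the processor: codecs first, then decoding steps,
  // then output value/expr indices (the latter are reported with step index 0).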
DecodingStepProto decoding_step;
for (int codec_index = 0; codec_index < container_proto.codecs_size();
++codec_index) {
*decoding_step.mutable_codec() = container_proto.codecs(codec_index);
RETURN_IF_ERROR(
container_processor.OnDecodingStep(codec_index, decoding_step))
<< "while handling codecs[" << codec_index << "]";
}
for (int decoding_step_index = 0;
decoding_step_index < container_proto.decoding_steps_size();
++decoding_step_index) {
RETURN_IF_ERROR(container_processor.OnDecodingStep(
decoding_step_index,
container_proto.decoding_steps(decoding_step_index)))
<< "while handling decoding_steps[" << decoding_step_index << "]";
}
for (int i = 0; i < container_proto.output_value_indices_size(); ++i) {
decoding_step.set_output_value_index(
container_proto.output_value_indices(i));
RETURN_IF_ERROR(container_processor.OnDecodingStep(0, decoding_step))
<< "while handling output_value_indices[" << i << "]";
}
for (int i = 0; i < container_proto.output_expr_indices_size(); ++i) {
decoding_step.set_output_expr_index(container_proto.output_expr_indices(i));
RETURN_IF_ERROR(container_processor.OnDecodingStep(0, decoding_step))
<< "while handling output_expr_indices[" << i << "]";
}
return absl::OkStatus();
}
} | #include "arolla/serialization_base/container_proto.h"
#include <cstdint>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "arolla/serialization_base/base.pb.h"
#include "arolla/serialization_base/container.h"
#include "arolla/util/testing/equals_proto.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::serialization_base {
namespace {
using ::arolla::testing::EqualsProto;
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::Return;
TEST(ContainerProtoBuilderTest, TrivialBehaviour) {
ContainerProtoBuilder container_builder;
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.mutable_codec()->set_name("codec1");
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(0));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.mutable_leaf_node()->set_leaf_key("key1");
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(0));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.set_output_expr_index(0);
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(0));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.mutable_codec()->set_name("codec2");
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(1));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.mutable_placeholder_node()->set_placeholder_key("key2");
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(1));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.mutable_value();
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(2));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.set_output_expr_index(1);
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(1));
}
{
DecodingStepProto decoding_step_proto;
decoding_step_proto.set_output_value_index(2);
ASSERT_THAT(container_builder.Add(std::move(decoding_step_proto)),
IsOkAndHolds(0));
}
EXPECT_TRUE(EqualsProto(
std::move(container_builder).Finish(),
R"pb(
version: 1
codecs { name: "codec1" }
codecs { name: "codec2" }
decoding_steps { leaf_node { leaf_key: "key1" } }
decoding_steps { placeholder_node { placeholder_key: "key2" } }
decoding_steps { value {} }
output_value_indices: [ 2 ]
output_expr_indices: [ 0, 1 ]
)pb"));
}
class MockContainerProcessor : public ContainerProcessor {
public:
MOCK_METHOD(absl::Status, OnDecodingStep,
(uint64_t, const DecodingStepProto& decoding_step_proto),
(override));
};
TEST(ProcessContainerProto, TrivialBehaviour) {
ContainerProto container_proto;
container_proto.set_version(1);
container_proto.add_codecs()->set_name("codec1");
container_proto.add_codecs()->set_name("codec2");
container_proto.add_decoding_steps()->mutable_leaf_node()->set_leaf_key(
"key1");
container_proto.add_decoding_steps()
->mutable_placeholder_node()
->set_placeholder_key("key2");
container_proto.add_decoding_steps()->mutable_value();
container_proto.add_output_value_indices(2);
container_proto.add_output_expr_indices(0);
container_proto.add_output_expr_indices(1);
MockContainerProcessor mock_container_processor;
{
InSequence seq;
EXPECT_CALL(
mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(codec: { name: "codec1" })pb")));
EXPECT_CALL(
mock_container_processor,
OnDecodingStep(1, EqualsProto(R"pb(codec: { name: "codec2" })pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(
0, EqualsProto(R"pb(leaf_node: { leaf_key: "key1" })pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(1, EqualsProto(R"pb(placeholder_node: {
placeholder_key: "key2"
})pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(2, EqualsProto(R"pb(value: {})pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(output_value_index: 2)pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(output_expr_index: 0)pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(output_expr_index: 1)pb")));
}
EXPECT_OK(ProcessContainerProto(container_proto, mock_container_processor));
}
TEST(ProcessContainerProto, MissingContainerVersion) {
ContainerProto container_proto;
MockContainerProcessor mock_container_processor;
EXPECT_THAT(ProcessContainerProto(container_proto, mock_container_processor),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("missing container.version")));
}
TEST(ProcessContainerProto, WrongContainerVersion) {
ContainerProto container_proto;
container_proto.set_version(100);
MockContainerProcessor mock_container_processor;
EXPECT_THAT(
ProcessContainerProto(container_proto, mock_container_processor),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected container.version to be 1 or 2, got 100")));
}
TEST(ProcessContainerProto, ProcessorFailureOnCodec) {
ContainerProto container_proto;
container_proto.set_version(1);
container_proto.add_codecs()->set_name("codec1");
container_proto.add_codecs()->set_name("codec2");
MockContainerProcessor mock_container_processor;
{
InSequence seq;
EXPECT_CALL(
mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(codec: { name: "codec1" })pb")));
EXPECT_CALL(
mock_container_processor,
OnDecodingStep(1, EqualsProto(R"pb(codec: { name: "codec2" })pb")))
.WillOnce(Return(absl::FailedPreconditionError("stop")));
}
EXPECT_THAT(ProcessContainerProto(container_proto, mock_container_processor),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("stop; while handling codecs[1]")));
}
TEST(ProcessContainerProto, ProcessorFailureOnDecodingStep) {
ContainerProto container_proto;
container_proto.set_version(1);
container_proto.add_decoding_steps()->mutable_leaf_node()->set_leaf_key(
"key1");
container_proto.add_decoding_steps()->mutable_value();
MockContainerProcessor mock_container_processor;
{
InSequence seq;
EXPECT_CALL(mock_container_processor,
OnDecodingStep(
0, EqualsProto(R"pb(leaf_node: { leaf_key: "key1" })pb")));
EXPECT_CALL(mock_container_processor,
OnDecodingStep(1, EqualsProto(R"pb(value {})pb")))
.WillOnce(Return(absl::FailedPreconditionError("stop")));
}
EXPECT_THAT(ProcessContainerProto(container_proto, mock_container_processor),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("stop; while handling decoding_steps[1]")));
}
TEST(ProcessContainerProto, ProcessorFailureOnOutputValueIndex) {
ContainerProto container_proto;
container_proto.set_version(1);
container_proto.add_output_value_indices(1);
MockContainerProcessor mock_container_processor;
EXPECT_CALL(mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(output_value_index: 1)pb")))
.WillOnce(Return(absl::FailedPreconditionError("stop")));
EXPECT_THAT(
ProcessContainerProto(container_proto, mock_container_processor),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("stop; while handling output_value_indices[0]")));
}
TEST(ProcessContainerProto, ProcessorFailureOnOutputExprIndex) {
ContainerProto container_proto;
container_proto.set_version(1);
container_proto.add_output_expr_indices(2);
MockContainerProcessor mock_container_processor;
EXPECT_CALL(mock_container_processor,
OnDecodingStep(0, EqualsProto(R"pb(output_expr_index: 2)pb")))
.WillOnce(Return(absl::FailedPreconditionError("stop")));
EXPECT_THAT(
ProcessContainerProto(container_proto, mock_container_processor),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("stop; while handling output_expr_indices[0]")));
}
}
} |
144 | #ifndef XLA_SERVICE_COLLECTIVES_SCHEDULE_LINEARIZER_H_
#define XLA_SERVICE_COLLECTIVES_SCHEDULE_LINEARIZER_H_
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/util.h"
namespace xla {
class CollectivesScheduleLinearizer : public HloModulePass {
public:
explicit CollectivesScheduleLinearizer(HloModulePredicate is_enabled = {})
: is_enabled_(is_enabled) {}
absl::string_view name() const override {
return "collectives-schedule-linearizer";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
HloModulePredicate is_enabled_;
};
}
#endif
#include "xla/service/collectives_schedule_linearizer.h"
#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::StatusOr<bool> CollectivesScheduleLinearizer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (is_enabled_ && !is_enabled_(module)) {
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::unique_ptr<HloReachabilityMap> reachability;
HloInstruction* prev_done = nullptr;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
auto* next = DynCast<HloCollectiveInstruction>(inst);
if (!next) {
continue;
}
if (!reachability) {
reachability = HloReachabilityMap::Build(computation);
}
HloInstruction* start = next;
HloInstruction* done = next;
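      // For async collectives, the start op has exactly one user: the
      // matching done op.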
switch (next->opcode()) {
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllGatherStart:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kAsyncStart:
CHECK_EQ(start->user_count(), 1);
done = start->users()[0];
break;
default:
break;
}
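      // Only serialize collectives that are not already ordered relative to
      // each other.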
if (prev_done && !reachability->IsConnected(start, prev_done)) {
TF_RETURN_IF_ERROR(prev_done->AddControlDependencyTo(next));
VLOG(1) << "Adding control dependency from " << prev_done->ToString()
<< " to " << start->ToString();
changed = true;
}
prev_done = done;
}
}
return changed;
}
} | #include "xla/service/collectives_schedule_linearizer.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace m = match;
int64_t CountControlEdges(const HloComputation& computation) {
int64_t count = 0;
for (const auto& instruction : computation.instructions()) {
count += instruction->control_successors().size();
}
return count;
}
class CollectivesScheduleLinearizerTest : public HloTestBase {
protected:
void InsertCollectivesSchedule(HloModule* module) {
CollectivesScheduleLinearizer collectives_schedule_linearizer;
ASSERT_IS_OK(collectives_schedule_linearizer.Run(module).status());
}
};
TEST_F(CollectivesScheduleLinearizerTest, FixOrdering) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
HloInstruction *c1 = nullptr, *c2 = nullptr;
for (HloInstruction* instr : module->entry_computation()->instructions()) {
if (Match(instr, m::AllReduce(m::Parameter(0)))) {
c1 = instr;
}
if (Match(instr, m::AllReduce(m::Parameter(1)))) {
c2 = instr;
}
}
EXPECT_TRUE(c1 != nullptr && c2 != nullptr);
EXPECT_TRUE(absl::c_linear_search(c2->control_predecessors(), c1));
}
TEST_F(CollectivesScheduleLinearizerTest, NoFixRequired) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum, control-predecessors={c1}
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
}
TEST_F(CollectivesScheduleLinearizerTest, DependentCollectives) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(c1), replica_groups={}, to_apply=sum
ROOT out = f32[100] add(c1, c2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 0);
}
TEST_F(CollectivesScheduleLinearizerTest, NonPostorder) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
c1 = f32[100] all-reduce(p0), replica_groups={}, to_apply=sum
c2 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
c3 = f32[100] all-reduce(p1), replica_groups={}, to_apply=sum
t = f32[100] add(c1, c2)
ROOT out = f32[100] add(t, c3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(
module->entry_computation()
->GetInstructionWithName("c3")
->AddControlDependencyTo(
module->entry_computation()->GetInstructionWithName("c1")));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 2);
}
TEST_F(CollectivesScheduleLinearizerTest, AsyncOrdering) {
absl::string_view hlo_string = R"(
HloModule module
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = f32[] add(a, b)
}
ENTRY entry {
p0 = f32[100] parameter(0), parameter_replication={false}
p1 = f32[100] parameter(1), parameter_replication={false}
ars0 = f32[100] all-reduce-start(p0), replica_groups={}, to_apply=sum
ard0 = f32[100] all-reduce-done(ars0)
ars1 = f32[100] all-reduce-start(p1), replica_groups={}, to_apply=sum
ard1 = f32[100] all-reduce-done(ars1)
ROOT out = f32[100] add(ard0, ard1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCollectivesSchedule(module.get());
EXPECT_EQ(CountControlEdges(*module->entry_computation()), 1);
const HloInstruction *root = module->entry_computation()->root_instruction();
const HloInstruction *ard0 = root->operand(0);
const HloInstruction *ard1 = root->operand(1);
EXPECT_EQ(ard0->opcode(), HloOpcode::kAllReduceDone);
EXPECT_EQ(ard1->opcode(), HloOpcode::kAllReduceDone);
const HloInstruction *ars1 = ard1->operand(0);
EXPECT_EQ(ars1->opcode(), HloOpcode::kAllReduceStart);
EXPECT_TRUE(absl::c_linear_search(ars1->control_predecessors(), ard0));
}
}
} |
145 | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_FUNC_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_COMMON_FUNC_H_
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
namespace mlir::quant {
func::FuncOp FindMainFuncOp(ModuleOp module_op);
}
#endif
#include <dlfcn.h>
#include <tuple>
#include <type_traits>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/dialects/util.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/registration.h"
namespace mlir {
namespace interpreter {
namespace {
template <typename T>
bool TypeMatches(mlir::Type type) {
if constexpr (std::is_same_v<T, float>) {
return type.isF32();
} else if constexpr (std::is_same_v<T, double>) {
return type.isF64();
} else {
return false;
}
}
template <typename Dummy>
bool TypesMatch(ArrayRef<mlir::Type> types) {
return types.empty();
}
template <typename Dummy, typename T, typename... R>
bool TypesMatch(ArrayRef<mlir::Type> types) {
if (types.empty() || !TypeMatches<T>(types.front())) return false;
return TypesMatch<Dummy, R...>(types.drop_front());
}
template <int n, typename... Args>
using Arg = std::tuple_element_t<n, std::tuple<Args...>>;
template <typename Ret, typename... Args>
bool TryCall(void* sym, func::FuncOp callee,
MutableArrayRef<InterpreterValue> args, InterpreterValue& ret) {
if (args.size() != callee.getNumArguments() || callee.getNumResults() != 1) {
return false;
}
if (!TypeMatches<Ret>(callee.getResultTypes()[0])) {
return false;
}
if (!TypesMatch<void, Args...>(callee.getArgumentTypes())) {
return false;
}
static_assert(sizeof...(Args) <= 2);
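  // The callee signature matches Ret(Args...), so the raw symbol can be
  // invoked through a typed function pointer.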
using FnType = Ret (*)(Args...);
auto fn = reinterpret_cast<FnType>(sym);
constexpr int n = sizeof...(Args);
if constexpr (n == 1) {
ret = {fn(std::get<Arg<0, Args...>>(args[0].storage))};
} else {
static_assert(n == 2);
ret = {fn(std::get<Arg<0, Args...>>(args[0].storage),
std::get<Arg<1, Args...>>(args[1].storage))};
}
return true;
}
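// Functions with a body are interpreted directly; external declarations are
// resolved against the host process via dlsym.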
llvm::SmallVector<InterpreterValue> Call(MutableArrayRef<InterpreterValue> args,
mlir::Operation* op,
InterpreterState& state) {
auto call = llvm::cast<func::CallOp>(op);
auto callee =
llvm::cast<func::FuncOp>(state.GetSymbols().lookup(call.getCallee()));
if (callee->getRegion(0).hasOneBlock()) {
return Interpret(state, callee.getRegion(), args);
}
void* sym = dlsym(RTLD_DEFAULT, callee.getSymName().str().c_str());
if (sym == nullptr) {
state.AddFailure("callee not found");
return {};
}
InterpreterValue result;
if (TryCall<float, float>(sym, callee, args, result) ||
TryCall<float, float, float>(sym, callee, args, result) ||
TryCall<double, double>(sym, callee, args, result) ||
TryCall<double, double, double>(sym, callee, args, result)) {
return {result};
}
state.AddFailure("unsupported call target");
return {};
}
REGISTER_MLIR_INTERPRETER_OP("func.call", Call);
REGISTER_MLIR_INTERPRETER_OP("func.return", NoOpTerminator);
}
}
} | #include "tensorflow/compiler/mlir/quantization/common/func.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir::quant {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
using FindMainFuncOpTest = ::mlir::quant::QuantizationTestBase;
TEST_F(FindMainFuncOpTest, ReturnsMainFuncOp) {
constexpr absl::string_view kModuleWithMainFunc = R"mlir(
module {
func.func @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithMainFunc);
EXPECT_THAT(*module_op, NotNull());
func::FuncOp main_func_op = FindMainFuncOp(*module_op);
EXPECT_THAT(main_func_op, NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateMainFunc = R"mlir(
module {
func.func private @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsServingDefaultFuncOp) {
constexpr absl::string_view kModuleWithServingDefaultFunc = R"mlir(
module {
func.func @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenServingDefaultFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateServingDefaultFunc = R"mlir(
module {
func.func private @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncNotFound) {
constexpr absl::string_view kModuleWithNoMainFunc = R"mlir(
module {
func.func @foo() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithNoMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
}
} |
146 | #ifndef XLA_PJRT_PJRT_FUTURE_H_
#define XLA_PJRT_PJRT_FUTURE_H_
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/logging.h"
namespace xla {
template <class T = void>
class PjRtFuture;
namespace internal {
template <class T, bool unique>
class PjRtFutureBase;
}
PjRtFuture<> JoinFutures(absl::Span<const PjRtFuture<>> futures);
class ScopedAsyncTrackingEvent {
public:
virtual ~ScopedAsyncTrackingEvent() = default;
private:
template <class T, bool unique>
friend class internal::PjRtFutureBase;
virtual void AddDependency(tsl::RCReference<tsl::AsyncValue> dependency) = 0;
};
struct PjRtFutureHelpers {
public:
struct ProfilingKeys {
uint64_t traceme_context_id = -1;
};
using OnBlockStartFn = std::function<ProfilingKeys()>;
using OnBlockEndFn = std::function<void(ProfilingKeys)>;
};
namespace internal {
template <typename T>
struct IsStatusOr : public std::false_type {};
template <typename T>
struct IsStatusOr<absl::StatusOr<T>> : public std::true_type {};
template <bool unique>
class PjRtFutureMoveControl;
template <>
class PjRtFutureMoveControl<true> {
protected:
PjRtFutureMoveControl() = default;
PjRtFutureMoveControl(const PjRtFutureMoveControl&) = delete;
PjRtFutureMoveControl& operator=(const PjRtFutureMoveControl&) = delete;
PjRtFutureMoveControl(PjRtFutureMoveControl&&) = default;
PjRtFutureMoveControl& operator=(PjRtFutureMoveControl&&) = default;
};
template <>
class PjRtFutureMoveControl<false> {
protected:
PjRtFutureMoveControl() = default;
PjRtFutureMoveControl(const PjRtFutureMoveControl&) = default;
PjRtFutureMoveControl& operator=(const PjRtFutureMoveControl&) = default;
PjRtFutureMoveControl(PjRtFutureMoveControl&&) = default;
PjRtFutureMoveControl& operator=(PjRtFutureMoveControl&&) = default;
};
template <typename T, bool unique = !std::is_copy_constructible_v<T>>
class PjRtFutureBase : public PjRtFutureMoveControl<unique> {
protected:
PjRtFutureBase(tsl::AsyncValueRef<T> promise,
PjRtFutureHelpers::OnBlockStartFn on_block_start,
PjRtFutureHelpers::OnBlockEndFn on_block_end)
: promise_(std::move(promise)),
on_block_start_(std::move(on_block_start)),
on_block_end_(std::move(on_block_end)) {}
public:
PjRtFutureBase() = default;
explicit PjRtFutureBase(
T t, PjRtFutureHelpers::OnBlockStartFn on_block_start = nullptr,
PjRtFutureHelpers::OnBlockEndFn on_block_end = nullptr)
: PjRtFutureBase(tsl::MakeAvailableAsyncValueRef<T>(std::move(t)),
std::move(on_block_start), std::move(on_block_end)) {}
bool IsValid() const { return promise_ != nullptr; }
bool IsReady() {
CHECK(IsValid());
return promise_.IsAvailable();
}
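  // In this implementation IsKnownReady() is identical to IsReady(): both
  // query the underlying async value without blocking.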
bool IsKnownReady() {
CHECK(IsValid());
return promise_.IsAvailable();
}
void AssertHappensBefore(ScopedAsyncTrackingEvent* event) {
CHECK(IsValid());
if (event) event->AddDependency(promise_.CopyRCRef());
}
protected:
static constexpr bool is_unique() { return unique; }
class Promise {
public:
Promise() = default;
Promise(Promise&& other) = default;
Promise& operator=(Promise&& other) = default;
Promise(const Promise& other) = default;
Promise& operator=(const Promise& other) = default;
operator bool() const { return static_cast<bool>(promise_); }
protected:
explicit Promise(tsl::AsyncValueRef<T> promise)
: promise_(std::move(promise)) {}
template <typename... Args>
void emplace(Args&&... args) const {
DCHECK(promise_) << "Promise must wrap an async value";
promise_.template emplace<T>(std::forward<Args>(args)...);
}
tsl::AsyncValueRef<T> release() { return std::move(promise_); }
tsl::AsyncValue* async_value() const { return promise_.GetAsyncValue(); }
#ifndef NDEBUG
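    // Debug-only count of futures created from this promise; lets move-only
    // futures DCHECK that the promise is not shared.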
int64_t AddFuture() { return num_futures_->fetch_add(1); }
#endif
private:
tsl::AsyncValueRef<T> promise_;
#ifndef NDEBUG
std::shared_ptr<std::atomic<int64_t>> num_futures_ =
std::make_shared<std::atomic<int64_t>>(0);
#endif
};
PjRtFutureHelpers::ProfilingKeys OnBlockStart() const {
return on_block_start_ ? on_block_start_()
: PjRtFutureHelpers::ProfilingKeys();
}
void OnBlockEnd(PjRtFutureHelpers::ProfilingKeys keys) const {
if (on_block_end_) on_block_end_(std::move(keys));
}
void BlockUntilReady() const {
CHECK(IsValid());
if (!promise_.IsAvailable()) {
PjRtFutureHelpers::ProfilingKeys keys = OnBlockStart();
tsl::BlockUntilReady(promise_);
OnBlockEnd(std::move(keys));
}
DCHECK(promise_.IsConcrete());
}
const T& Await() const& {
BlockUntilReady();
return *promise_;
}
std::conditional_t<unique, T, const T&> Await() && {
BlockUntilReady();
if constexpr (unique) {
return std::move(*promise_);
} else {
return *promise_;
}
}
template <typename F, std::enable_if_t<std::is_invocable_v<F, const T&> &&
!unique>* = nullptr>
void OnReady(F&& f) const& {
CHECK(IsValid());
promise_.AndThen(
[promise = promise_.AsPtr(), f = std::forward<F>(f)]() mutable {
DCHECK(promise.IsConcrete());
f(*promise);
});
}
template <
typename F,
std::enable_if_t<unique ? std::is_invocable_v<F, T>
: std::is_invocable_v<F, const T&>>* = nullptr>
void OnReady(F&& f) && {
CHECK(IsValid());
promise_.AndThen(
[promise = promise_.AsPtr(), f = std::forward<F>(f)]() mutable {
DCHECK(promise.IsConcrete());
if constexpr (unique) {
f(std::move(*promise));
} else {
f(*promise);
}
});
}
private:
tsl::AsyncValueRef<T> promise_;
PjRtFutureHelpers::OnBlockStartFn on_block_start_;
PjRtFutureHelpers::OnBlockEndFn on_block_end_;
};
}
template <class T>
class PjRtFuture : public internal::PjRtFutureBase<absl::StatusOr<T>> {
using Base = internal::PjRtFutureBase<absl::StatusOr<T>>;
static_assert(!std::is_same_v<T, absl::Status>,
"Use PjRtFuture<> specialization for stateless futures");
static_assert(
!internal::IsStatusOr<T>::value,
"PjRtFuture<T> already has an implicit absl::StatusOr<T> semantics");
public:
class Promise : public Base::Promise {
public:
using Base::Promise::Promise;
void Set(absl::StatusOr<T> value) {
Base::Promise::emplace(std::move(value));
}
private:
friend class PjRtFuture<T>;
};
static Promise CreatePromise() {
return Promise(tsl::MakeUnconstructedAsyncValueRef<absl::StatusOr<T>>());
}
using Base::Base;
explicit PjRtFuture(
Promise promise,
PjRtFutureHelpers::OnBlockStartFn on_block_start = nullptr,
PjRtFutureHelpers::OnBlockEndFn on_block_end = nullptr)
: Base(promise.release(), std::move(on_block_start),
std::move(on_block_end)) {
#ifndef NDEBUG
if constexpr (Base::is_unique()) {
DCHECK_EQ(promise.AddFuture(), 0)
<< "Unique PjRtFuture cannot share a promise object";
}
#endif
}
using Base::Await;
using Base::OnReady;
};
template <>
class PjRtFuture<void> : public internal::PjRtFutureBase<absl::Status> {
using Base = internal::PjRtFutureBase<absl::Status>;
public:
class Promise : public Base::Promise {
public:
using Base::Promise::async_value;
using Base::Promise::Promise;
void Set(absl::Status status = absl::OkStatus()) {
Base::Promise::emplace(std::move(status));
}
private:
friend class PjRtFuture<void>;
};
static Promise CreatePromise() {
return Promise(tsl::MakeUnconstructedAsyncValueRef<absl::Status>());
}
using Base::Base;
explicit PjRtFuture(
Promise promise,
PjRtFutureHelpers::OnBlockStartFn on_block_start = nullptr,
PjRtFutureHelpers::OnBlockEndFn on_block_end = nullptr)
: Base(promise.release(), std::move(on_block_start),
std::move(on_block_end)) {}
using Base::Await;
using Base::OnReady;
};
}
#endif
#include "xla/pjrt/pjrt_future.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
struct State {
explicit State(int32_t size)
: pending_count(size), promise(PjRtFuture<>::CreatePromise()) {}
std::atomic<int32_t> pending_count;
PjRtFuture<>::Promise promise;
absl::Mutex mu;
absl::Status status ABSL_GUARDED_BY(&mu);
};
}
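// Returns a future that becomes ready once all input futures are ready; the
// combined status keeps the first error encountered (absl::Status::Update).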
PjRtFuture<> JoinFutures(absl::Span<const PjRtFuture<>> futures) {
if (futures.empty()) {
return PjRtFuture<>(absl::OkStatus());
} else if (futures.size() == 1) {
return futures.front();
}
auto state = std::make_shared<State>(futures.size());
for (const PjRtFuture<>& future : futures) {
future.OnReady([state](absl::Status status) {
if (!status.ok()) {
absl::MutexLock lock(&state->mu);
state->status.Update(status);
}
const int pending_count =
state->pending_count.fetch_sub(1, std::memory_order_acq_rel);
CHECK_GE(pending_count, 1) << "Pending count can't drop below 0";
if (pending_count == 1) {
absl::MutexLock lock(&state->mu);
state->promise.Set(std::move(state->status));
}
});
}
return PjRtFuture<>(state->promise);
}
} | #include "xla/pjrt/pjrt_future.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
TEST(PjRtFutureTest, StatelessFuture) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set();
EXPECT_TRUE(future.IsReady());
EXPECT_EQ(future.Await(), absl::OkStatus());
future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
}
TEST(PjRtFutureTest, CopyableFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
PjRtFuture<int32_t> copy_constructed(future);
PjRtFuture<int32_t> copy_assigned = future;
EXPECT_FALSE(copy_constructed.IsReady());
EXPECT_FALSE(copy_assigned.IsReady());
promise.Set(42);
EXPECT_TRUE(copy_constructed.IsReady());
EXPECT_TRUE(copy_assigned.IsReady());
}
TEST(PjRtFutureTest, MoveConstructedFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
PjRtFuture<std::unique_ptr<int32_t>> move_constructed(std::move(future));
EXPECT_FALSE(move_constructed.IsReady());
promise.Set(std::make_unique<int32_t>(42));
EXPECT_TRUE(move_constructed.IsReady());
}
TEST(PjRtFutureTest, MoveAssignedFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
PjRtFuture<std::unique_ptr<int32_t>> move_assigned = std::move(future);
EXPECT_FALSE(move_assigned.IsReady());
promise.Set(std::make_unique<int32_t>(42));
EXPECT_TRUE(move_assigned.IsReady());
}
TEST(PjRtFutureTest, AwaitMoveOnlyFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
promise.Set(std::make_unique<int32_t>(42));
EXPECT_EQ(**future.Await(), 42);
EXPECT_EQ(**std::move(future).Await(), 42);
}
TEST(PjRtFutureTest, OnReadyRvalueFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
promise.Set(42);
std::move(future).OnReady(
[](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, OnReadyMoveOnlyFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
promise.Set(std::make_unique<int32_t>(42));
std::move(future).OnReady([](absl::StatusOr<std::unique_ptr<int32_t>> value) {
EXPECT_EQ(**value, 42);
});
}
TEST(PjRtFutureTest, StatelessError) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(absl::InternalError("test"));
EXPECT_TRUE(future.IsReady());
absl::Status status = future.Await();
EXPECT_EQ(status, absl::InternalError("test"));
future.OnReady([](absl::Status status) {
EXPECT_EQ(status, absl::InternalError("test"));
});
}
TEST(PjRtFutureTest, StatelessImmediate) {
PjRtFuture<> ok_future(absl::OkStatus());
PjRtFuture<> error_future(absl::InternalError("test"));
EXPECT_TRUE(ok_future.IsReady());
EXPECT_TRUE(error_future.IsReady());
EXPECT_EQ(ok_future.Await(), absl::OkStatus());
EXPECT_EQ(error_future.Await(), absl::InternalError("test"));
ok_future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
error_future.OnReady([](absl::Status status) {
EXPECT_EQ(status, absl::InternalError("test"));
});
}
TEST(PjRtFutureTest, StatefulFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(42);
EXPECT_TRUE(future.IsReady());
future.OnReady([](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, StatusFuture) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(absl::OkStatus());
EXPECT_TRUE(future.IsReady());
future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
}
TEST(PjRtFutureTest, StatusOrFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(42);
EXPECT_TRUE(future.IsReady());
future.OnReady([](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, JoinFutures) {
auto empty_join = JoinFutures({});
EXPECT_TRUE(empty_join.IsReady());
EXPECT_EQ(empty_join.Await(), absl::OkStatus());
auto promise0 = PjRtFuture<>::CreatePromise();
auto promise1 = PjRtFuture<>::CreatePromise();
std::vector<PjRtFuture<>> futures0 = {PjRtFuture<>(promise0)};
std::vector<PjRtFuture<>> futures1 = {PjRtFuture<>(promise0),
PjRtFuture<>(promise1)};
auto join_one = JoinFutures(futures0);
EXPECT_FALSE(join_one.IsReady());
auto join_two = JoinFutures(futures1);
EXPECT_FALSE(join_two.IsReady());
promise0.Set();
EXPECT_TRUE(join_one.IsReady());
EXPECT_FALSE(join_two.IsReady());
EXPECT_EQ(join_one.Await(), absl::OkStatus());
promise1.Set();
EXPECT_TRUE(join_two.IsReady());
EXPECT_EQ(join_two.Await(), absl::OkStatus());
}
TEST(PjRtFutureTest, JoinErrors) {
auto empty_join = JoinFutures({});
EXPECT_TRUE(empty_join.IsReady());
EXPECT_EQ(empty_join.Await(), absl::OkStatus());
auto promise0 = PjRtFuture<>::CreatePromise();
auto promise1 = PjRtFuture<>::CreatePromise();
std::vector<PjRtFuture<>> futures0 = {PjRtFuture<>(promise0)};
std::vector<PjRtFuture<>> futures1 = {PjRtFuture<>(promise0),
PjRtFuture<>(promise1)};
auto join_one = JoinFutures(futures0);
EXPECT_FALSE(join_one.IsReady());
auto join_two = JoinFutures(futures1);
EXPECT_FALSE(join_two.IsReady());
promise0.Set(absl::InternalError("error #0"));
EXPECT_TRUE(join_one.IsReady());
EXPECT_FALSE(join_two.IsReady());
EXPECT_EQ(join_one.Await(), absl::InternalError("error #0"));
promise1.Set(absl::InternalError("error #1"));
EXPECT_TRUE(join_two.IsReady());
EXPECT_EQ(join_two.Await(), absl::InternalError("error #0"));
}
} |
147 | #ifndef XLA_PJRT_C_PJRT_C_API_GPU_H_
#define XLA_PJRT_C_PJRT_C_API_GPU_H_
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_macros.h"
#ifdef __cplusplus
extern "C" {
#endif
PJRT_CAPI_EXPORT const PJRT_Api* GetPjrtApi();
#ifdef __cplusplus
}
#endif
#endif
#include "xla/pjrt/c/pjrt_c_api_gpu.h"
#include "absl/base/call_once.h"
#include "absl/log/initialize.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_internal.h"
#include "tsl/platform/platform.h"
const PJRT_Api* GetPjrtApi() {
#ifndef PLATFORM_GOOGLE
static absl::once_flag once;
absl::call_once(once, []() { absl::InitializeLog(); });
#endif
return pjrt::gpu_plugin::GetGpuPjrtApi();
} | #include "xla/pjrt/c/pjrt_c_api_gpu.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/ffi/api/ffi.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/ffi_api.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_ffi_extension.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_extension.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_test.h"
#include "xla/pjrt/c/pjrt_c_api_test_base.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace pjrt {
namespace {
#ifdef TENSORFLOW_USE_ROCM
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
"rocm"),
true);
#else
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
"cuda"),
true);
#endif
class PjrtCApiGpuTest : public PjrtCApiTestBase {
public:
PjrtCApiGpuTest() : PjrtCApiTestBase(GetPjrtApi()) {}
};
TEST_F(PjrtCApiGpuTest, CreateViewOfDeviceBuffer) {
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> buffer =
create_buffer().first;
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args device_buffer_ptr_args;
device_buffer_ptr_args.struct_size =
PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
device_buffer_ptr_args.extension_start = nullptr;
device_buffer_ptr_args.buffer = buffer.get();
PJRT_Error* device_buffer_ptr_error =
api_->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(&device_buffer_ptr_args);
ASSERT_EQ(device_buffer_ptr_error, nullptr);
PJRT_Buffer_Device_Args device_args = PJRT_Buffer_Device_Args{
PJRT_Buffer_Device_Args_STRUCT_SIZE,
nullptr,
buffer.get(),
};
PJRT_Error* device_error = api_->PJRT_Buffer_Device(&device_args);
ASSERT_EQ(device_error, nullptr);
PJRT_Client_CreateViewOfDeviceBuffer_Args create_view_args;
create_view_args.struct_size =
PJRT_Client_CreateViewOfDeviceBuffer_Args_STRUCT_SIZE;
create_view_args.extension_start = nullptr;
create_view_args.client = client_;
create_view_args.device_buffer_ptr = device_buffer_ptr_args.device_memory_ptr;
xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {4});
create_view_args.dims = shape.dimensions().data();
create_view_args.num_dims = shape.dimensions().size();
create_view_args.element_type =
pjrt::ConvertToPjRtBufferType(shape.element_type());
pjrt::BufferMemoryLayoutData c_layout_data;
TF_ASSERT_OK_AND_ASSIGN(
c_layout_data, pjrt::ConvertToBufferMemoryLayoutData(shape.layout()));
create_view_args.layout = &(c_layout_data.c_layout);
create_view_args.device = device_args.device;
std::function<void()> on_delete_callback = []() mutable {};
create_view_args.on_delete_callback_arg =
new std::function(on_delete_callback);
create_view_args.on_delete_callback = [](void* device_buffer_ptr,
void* user_arg) {
auto c_func = reinterpret_cast<std::function<void()>*>(user_arg);
(*c_func)();
delete c_func;
};
create_view_args.stream = reinterpret_cast<intptr_t>(nullptr);
PJRT_Error* error =
api_->PJRT_Client_CreateViewOfDeviceBuffer(&create_view_args);
ASSERT_EQ(error, nullptr);
std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> view_buffer(
create_view_args.buffer, ::pjrt::MakeBufferDeleter(api_));
PJRT_Buffer_ToHostBuffer_Args to_host_args;
to_host_args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
to_host_args.extension_start = nullptr;
to_host_args.src = view_buffer.get();
xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {4});
auto literal = std::make_shared<xla::Literal>(host_shape);
to_host_args.host_layout = nullptr;
to_host_args.dst = literal->untyped_data();
to_host_args.dst_size = xla::ShapeUtil::ByteSizeOfElements(host_shape);
to_host_args.event = nullptr;
PJRT_Error* to_host_error = api_->PJRT_Buffer_ToHostBuffer(&to_host_args);
ASSERT_EQ(to_host_error, nullptr);
xla::PjRtFuture<> transfer_to_host =
::pjrt::ConvertCEventToCppFuture(to_host_args.event, api_);
TF_CHECK_OK(transfer_to_host.Await());
ASSERT_EQ(literal->data<float>().size(), 4);
std::vector<float> float_data(4);
std::iota(float_data.begin(), float_data.end(), 41.0f);
EXPECT_TRUE(xla::LiteralTestUtil::Equal(
xla::LiteralUtil::CreateR1<float>(float_data), *literal));
}
TEST_F(PjrtCApiGpuTest, CreateAndDestroyExecuteContext) {
PJRT_ExecuteContext_Create_Args create_arg;
create_arg.struct_size = PJRT_ExecuteContext_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.context = nullptr;
EXPECT_EQ(api_->PJRT_ExecuteContext_Create(&create_arg), nullptr);
EXPECT_NE(create_arg.context, nullptr);
const PJRT_FFI_Extension* ffi_extension =
pjrt::FindExtension<PJRT_FFI_Extension>(
api_, PJRT_Extension_Type::PJRT_Extension_Type_FFI);
ASSERT_NE(ffi_extension, nullptr);
std::string string_data = "string_data";
PJRT_FFI_UserData_Add_Args add_args;
add_args.struct_size = PJRT_FFI_UserData_Add_Args_STRUCT_SIZE;
add_args.extension_start = nullptr;
add_args.user_data.type_id = 42;
add_args.user_data.data = &string_data;
add_args.user_data.deleter = nullptr;
add_args.context = create_arg.context;
EXPECT_EQ(ffi_extension->user_data_add(&add_args), nullptr);
TF_ASSERT_OK_AND_ASSIGN(
auto lookup_user_data,
create_arg.context->execute_context->ffi_context().Lookup(
xla::ffi::ExecutionContext::TypeId(42)));
EXPECT_EQ(lookup_user_data, &string_data);
PJRT_ExecuteContext_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.context = create_arg.context;
api_->PJRT_ExecuteContext_Destroy(&destroy_args);
}
absl::StatusOr<PJRT_Client_Create_Args> BuildCreateArg(
::pjrt::PJRT_KeyValueCallbackData* kv_callback_data,
std::vector<PJRT_NamedValue>& c_options) {
PJRT_Client_Create_Args args;
args.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.create_options = c_options.data();
args.num_options = c_options.size();
args.kv_get_callback = kv_callback_data->c_kv_get;
args.kv_get_user_arg = &kv_callback_data->kv_get_c_func;
args.kv_put_callback = kv_callback_data->c_kv_put;
args.kv_put_user_arg = &kv_callback_data->kv_put_c_func;
args.client = nullptr;
return args;
}
TEST(PjrtCApiGpuKVStoreTest, CreateClientWithKVCallback) {
auto api = GetPjrtApi();
auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
std::shared_ptr<::pjrt::PJRT_KeyValueCallbackData> kv_callback_data =
::pjrt::ConvertToCKeyValueCallbacks(kv_store);
int num_nodes = 2;
std::vector<std::thread> threads;
for (int i = 0; i < num_nodes; i++) {
threads.emplace_back([api, i, num_nodes,
kv_callback_data = kv_callback_data,
kv_store = kv_store] {
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"num_nodes", static_cast<int64_t>(num_nodes)},
{"node_id", static_cast<int64_t>(i)}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
TF_ASSERT_OK_AND_ASSIGN(
PJRT_Client_Create_Args create_arg,
BuildCreateArg(kv_callback_data.get(), c_options));
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_EQ(error, nullptr) << error->status.message();
PJRT_Client_Devices_Args device_args;
device_args.struct_size = PJRT_Client_Devices_Args_STRUCT_SIZE;
device_args.extension_start = nullptr;
device_args.client = create_arg.client;
PJRT_Error* device_error = api->PJRT_Client_Devices(&device_args);
EXPECT_EQ(device_error, nullptr);
EXPECT_EQ(device_args.num_devices, 2);
PJRT_Client_AddressableDevices_Args addressable_device_args;
addressable_device_args.struct_size =
PJRT_Client_AddressableDevices_Args_STRUCT_SIZE;
addressable_device_args.extension_start = nullptr;
addressable_device_args.client = create_arg.client;
PJRT_Error* addressable_device_error =
api->PJRT_Client_AddressableDevices(&addressable_device_args);
EXPECT_EQ(addressable_device_error, nullptr);
EXPECT_EQ(addressable_device_args.num_addressable_devices, 1);
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = create_arg.client;
PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
CHECK_EQ(destroy_error, nullptr);
});
}
for (auto& t : threads) {
t.join();
}
}
TEST(PjrtCApiGpuAllocatorTest, ValidOptionsParsing) {
auto api = GetPjrtApi();
std::vector<std::string> allocator_options = {"default", "platform", "bfc",
"cuda_async"};
for (const std::string& allocator_option : allocator_options) {
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"allocator", allocator_option},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
if (allocator_option == "bfc" || allocator_option == "cuda_async") {
options["memory_fraction"] = 0.5f;
}
if (allocator_option == "cuda_async") {
options["preallocate"] = true;
}
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_EQ(error, nullptr) << error->status.message();
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = create_arg.client;
PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
CHECK_EQ(destroy_error, nullptr);
}
}
TEST(PjrtCApiGpuAllocatorTest, InvalidAllocatorOptionsParsing) {
auto api = GetPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"allocator", static_cast<std::string>("invalid_allocator")},
{"memory_fraction", 0.5f},
{"preallocate", true},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_NE(error, nullptr);
EXPECT_THAT(error->status,
::tsl::testing::StatusIs(
absl::StatusCode::kUnimplemented,
"Allocator invalid_allocator not supported for PJRT GPU "
"plugin. Supported allocator options are: 'default', "
"'platform', 'bfc' and 'cuda_async'."));
PJRT_Error_Destroy_Args error_destroy_args;
error_destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
error_destroy_args.extension_start = nullptr;
error_destroy_args.error = error;
api->PJRT_Error_Destroy(&error_destroy_args);
}
TEST(PjrtCApiPlatformNameTest, AvailablePlatformName) {
auto api = GetPjrtApi();
std::string expected_platform_name_for_cuda = "cuda";
std::string expected_platform_name_for_rocm = "rocm";
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"platform_name", static_cast<std::string>("gpu")},
{"allocator", static_cast<std::string>("default")},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_EQ(error, nullptr) << error->status.message();
PJRT_Client_PlatformName_Args platform_name_args;
platform_name_args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
platform_name_args.extension_start = nullptr;
platform_name_args.client = create_arg.client;
PJRT_Error* platform_name_error =
api->PJRT_Client_PlatformName(&platform_name_args);
EXPECT_EQ(platform_name_error, nullptr);
#if TENSORFLOW_USE_ROCM
EXPECT_EQ(platform_name_args.platform_name, expected_platform_name_for_rocm);
#else
EXPECT_EQ(platform_name_args.platform_name, expected_platform_name_for_cuda);
#endif
PJRT_Client_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.client = create_arg.client;
PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
CHECK_EQ(destroy_error, nullptr);
}
TEST(PjrtCApiPlatformNameTest, UnavailablePlatformName) {
auto api = GetPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"platform_name", static_cast<std::string>("invalid_platform_name")},
{"allocator", static_cast<std::string>("default")},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_NE(error, nullptr);
EXPECT_THAT(error->status,
::tsl::testing::StatusIs(
absl::StatusCode::kNotFound,
testing::StartsWith("Could not find registered platform with "
"name: \"invalid_platform_name\". "
"Available platform names are:")));
PJRT_Error_Destroy_Args error_destroy_args;
error_destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
error_destroy_args.extension_start = nullptr;
error_destroy_args.error = error;
api->PJRT_Error_Destroy(&error_destroy_args);
}
void TestCustomCallV2() {}
TEST(PjrtCApiGpuExtensionTest, CustomCallUntyped) {
PJRT_Gpu_Register_Custom_Call_Args args;
args.struct_size = PJRT_Gpu_Register_Custom_Call_Args_STRUCT_SIZE;
std::string function_name = "untyped_function_name";
args.function_name = function_name.c_str();
args.function_name_size = function_name.size();
args.api_version = 0;
args.custom_call_function = reinterpret_cast<void*>(&TestCustomCallV2);
auto api = GetPjrtApi();
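  // Walk the extension linked list to find the GPU custom-call extension.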
const PJRT_Extension_Base* next =
reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
while (next != nullptr &&
next->type !=
PJRT_Extension_Type::PJRT_Extension_Type_Gpu_Custom_Call) {
next = next->next;
}
ASSERT_NE(next, nullptr);
PJRT_Error* error =
reinterpret_cast<const PJRT_Gpu_Custom_Call*>(next)->custom_call(&args);
CHECK_EQ(error, nullptr);
void* custom_call = xla::CustomCallTargetRegistry::Global()->Lookup(
function_name, stream_executor::GpuPlatformName());
EXPECT_EQ(custom_call, reinterpret_cast<void*>(&TestCustomCallV2));
}
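// Both custom-call tests walk the PJRT extension chain by hand. A minimal
// helper sketch of that traversal, using only the PJRT_Extension_Base fields
// exercised in these tests (`type` and `next`); returns nullptr when the
// requested extension is not present in the chain:
static const PJRT_Extension_Base* FindExtension(const PJRT_Api* api,
                                                PJRT_Extension_Type type) {
  const PJRT_Extension_Base* next =
      reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
  // Follow the singly linked list until a matching extension type is found.
  while (next != nullptr && next->type != type) {
    next = next->next;
  }
  return next;
}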
static void* kNoop = xla::ffi::Ffi::Bind()
.To([]() { return xla::ffi::Error::Success(); })
.release();
TEST(PjrtCApiGpuExtensionTest, CustomCallTyped) {
PJRT_Gpu_Register_Custom_Call_Args args;
args.struct_size = PJRT_Gpu_Register_Custom_Call_Args_STRUCT_SIZE;
std::string function_name = "typed_function_name";
args.function_name = function_name.c_str();
args.function_name_size = function_name.size();
args.api_version = 1;
args.custom_call_function = kNoop;
auto api = GetPjrtApi();
const PJRT_Extension_Base* next =
reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
while (next != nullptr &&
next->type !=
PJRT_Extension_Type::PJRT_Extension_Type_Gpu_Custom_Call) {
next = next->next;
}
ASSERT_NE(next, nullptr);
PJRT_Error* error =
reinterpret_cast<const PJRT_Gpu_Custom_Call*>(next)->custom_call(&args);
CHECK_EQ(error, nullptr);
auto registration =
xla::ffi::FindHandler(function_name, stream_executor::GpuPlatformName())
.value();
EXPECT_EQ(reinterpret_cast<void*>(registration.bundle.execute), kNoop);
}
}
} |
148 | #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_ZONE_PROVIDER_H_
#define TENSORFLOW_TSL_PLATFORM_CLOUD_COMPUTE_ENGINE_ZONE_PROVIDER_H_
#include "tsl/platform/cloud/compute_engine_metadata_client.h"
#include "tsl/platform/cloud/zone_provider.h"
namespace tsl {
class ComputeEngineZoneProvider : public ZoneProvider {
public:
explicit ComputeEngineZoneProvider(
std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client);
virtual ~ComputeEngineZoneProvider();
Status GetZone(string* zone) override;
private:
std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client_;
string cached_zone;
ComputeEngineZoneProvider(const ComputeEngineZoneProvider&) = delete;
void operator=(const ComputeEngineZoneProvider&) = delete;
};
}
#endif
#include "tsl/platform/cloud/compute_engine_zone_provider.h"
#include <utility>
#include "tsl/platform/str_util.h"
namespace tsl {
namespace {
constexpr char kGceMetadataZonePath[] = "instance/zone";
}
ComputeEngineZoneProvider::ComputeEngineZoneProvider(
std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client)
: google_metadata_client_(std::move(google_metadata_client)) {}
Status ComputeEngineZoneProvider::GetZone(string* zone) {
if (!cached_zone.empty()) {
*zone = cached_zone;
return OkStatus();
}
std::vector<char> response_buffer;
TF_RETURN_IF_ERROR(google_metadata_client_->GetMetadata(kGceMetadataZonePath,
&response_buffer));
StringPiece location(&response_buffer[0], response_buffer.size());
std::vector<string> elems = str_util::Split(location, "/");
if (elems.size() == 4) {
cached_zone = elems.back();
*zone = cached_zone;
} else {
LOG(ERROR) << "Failed to parse the zone name from location: "
<< string(location);
}
return OkStatus();
}
ComputeEngineZoneProvider::~ComputeEngineZoneProvider() {}
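// Parsing note: the metadata endpoint returns a fully qualified location of
// the form "projects/<project-number>/zones/<zone>" (see the test data in the
// accompanying test), so a successful split on '/' yields exactly four
// elements and the zone is the last one; anything else logs an error and
// leaves the output zone empty while still returning OkStatus().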
} | #include "tsl/platform/cloud/compute_engine_zone_provider.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/test.h"
namespace tsl {
class ComputeEngineZoneProviderTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
TEST_F(ComputeEngineZoneProviderTest, GetZone) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"Header Metadata-Flavor: Google\n",
"projects/123456789/zones/us-west1-b")});
auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
httpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
ComputeEngineZoneProvider provider(metadata_client);
string zone;
TF_EXPECT_OK(provider.GetZone(&zone));
EXPECT_EQ("us-west1-b", zone);
TF_EXPECT_OK(provider.GetZone(&zone));
}
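// Note: only a single FakeHttpRequest is queued above, so the second
// GetZone() call can only succeed through the cached_zone fast path, which is
// what actually verifies the caching behavior.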
TEST_F(ComputeEngineZoneProviderTest, InvalidZoneString) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"Header Metadata-Flavor: Google\n",
"invalidresponse")});
auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
httpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
ComputeEngineZoneProvider provider(metadata_client);
string zone;
TF_EXPECT_OK(provider.GetZone(&zone));
EXPECT_EQ("", zone);
}
} |
149 | #ifndef XLA_SERVICE_VALUE_RANGE_H_
#define XLA_SERVICE_VALUE_RANGE_H_
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/service/constant_value.h"
namespace xla {
class Range {
public:
Range()
: min_(ConstantValue::GetZero(64, false)),
max_(ConstantValue::GetZero(64, false)),
empty_(true),
is_linear_(false) {}
Range(const ConstantValue& min, const ConstantValue& max, bool is_linear)
: min_(min), max_(max), empty_(false), is_linear_(is_linear) {}
const ConstantValue& min() const { return min_; }
const ConstantValue& max() const { return max_; }
bool IsEmpty() const { return empty_; }
bool IsSingleValue() const { return !IsEmpty() && min_ == max_; }
bool IsLinear() const { return is_linear_; }
std::optional<int64_t> GetSingleSignedValue() const;
std::optional<int64_t> GetSingleUnsignedValue() const;
std::string ToString() const;
private:
ConstantValue min_;
ConstantValue max_;
bool empty_;
bool is_linear_;
};
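// Construction sketch (mirroring how the accompanying tests build ranges; the
// ConstantValue argument order shown is an assumption drawn from those
// tests):
//
//   Range r(ConstantValue::GetZero(32, /*is_signed=*/true),
//           ConstantValue::GetSigned(5, 32), /*is_linear=*/true);
//   // r covers the signed interval [0, 5]; IsSingleValue() is false.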
Range RecursivelyIdentifyRange(
const HloInstruction* instr,
const absl::flat_hash_map<const HloInstruction*, Range>& predefined_ranges);
}
#endif
#include "xla/service/value_range.h"
#include <optional>
#include <string>
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
std::optional<int64_t> Range::GetSingleSignedValue() const {
if (!IsSingleValue()) {
return std::nullopt;
}
return min_.GetSignedValue();
}
std::optional<int64_t> Range::GetSingleUnsignedValue() const {
if (!IsSingleValue()) {
return std::nullopt;
}
return min_.GetUnsignedValue();
}
std::string Range::ToString() const {
if (IsEmpty()) {
return std::string("Empty");
}
return absl::StrCat("min: ", min_.ToString(), " max: ", max_.ToString());
}
Range RecursivelyIdentifyRange(
const HloInstruction* instr,
const absl::flat_hash_map<const HloInstruction*, Range>&
predefined_ranges) {
if ((!instr->shape().IsInteger() && instr->shape().element_type() != PRED) ||
instr->shape().dimensions_size() != 0) {
return Range{};
}
VLOG(5) << "Computing Range for " << instr->ToString();
auto it = predefined_ranges.find(instr);
if (it != predefined_ranges.end()) {
VLOG(5) << "Found range! " << it->second.max().GetSignedValue() << " "
<< it->second.min().GetSignedValue();
return it->second;
}
switch (instr->opcode()) {
case HloOpcode::kCompare: {
VLOG(5) << "Handling Compare";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (instr->comparison_direction() != ComparisonDirection::kLt) {
return Range{};
}
if (lhs.max().lt(rhs.min())) {
return Range{ConstantValue::GetOne(1, false),
ConstantValue::GetOne(1, false),
true};
}
if (!lhs.min().lt(rhs.max())) {
return Range{
ConstantValue::GetZero(1, false),
ConstantValue::GetZero(1, false),
true};
}
VLOG(5) << "Compare failed";
VLOG(5) << "rhs max " << rhs.max().GetSignedValue() << " rhs min "
<< rhs.min().GetSignedValue() << " lhs max "
<< lhs.max().GetSignedValue() << " lhs min "
<< lhs.min().GetSignedValue();
return Range{};
}
case HloOpcode::kConstant: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Constant";
const int64_t bitwidth =
primitive_util::BitWidth(instr->shape().element_type());
const bool is_signed =
primitive_util::IsSignedIntegralType(instr->shape().element_type());
if (is_signed) {
const int64_t value = *instr->literal().GetFirstInteger();
return Range{ConstantValue::GetSigned(value, bitwidth),
ConstantValue::GetSigned(value, bitwidth),
true};
}
const uint64_t value = *instr->literal().GetFirstInteger();
return Range{ConstantValue::GetUnsigned(value, bitwidth),
ConstantValue::GetUnsigned(value, bitwidth),
true};
}
case HloOpcode::kAdd: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Add";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (lhs.IsEmpty() || rhs.IsEmpty()) {
return Range{};
}
ConstantValue min = lhs.min().add(rhs.min());
ConstantValue max = lhs.max().add(rhs.max());
if (max.lt(min)) {
VLOG(5) << "Add wrapped";
return Range{};
}
return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
}
case HloOpcode::kSelect: {
VLOG(5) << "Handling Select";
const HloInstruction* cmp = instr->operand(0);
Range cmp_range = RecursivelyIdentifyRange(cmp, predefined_ranges);
if (cmp_range.IsEmpty() || !cmp_range.IsSingleValue()) {
VLOG(5) << "Select failed";
return Range{};
}
if (cmp_range.GetSingleSignedValue() == 0) {
return RecursivelyIdentifyRange(instr->operand(2), predefined_ranges);
}
return RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
}
case HloOpcode::kSubtract: {
if (!instr->shape().IsInteger()) {
return Range{};
}
VLOG(5) << "Handling Subtract";
Range lhs =
RecursivelyIdentifyRange(instr->operand(0), predefined_ranges);
Range rhs =
RecursivelyIdentifyRange(instr->operand(1), predefined_ranges);
VLOG(5) << "Returned Rhs: " << rhs.ToString()
<< " Lhs: " << lhs.ToString();
if (lhs.IsEmpty() || rhs.IsEmpty()) {
return Range{};
}
ConstantValue min = lhs.min().sub(rhs.max());
ConstantValue max = lhs.max().sub(rhs.min());
if (max.lt(min)) {
VLOG(5) << "Subtract wrapped";
return Range{};
}
return Range{min, max, lhs.IsLinear() && rhs.IsLinear()};
}
default:
break;
}
VLOG(5) << "Unsupported instruction: " << instr->ToString();
return Range{};
}
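// Worked example of the recursion above, taken from the accompanying tests:
// with p0 known to lie in [0, 5] and c0 = 124, add(p0, c0) propagates to
// [124, 129] (min+min / max+max) and subtract(p0, c0) to [-124, -119]; both
// stay linear because every input range is linear. A result whose bounds
// cross (max < min) is treated as a wrap-around and collapses to the empty
// Range.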
} | #include "xla/service/value_range.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ValueRangeTest : public HloTestBase {};
TEST_F(ValueRangeTest, AddedValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
ROOT %a = s32[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), 124);
EXPECT_EQ(range.max().GetSignedValue(), 129);
}
TEST_F(ValueRangeTest, AddedValueUnsigned) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = u16[] constant(32768)
p0 = u16[] parameter(0)
ROOT %a = u16[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, false),
ConstantValue::GetUnsigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetUnsignedValue(), 32768);
EXPECT_EQ(range.max().GetUnsignedValue(), 32773);
}
TEST_F(ValueRangeTest, SubtractValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
ROOT %a = s32[] subtract(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), -124);
EXPECT_EQ(range.max().GetSignedValue(), -119);
}
TEST_F(ValueRangeTest, SelectValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
c = pred[] compare(p0, c0), direction=LT
%s = s32[] subtract(p0, c0)
%a = s32[] add(c0, p0)
ROOT slct = s32[] select(c, s, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0)->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.max().GetSignedValue(), -119);
EXPECT_EQ(range.min().GetSignedValue(), -124);
}
TEST_F(ValueRangeTest, SelectValue2) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
p0 = s32[] parameter(0)
c = pred[] compare(c0, p0), direction=LT
%s = s32[] subtract(p0, c0)
%a = s32[] add(c0, p0)
ROOT slct = s32[] select(c, s, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0)->operand(1);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.max().GetSignedValue(), 129);
EXPECT_EQ(range.min().GetSignedValue(), 124);
}
TEST_F(ValueRangeTest, AddSubtractValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s32[] constant(124)
c1 = s32[] constant(12)
c2 = s32[] constant(5)
p0 = s32[] parameter(0)
sub = s32[] subtract(p0, c0)
a = s32[] add(sub, c1)
sub2 = s32[] subtract(c2, a)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(1)->operand(0)->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(std::make_pair(
p0, Range{ConstantValue::GetZero(32, true),
ConstantValue::GetSigned(5, 32), true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_FALSE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_TRUE(range.IsLinear());
EXPECT_EQ(range.min().GetSignedValue(), 112);
EXPECT_EQ(range.max().GetSignedValue(), 117);
}
TEST_F(ValueRangeTest, SubtractWrapAroundValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s16[] constant(124)
p0 = s16[] parameter(0)
ROOT %a = s16[] subtract(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(
std::make_pair(p0, Range{ConstantValue::GetSigned(-32768, 16),
ConstantValue::GetZero(16, true),
true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_TRUE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_FALSE(range.IsLinear());
}
TEST_F(ValueRangeTest, AddWrapAroundValue) {
constexpr absl::string_view hlo_string = R"(
HloModule module
ENTRY entry {
c0 = s16[] constant(124)
p0 = s16[] parameter(0)
ROOT %a = s16[] add(p0, c0)
}
)";
auto module =
ParseAndReturnUnverifiedModule(hlo_string, HloModuleConfig{}).value();
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* p0 = root->operand(0);
absl::flat_hash_map<const HloInstruction*, Range> fs;
fs.insert(
std::make_pair(p0, Range{ConstantValue::GetZero(16, true),
ConstantValue::GetSigned(32760, 16),
true}));
auto range = RecursivelyIdentifyRange(root, fs);
EXPECT_TRUE(range.IsEmpty());
EXPECT_FALSE(range.IsSingleValue());
EXPECT_FALSE(range.IsLinear());
}
}
} |
150 | #ifndef XLA_SERVICE_ALL_REDUCE_CONTIGUOUS_H_
#define XLA_SERVICE_ALL_REDUCE_CONTIGUOUS_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class AllReduceContiguous : public HloModulePass {
public:
absl::string_view name() const override { return "all-reduce-contiguous"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/all_reduce_contiguous.h"
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
namespace xla {
namespace {
absl::Status ReplaceWithContiguousAllReduce(
HloAllReduceInstruction* all_reduce) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
int64_t total_size = 0;
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
total_size += num_elements;
}
Shape concat_shape = ShapeUtil::MakeShape(element_type, {total_size});
HloInstruction* concatenated =
computation.AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, flat_operands, 0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
concat_shape, {concatenated}, all_reduce->to_apply(),
all_reduce->device_list(),
/*constrain_layout=*/false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
std::vector<HloInstruction*> outputs;
outputs.reserve(all_reduce->operand_count());
int64_t offset = 0;
for (int64_t i = 0; i < all_reduce->operand_count(); ++i) {
const Shape& flat_shape = flat_operands[i]->shape();
int64_t end = offset + flat_shape.dimensions(0);
HloInstruction* sliced = computation.AddInstruction(
HloInstruction::CreateSlice(flat_shape, new_all_reduce,
/*start_indices=*/{offset},
/*limit_indices=*/{end},
/*strides=*/{1}));
outputs.push_back(computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), sliced)));
offset = end;
}
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
all_reduce, HloInstruction::CreateTuple(outputs)));
return absl::OkStatus();
}
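// Shape of the rewrite above, sketched on the accompanying test case:
//
//   before: (f32[128], f32[4,4]) all-reduce(p0, p1)
//   after:  crs = f32[144] all-reduce(concatenate(bitcast(p0), bitcast(p1)))
//           tuple(bitcast(slice(crs, [0:128])), bitcast(slice(crs, [128:144])))
//
// i.e. one contiguous all-reduce over the flattened, concatenated operands,
// sliced and bitcast back to the original operand shapes.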
}
absl::StatusOr<bool> AllReduceContiguous::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceContiguous";
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceContiguous because the module contains all-reduce "
"with constrained layouts";
return false;
}
std::vector<HloAllReduceInstruction*> all_reduces;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAllReduce &&
instruction->operand_count() > 1) {
all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction));
}
}
}
for (HloAllReduceInstruction* all_reduce : all_reduces) {
TF_RETURN_IF_ERROR(ReplaceWithContiguousAllReduce(all_reduce));
}
return !all_reduces.empty();
}
} | #include "xla/service/all_reduce_contiguous.h"
#include <memory>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllReduceContiguousTest = HloTestBase;
TEST_F(AllReduceContiguousTest, Simple) {
const absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[4,4] parameter(1)
ROOT crs = (f32[128], f32[4,4]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceContiguous pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* root = module->entry_computation()->root_instruction();
auto crs =
AllOf(op::Shape("f32[144]"),
op::AllReduce(op::Concatenate(op::Bitcast(op::Parameter(0)),
op::Bitcast(op::Parameter(1)))));
ASSERT_THAT(
root,
op::Tuple(AllOf(op::Shape("f32[128]"), op::Bitcast(op::Slice(crs))),
AllOf(op::Shape("f32[4,4]"), op::Bitcast(op::Slice(crs)))));
EXPECT_EQ(root->operand(0)->operand(0)->slice_starts(0), 0);
EXPECT_EQ(root->operand(0)->operand(0)->slice_limits(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_starts(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_limits(0), 128 + 4 * 4);
}
}
} |
151 | #ifndef AROLLA_SERVING_INPLACE_EXPR_COMPILER_H_
#define AROLLA_SERVING_INPLACE_EXPR_COMPILER_H_
#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/expr_node.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/slot_listener.h"
#include "arolla/io/struct_io.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace inplace_expr_compiler_impl {
using TypedSlotMap = absl::flat_hash_map<std::string, TypedSlot>;
TypedSlotMap CollectInternalSlots(TypedSlot root_slot);
struct IoSlots {
TypedSlotMap input_slots;
TypedSlot output_slot;
TypedSlotMap named_output_slots;
};
absl::StatusOr<IoSlots> CollectIoSlots(QTypePtr qtype,
const CompiledExpr& compiled_expr,
absl::string_view final_output_name);
}
template <class T>
using InplaceModelFunction = std::function<absl::Status(T&)>;
template <typename T>
absl::StatusOr<InplaceModelFunction<T>> CompileInplaceExprOnStruct(
const InplaceCompiledExpr& compiled_expr,
absl::string_view final_output_name) {
static_assert(
std::is_standard_layout<T>::value,
"Data must be standard layout to be used with CompileExprInplace.");
QTypePtr qtype = GetQType<T>();
ASSIGN_OR_RETURN(inplace_expr_compiler_impl::IoSlots slots,
inplace_expr_compiler_impl::CollectIoSlots(
qtype, compiled_expr, final_output_name));
ASSIGN_OR_RETURN(auto executable, compiled_expr.InplaceBind(
slots.input_slots, slots.output_slot,
slots.named_output_slots));
return [qtype, executable(std::shared_ptr<BoundExpr>(std::move(executable)))](
T& input) -> absl::Status {
FramePtr frame(&input, &qtype->type_layout());
EvaluationContext ctx;
executable->Execute(&ctx, frame);
return ctx.status();
};
}
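// Usage sketch (mirrors the accompanying tests; TestStruct and the field
// paths below are illustrative names supplied by the caller):
//
//   ASSIGN_OR_RETURN(auto eval_fn,
//                    CompileInplaceExprOnStruct<TestStruct>(
//                        compiled_expr, "/side_outputs/x_plus_y"));
//   TestStruct input{.x = 5.f, .y = 7.};
//   RETURN_IF_ERROR(eval_fn(input));  // writes input.side_outputs.x_plus_y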
template <typename Struct>
absl::StatusOr<InputLoaderPtr<Struct>> CreateStructInputLoader() {
return StructInputLoader<Struct>::Create(
inplace_expr_compiler_impl::CollectInternalSlots(
TypedSlot::UnsafeFromOffset(GetQType<Struct>(), 0)));
}
template <typename Struct>
absl::StatusOr<std::unique_ptr<SlotListener<Struct>>>
CreateStructSlotListener() {
return StructSlotListener<Struct>::Create(
inplace_expr_compiler_impl::CollectInternalSlots(
TypedSlot::UnsafeFromOffset(GetQType<Struct>(), 0)));
}
}
#endif
#include "arolla/serving/inplace_expr_compiler.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/naming/table.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/named_field_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::inplace_expr_compiler_impl {
TypedSlotMap CollectInternalSlots(TypedSlot root_slot) {
TypedSlotMap result;
if (GetFieldNames(root_slot.GetType()).empty()) {
return result;
}
std::vector<std::pair<TypedSlot, naming::TablePath>> stack{{root_slot, {}}};
while (!stack.empty()) {
auto [slot, table] = stack.back();
stack.pop_back();
auto field_names = GetFieldNames(slot.GetType());
for (size_t i = 0; i < field_names.size(); ++i) {
const auto& field_name = field_names[i];
const TypedSlot& field_slot = slot.SubSlot(i);
result.emplace(table.Column(naming::FieldAccess(field_name)).FullName(),
field_slot);
if (!GetFieldNames(field_slot.GetType()).empty()) {
stack.emplace_back(field_slot,
table.Child(naming::FieldAccess(field_name)));
}
}
}
return result;
}
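// The traversal above produces TablePath-style keys: for the TestStruct used
// in the accompanying tests, the collected slots are keyed "/x", "/y",
// "/side_outputs/x_plus_y", and so on; nested structs contribute one path
// segment per field access.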
namespace {
absl::Status CheckField(QTypePtr qtype, const TypedSlotMap& slot_map,
QTypePtr field_qtype, absl::string_view field_name) {
if (GetFieldNames(qtype).empty()) {
return absl::FailedPreconditionError(
absl::StrCat("no registered field names for ", qtype->name(),
" in Compile.*ExprOnStructInput"));
}
if (!slot_map.contains(field_name)) {
return absl::FailedPreconditionError(
absl::StrCat("input `", field_name, "` not found in ", qtype->name(),
" in Compile.*ExprOnStructInput"));
}
QTypePtr result_type = slot_map.at(field_name).GetType();
if (result_type != field_qtype) {
return absl::FailedPreconditionError(absl::StrCat(
"input `", field_name, "` type mismatch for ", qtype->name(),
" in Compile.*ExprOnStructInput, expected in struct: ",
result_type->name(), ", found in expr: ", field_qtype->name()));
}
return absl::OkStatus();
}
absl::StatusOr<TypedSlotMap> CollectInputSlots(
QTypePtr qtype, const TypedSlotMap& struct_slot_map,
const CompiledExpr& compiled_expr) {
TypedSlotMap input_slots;
input_slots.reserve(compiled_expr.input_types().size());
for (const auto& [name, field_qtype] : compiled_expr.input_types()) {
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map, field_qtype, name));
input_slots.emplace(name, struct_slot_map.at(name));
}
return input_slots;
}
}
absl::StatusOr<IoSlots> CollectIoSlots(QTypePtr qtype,
const CompiledExpr& compiled_expr,
absl::string_view final_output_name) {
TypedSlotMap struct_slot_map =
CollectInternalSlots(TypedSlot::UnsafeFromOffset(qtype, 0));
ASSIGN_OR_RETURN(TypedSlotMap input_slots,
CollectInputSlots(qtype, struct_slot_map, compiled_expr));
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map,
compiled_expr.output_type(), final_output_name));
if (compiled_expr.input_types().contains(final_output_name)) {
return absl::FailedPreconditionError(absl::StrCat(
final_output_name, " present both as an input and as final output"));
}
if (compiled_expr.named_output_types().contains(final_output_name)) {
return absl::FailedPreconditionError(
absl::StrCat(final_output_name,
" present both as final output and as named output"));
}
for (const auto& [name, field_qtype] : compiled_expr.input_types()) {
if (compiled_expr.named_output_types().contains(name)) {
return absl::FailedPreconditionError(
absl::StrCat(name, " present both as an input and as named output"));
}
}
for (const auto& [name, field_qtype] : compiled_expr.named_output_types()) {
RETURN_IF_ERROR(CheckField(qtype, struct_slot_map, field_qtype, name));
}
absl::flat_hash_map<std::string, TypedSlot> named_output_slots;
named_output_slots.reserve(compiled_expr.named_output_types().size());
for (const auto& [name, _] : compiled_expr.named_output_types()) {
named_output_slots.emplace(name, struct_slot_map.at(name));
}
return IoSlots{.input_slots = input_slots,
.output_slot = struct_slot_map.at(final_output_name),
.named_output_slots = named_output_slots};
}
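// To summarize the validation above: the final output must resolve to a
// struct field of the expected type, and the three name sets (inputs, named
// outputs, and the final output) must be pairwise disjoint; every collision
// is reported as FailedPrecondition, matching the *Collision tests in the
// accompanying test file.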
} | #include "arolla/serving/inplace_expr_compiler.h"
#include <array>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/serving/expr_compiler.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/testing/status_matchers_backport.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::arolla::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::MatchesRegex;
struct UnsupportedType {};
struct TestOutputStruct {
double x_plus_y;
double x_times_y;
UnsupportedType unsupported_type_field;
double unused;
static auto ArollaStructFields() {
using CppType = TestOutputStruct;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x_plus_y),
AROLLA_DECLARE_STRUCT_FIELD(x_times_y),
AROLLA_SKIP_STRUCT_FIELD(unsupported_type_field),
AROLLA_DECLARE_STRUCT_FIELD(unused),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStruct {
float x;
double y;
void* unsupported_field;
TestOutputStruct side_outputs;
static auto ArollaStructFields() {
using CppType = TestStruct;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
AROLLA_SKIP_STRUCT_FIELD(unsupported_field),
AROLLA_DECLARE_STRUCT_FIELD(side_outputs),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStructWithOptional {
OptionalValue<float> x;
OptionalValue<double> y;
std::array<int, 6> skip_me;
OptionalValue<double> x_plus_y;
constexpr static auto ArollaStructFields() {
using CppType = TestStructWithOptional;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(x),
AROLLA_DECLARE_STRUCT_FIELD(y),
AROLLA_SKIP_STRUCT_FIELD(skip_me),
AROLLA_DECLARE_STRUCT_FIELD(x_plus_y),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
struct TestStructWithString {
std::string title;
UnsupportedType it_is_not_supported;
OptionalValue<::arolla::Bytes> name;
UnsupportedType not_supported_sorry;
std::string full_name;
static auto ArollaStructFields() {
using CppType = TestStructWithString;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(title),
AROLLA_SKIP_STRUCT_FIELD(it_is_not_supported),
AROLLA_DECLARE_STRUCT_FIELD(name),
AROLLA_SKIP_STRUCT_FIELD(not_supported_sorry),
AROLLA_DECLARE_STRUCT_FIELD(full_name),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
}
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_OUTPUT_STRUCT, TestOutputStruct);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_OUTPUT_STRUCT, TestOutputStruct);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT, TestStruct);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT, TestStruct);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT_WITH_OPTIONAL, TestStructWithOptional);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT_WITH_OPTIONAL, TestStructWithOptional);
AROLLA_DECLARE_SIMPLE_QTYPE(TEST_STRUCT_WITH_STRING, TestStructWithString);
AROLLA_DEFINE_SIMPLE_QTYPE(TEST_STRUCT_WITH_STRING, TestStructWithString);
namespace {
class FailingCompiledExpr : public InplaceCompiledExpr {
public:
using InplaceCompiledExpr::InplaceCompiledExpr;
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& /*named_output_slots*/)
const final {
return absl::InternalError("Fake:(");
}
};
TEST(CompileInplaceExprOnStruct, NoFieldNames) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<int32_t>(compiled_expr, "/final_output"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*registered field.*INT32.*")));
}
TEST(CompileInplaceExprOnStruct, NoFinalOutputName) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/final_output"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*input.*/final_output.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, InputTypeMismatch) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/x"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/x.*TEST_STRUCT.*expected.*FLOAT32.*found.*FLOAT64")));
}
TEST(CompileInplaceExprOnStruct, InputTypeUnknown) {
FailingCompiledExpr compiled_expr({}, GetQType<double>(), {});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/qq"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*input.*/qq.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, FinalOutputTypeMismatch) {
FailingCompiledExpr compiled_expr({{"/x", GetQType<double>()}},
GetQType<double>(), {});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/x.*TEST_STRUCT.*expected.*FLOAT32.*found.*FLOAT64")));
}
TEST(CompileInplaceExprOnStruct, SideOutputTypeMismatch) {
FailingCompiledExpr compiled_expr(
{{"/x", GetQType<float>()}}, GetQType<double>(),
{{"/side_outputs/x_times_y", GetQType<float>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/side_outputs/"
"x_times_y.*TEST_STRUCT.*expected.*FLOAT64.*found.*FLOAT32")));
}
TEST(CompileInplaceExprOnStruct, SideOutputUnknown) {
FailingCompiledExpr compiled_expr(
{{"/x", GetQType<float>()}}, GetQType<double>(),
{{"/side_outputs/x_power_y", GetQType<double>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(
absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/side_outputs/x_power_y.*not found.*TEST_STRUCT.*")));
}
TEST(CompileInplaceExprOnStruct, CompiledExprBindingFailure) {
FailingCompiledExpr compiled_expr({{"/x", GetQType<float>()}},
GetQType<double>(), {});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kInternal, "Fake:("));
}
TEST(CompileInplaceExprOnStruct, InputSideOutputCollision) {
FailingCompiledExpr compiled_expr({{"/y", GetQType<double>()}},
GetQType<double>(),
{{"/y", GetQType<double>()}});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/y.*input.*named output.*")));
}
TEST(CompileInplaceExprOnStruct, InputFinalOutputCollision) {
FailingCompiledExpr compiled_expr(
{{"/y", GetQType<double>()}}, GetQType<double>(),
{{"/side_outputs/x_plus_y", GetQType<double>()}});
EXPECT_THAT(CompileInplaceExprOnStruct<TestStruct>(compiled_expr, "/y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*/y.*input.*final output.*")));
}
TEST(CompileInplaceExprOnStruct, SideOutputFinalOutputCollision) {
FailingCompiledExpr compiled_expr(
{{"/y", GetQType<double>()}}, GetQType<double>(),
{{"/side_outputs/x_plus_y", GetQType<double>()}});
EXPECT_THAT(
CompileInplaceExprOnStruct<TestStruct>(compiled_expr,
"/side_outputs/x_plus_y"),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(
".*/side_outputs/x_plus_y.*final output.*named output.*")));
}
class TestBoundExpr final : public BoundExpr {
public:
TestBoundExpr(FrameLayout::Slot<float> x, FrameLayout::Slot<double> y,
FrameLayout::Slot<double> x_plus_y,
FrameLayout::Slot<double> x_times_y)
: BoundExpr(
{{"/x", TypedSlot::FromSlot(x)}, {"/y", TypedSlot::FromSlot(y)}},
TypedSlot::FromSlot(x_plus_y),
{{"/side_outputs/x_times_y", TypedSlot::FromSlot(x_times_y)}}),
x_(x),
y_(y),
x_plus_y_(x_plus_y),
x_times_y_(x_times_y) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
frame.Set(x_plus_y_, frame.Get(x_) + frame.Get(y_));
frame.Set(x_times_y_, frame.Get(x_) * frame.Get(y_));
}
private:
FrameLayout::Slot<float> x_;
FrameLayout::Slot<double> y_;
FrameLayout::Slot<double> x_plus_y_;
FrameLayout::Slot<double> x_times_y_;
};
class TestCompiledExpr : public InplaceCompiledExpr {
public:
TestCompiledExpr()
: InplaceCompiledExpr(
{{"/x", GetQType<float>()}, {"/y", GetQType<double>()}},
GetQType<double>(),
{{"/side_outputs/x_times_y", GetQType<double>()}}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& named_output_slots)
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExpr>(
slots.at("/x").ToSlot<float>().value(),
slots.at("/y").ToSlot<double>().value(),
output_slot.ToSlot<double>().value(),
named_output_slots.at("/side_outputs/x_times_y")
.ToSlot<double>()
.value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessXPlusY) {
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(std::function<absl::Status(TestStruct&)> eval_fn,
CompileInplaceExprOnStruct<TestStruct>(
compiled_expr, "/side_outputs/x_plus_y"));
TestStruct input{
.x = 5.f,
.y = 7.,
.side_outputs = {.x_plus_y = -1, .x_times_y = -1, .unused = -1}};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.side_outputs.x_plus_y, 12);
EXPECT_EQ(input.side_outputs.x_times_y, 35.);
EXPECT_EQ(input.x, 5);
EXPECT_EQ(input.y, 7);
EXPECT_EQ(input.side_outputs.unused, -1.);
}
class TestBoundExprWithOptionals final : public BoundExpr {
public:
TestBoundExprWithOptionals(FrameLayout::Slot<OptionalValue<float>> x,
FrameLayout::Slot<OptionalValue<double>> y,
FrameLayout::Slot<OptionalValue<double>> x_plus_y)
: BoundExpr(
{{"/x", TypedSlot::FromSlot(x)}, {"/y", TypedSlot::FromSlot(y)}},
TypedSlot::FromSlot(x_plus_y), {}),
x_(x),
y_(y),
x_plus_y_(x_plus_y) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
if (frame.Get(x_).present && frame.Get(y_).present) {
frame.Set(x_plus_y_, frame.Get(x_).value + frame.Get(y_).value);
} else {
frame.Set(x_plus_y_, std::nullopt);
}
}
private:
FrameLayout::Slot<OptionalValue<float>> x_;
FrameLayout::Slot<OptionalValue<double>> y_;
FrameLayout::Slot<OptionalValue<double>> x_plus_y_;
};
class TestCompiledExprWithOptionals : public InplaceCompiledExpr {
public:
TestCompiledExprWithOptionals()
: InplaceCompiledExpr({{"/x", GetQType<OptionalValue<float>>()},
{"/y", GetQType<OptionalValue<double>>()}},
GetQType<OptionalValue<double>>(), {}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& /*named_output_slots*/)
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExprWithOptionals>(
slots.at("/x").ToSlot<OptionalValue<float>>().value(),
slots.at("/y").ToSlot<OptionalValue<double>>().value(),
output_slot.ToSlot<OptionalValue<double>>().value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessXPlusYWithOptionals) {
TestCompiledExprWithOptionals compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::Status(TestStructWithOptional&)> eval_fn,
CompileInplaceExprOnStruct<TestStructWithOptional>(compiled_expr,
"/x_plus_y"));
TestStructWithOptional input{.x = 5.f, .y = 7., .x_plus_y = -1};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.x_plus_y, 12.);
EXPECT_EQ(input.x, 5.f);
EXPECT_EQ(input.y, 7.);
}
class TestBoundExprWithStrings final : public BoundExpr {
public:
TestBoundExprWithStrings(FrameLayout::Slot<arolla::Bytes> title,
FrameLayout::Slot<OptionalValue<arolla::Bytes>> name,
FrameLayout::Slot<arolla::Bytes> output)
: BoundExpr({{"/title", TypedSlot::FromSlot(title)},
{"/name", TypedSlot::FromSlot(name)}},
TypedSlot::FromSlot(output), {}),
title_(title),
name_(name),
output_(output) {}
void InitializeLiterals(EvaluationContext*, FramePtr) const final {}
void Execute(EvaluationContext*, FramePtr frame) const final {
if (!frame.Get(name_).present) {
frame.Set(output_, "UNKNOWN");
return;
}
frame.Set(output_,
absl::StrCat(frame.Get(title_), " ", frame.Get(name_).value));
}
private:
FrameLayout::Slot<arolla::Bytes> title_;
FrameLayout::Slot<OptionalValue<arolla::Bytes>> name_;
FrameLayout::Slot<arolla::Bytes> output_;
};
class TestCompiledExprWithStrings : public InplaceCompiledExpr {
public:
TestCompiledExprWithStrings()
: InplaceCompiledExpr(
{{"/title", GetQType<arolla::Bytes>()},
{"/name", GetQType<OptionalValue<arolla::Bytes>>()}},
GetQType<arolla::Bytes>(), {}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& /*named_output_slots*/)
const final {
RETURN_IF_ERROR(VerifySlotTypes(input_types(), slots));
return std::make_unique<TestBoundExprWithStrings>(
slots.at("/title").ToSlot<arolla::Bytes>().value(),
slots.at("/name").ToSlot<OptionalValue<arolla::Bytes>>().value(),
output_slot.ToSlot<arolla::Bytes>().value());
}
};
TEST(CompileInplaceExprOnStructTest, SuccessStringsIO) {
TestCompiledExprWithStrings compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::Status(TestStructWithString&)> eval_fn,
CompileInplaceExprOnStruct<TestStructWithString>(compiled_expr,
"/full_name"));
TestStructWithString input{
.title = "Mr.", .name = arolla::Bytes("Abc"), .full_name = "????"};
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.full_name, "Mr. Abc");
input.name = std::nullopt;
ASSERT_OK(eval_fn(input));
EXPECT_EQ(input.full_name, "UNKNOWN");
}
TEST(CompileDynamicExprOnStructInputTest, TypeError) {
ASSERT_OK(InitArolla());
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("annotation.qtype",
{expr::Leaf("/x"), expr::Literal(GetQType<int>())}));
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr)
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*inconsistent.*qtype.*INT32.*")));
}
TEST(CompileDynamicExprOnStructInputTest, UnknownLeaf) {
ASSERT_OK(InitArolla());
expr::ExprNodePtr expr = expr::Leaf("/unknown");
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr)
.status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unknown inputs: /unknown")));
}
TEST(CompileDynamicExprOnStructInputTest, TypeErrorOnCodegenModel) {
ASSERT_OK(InitArolla());
TestCompiledExprWithOptionals compiled_expr;
EXPECT_THAT((ExprCompiler<TestStruct, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(compiled_expr)
.status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
MatchesRegex(".*slot types mismatch.*")));
}
TEST(CompileDynamicExprOnStructInputTest, Nested) {
ASSERT_OK(InitArolla());
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("math.add",
{expr::Leaf("/x"), expr::Leaf("/side_outputs/x_plus_y")}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&)> eval_fn,
(ExprCompiler<TestStruct, double>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(expr));
TestStruct input{
.x = 5.f,
.y = -1.,
.side_outputs = {.x_plus_y = 7., .x_times_y = -1, .unused = -1}};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessXPlusYWithOptionals) {
ASSERT_OK(InitArolla());
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("math.add", {expr::Leaf("/x"), expr::Leaf("/y")}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<std::optional<double>>(
const TestStructWithOptional&)>
eval_fn,
(ExprCompiler<TestStructWithOptional, std::optional<double>>())
.SetInputLoader(CreateStructInputLoader<TestStructWithOptional>())
.Compile(expr));
TestStructWithOptional input{.x = 5.f, .y = 7., .x_plus_y = -1};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
input.x = std::nullopt;
EXPECT_THAT(eval_fn(input), IsOkAndHolds(std::nullopt));
}
TEST(CompileDynamicExprOnStructInputTest, ErrorStatus) {
ASSERT_OK(InitArolla());
absl::StatusOr<expr::ExprNodePtr> status_or_expr =
absl::InternalError("input error");
auto result =
ExprCompiler<TestStructWithOptional, std::optional<double>>()
.SetInputLoader(CreateStructInputLoader<TestStructWithOptional>())
.Compile(status_or_expr);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInternal,
MatchesRegex("input error")));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessXPlusYOnCodegenModel) {
ASSERT_OK(InitArolla());
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&)> eval_fn,
(ExprCompiler<TestStruct, double>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.Compile(compiled_expr));
TestStruct input{.x = 5.f, .y = 7.};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(12.));
}
TEST(CompileDynamicExprOnStructInputTest, SuccessSideOutputOnCodegenModel) {
ASSERT_OK(InitArolla());
TestCompiledExpr compiled_expr;
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<double>(const TestStruct&, TestStruct*)>
eval_fn,
(ExprCompiler<TestStruct, double, TestStruct>())
.SetInputLoader(CreateStructInputLoader<TestStruct>())
.SetSlotListener(CreateStructSlotListener<TestStruct>())
.Compile(compiled_expr));
TestStruct input{.x = 5.f, .y = 7.};
EXPECT_THAT(eval_fn(input, nullptr), IsOkAndHolds(12.));
EXPECT_THAT(eval_fn(input, &input), IsOkAndHolds(12.));
EXPECT_EQ(input.side_outputs.x_times_y, 35);
}
TEST(CompileDynamicExprOnStructWithBytesInputTest, SuccessUpper) {
ASSERT_OK(InitArolla());
ASSERT_OK_AND_ASSIGN(expr::ExprNodePtr title,
expr::CallOp("strings.decode", {expr::Leaf("/title")}));
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr name,
expr::CallOp("strings.upper",
{expr::CallOp("strings.decode", {expr::Leaf("/name")})}));
ASSERT_OK_AND_ASSIGN(
expr::ExprNodePtr expr,
expr::CallOp("strings.join", {title, expr::Literal(Text(" ")), name}));
ASSERT_OK_AND_ASSIGN(expr,
expr::CallOp("core.get_optional_value",
{expr::CallOp("strings.encode", {expr})}));
ASSERT_OK_AND_ASSIGN(
std::function<absl::StatusOr<arolla::Bytes>(const TestStructWithString&)>
eval_fn,
(ExprCompiler<TestStructWithString, arolla::Bytes>())
.SetInputLoader(CreateStructInputLoader<TestStructWithString>())
.Compile(expr));
TestStructWithString input{.title = "Mr.", .name = Bytes("abc")};
EXPECT_THAT(eval_fn(input), IsOkAndHolds(Bytes("Mr. ABC")));
input.name = std::nullopt;
EXPECT_THAT(eval_fn(input), StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("expects present value")));
}
}
} |
152 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_TYPE_INFERENCE_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_TYPE_INFERENCE_H_
#include "tensorflow/core/common_runtime/optimization_registry.h"
namespace tensorflow {
class TypeInferencePass : public GraphOptimizationPass {
public:
Status Run(const GraphOptimizationPassOptions& options) override;
};
class WeakTypeInferencePass : public GraphOptimizationPass {
public:
Status Run(const GraphOptimizationPassOptions& options) override;
};
}
#endif
#include "tensorflow/core/common_runtime/type_inference.h"
#include <functional>
#include <list>
#include <queue>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
int MAX_VISITS_PER_NODE = 3;
typedef absl::flat_hash_map<int, std::reference_wrapper<TypeInferenceFn const>>
ForwardInferMap;
typedef absl::flat_hash_map<
int, std::pair<int, std::reference_wrapper<TypeInferenceFn const>>>
ReverseInferMap;
bool all_sources_closed(const Node& n, const absl::flat_hash_set<int>& closed,
const ForwardInferMap& forward,
const ReverseInferMap& reverse) {
for (const auto& e : n.out_edges()) {
if (e->IsControlEdge()) {
continue;
}
int dst_id = e->dst()->id();
if (reverse.contains(dst_id) && !closed.contains(dst_id)) {
return false;
}
}
if (forward.contains(n.id())) {
for (const auto& e : n.in_edges()) {
if (e->IsControlEdge()) {
continue;
}
if (!closed.contains(e->src()->id())) {
return false;
}
}
}
return true;
}
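// Reading of the predicate above: a node's sources are "closed" when every
// non-control consumer that can push types back into it via reverse inference
// is closed, and, if the node has a forward inference function, every
// non-control producer feeding it is closed as well.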
std::vector<std::reference_wrapper<const FullTypeDef>> input_types(
const Node& n) {
static FullTypeDef* no_type = new FullTypeDef();
std::vector<std::reference_wrapper<const FullTypeDef>> input_types;
for (const auto& in_edge : n.in_edges()) {
if (in_edge->IsControlEdge()) {
continue;
}
input_types.push_back(*no_type);
}
for (const auto& in_edge : n.in_edges()) {
if (in_edge->IsControlEdge()) {
continue;
}
VLOG(5) << " in edge: " << in_edge->DebugString();
NodeDef* ndef = in_edge->src()->mutable_def();
if (ndef->has_experimental_type()) {
const auto& t = ndef->experimental_type();
if (t.type_id() != TFT_UNSET) {
DCHECK(t.type_id() == TFT_PRODUCT) << ndef->DebugString();
DCHECK(t.args_size() > in_edge->src_output()) << ndef->DebugString();
input_types.at(in_edge->dst_input()) = t.args(in_edge->src_output());
}
}
}
return input_types;
}
Status update_inferred_type(Node* target, const FullTypeDef& t, bool& updated) {
if (t.type_id() == TFT_UNSET) {
VLOG(3) << " " << target->name() << " no inferred type";
return absl::OkStatus();
}
if (target->def().has_experimental_type()) {
const auto existing = target->def().experimental_type();
if (full_type::IsSubtype(existing, t)) {
VLOG(3) << " " << target->name() << " no new type info";
return absl::OkStatus();
} else if (!full_type::IsSubtype(t, existing)) {
return Status(
absl::StatusCode::kInvalidArgument,
absl::StrCat("type mismatch for node '", target->name(),
"': expected a subtype of:\n", existing.DebugString(),
"\n got:\n", t.DebugString(), "\n "));
}
}
*(target->mutable_def()->mutable_experimental_type()) = t;
updated = true;
VLOG(3) << " " << target->name() << " updated";
return absl::OkStatus();
}
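// Monotonicity sketch for the update above: types are only ever refined. If
// the newly inferred type `t` is a supertype of the existing annotation,
// nothing changes; if `t` is a strict refinement (a subtype), it replaces the
// annotation and `updated` is set; incomparable types are an error, since
// inference must never contradict an earlier result.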
absl::StatusOr<FullTypeDef> run_inference(const string& fn_name,
const TypeRefVector& in_types) {
// Stub: running type inference over called function bodies is not
// implemented here.
return absl::OkStatus();
}
}
Status TypeInferencePass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "TypeInferencePass::Run";
DCHECK(options.graph != nullptr);
Graph* g = options.graph->get();
DCHECK(g != nullptr);
FunctionLibraryDefinition* flib_def = options.flib_def;
DCHECK(flib_def != nullptr);
if (VLOG_IS_ON(1)) {
DumpGraphToFile("forward_type_inference_before", *g, flib_def);
}
for (Node* n : g->nodes()) {
n->UpdateProperties();
}
ForwardInferMap forward;
ReverseInferMap reverse;
for (Node* n : g->nodes()) {
VLOG(4) << "\n node: " << n->def().DebugString()
<< "\n op def: " << n->op_def().DebugString();
const OpRegistrationData* reg;
TF_RETURN_IF_ERROR(flib_def->LookUp(n->op_def().name(), &reg));
if (reg->fwd_type_fn != nullptr) {
forward.emplace(n->id(), reg->fwd_type_fn);
}
if (reg->rev_type_fn != nullptr) {
reverse.emplace(n->id(), std::make_pair(reg->rev_type_input,
std::cref(reg->rev_type_fn)));
}
}
auto infer_forward = [&forward](Node* n, bool& updated) {
if (!forward.contains(n->id())) {
return absl::OkStatus();
}
VLOG(4) << " " << n->name() << " has forward function";
auto in_types = input_types(*n);
const auto& infer_ret = forward.at(n->id())(in_types, run_inference);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
infer_ret.status(),
absl::StrCat("while inferring type of node '", n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
update_inferred_type(n, *infer_ret, updated),
"while updating its output type.");
return absl::OkStatus();
};
auto infer_reverse = [&reverse](Node* n, bool& updated) {
if (!reverse.contains(n->id())) {
return absl::OkStatus();
}
VLOG(4) << " " << n->name() << " has reverse function";
auto in_types = input_types(*n);
auto rev_idx_and_fn = reverse.at(n->id());
const auto& infer_ret = rev_idx_and_fn.second(in_types, run_inference);
const Edge* e;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
n->input_edge(rev_idx_and_fn.first, &e),
absl::StrCat("while querying input ", rev_idx_and_fn.first, " of '",
n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
infer_ret.status(),
absl::StrCat("while inferring type of node '", e->src()->name(),
"' via '", n->name(), "'"));
TF_RETURN_WITH_CONTEXT_IF_ERROR(
update_inferred_type(e->src(), *infer_ret, updated),
absl::StrCat("while updating its output type inferred from '",
n->name(), ","));
return absl::OkStatus();
};
std::list<int> queue;
absl::flat_hash_set<int> in_queue;
absl::flat_hash_map<int, int> visit_count;
absl::flat_hash_set<int> open;
absl::flat_hash_set<int> closed;
int max_passes = g->num_nodes();
int visits = 0;
for (Node* n : g->nodes()) {
const int nid = n->id();
bool niladic = true;
for (const auto& e : n->in_edges()) {
if (!e->IsControlEdge()) {
niladic = false;
break;
}
}
if (niladic) {
queue.emplace_back(nid);
in_queue.emplace(nid);
}
open.emplace(nid);
visit_count.emplace(nid, 0);
}
for (int i = 0; i < max_passes; i++) {
VLOG(2) << "Iteration " << i << ", " << queue.size() << " nodes in queue";
while (!queue.empty()) {
int nid = queue.front();
Node* n = g->FindNodeId(nid);
VLOG(3) << " visiting " << n->name();
visits++;
visit_count[nid]++;
DCHECK(!closed.contains(nid));
bool updated = false;
TF_RETURN_IF_ERROR(infer_forward(n, updated));
TF_RETURN_IF_ERROR(infer_reverse(n, updated));
VLOG(4) << " done " << n->def().DebugString();
queue.pop_front();
in_queue.erase(nid);
open.erase(nid);
if (visit_count.at(nid) >= MAX_VISITS_PER_NODE) {
VLOG(3) << " closing " << n->name() << " - visit limit reached";
closed.emplace(nid);
} else if (all_sources_closed(*n, closed, forward, reverse)) {
VLOG(3) << " closing " << n->name() << " - all sources closed";
closed.emplace(nid);
}
for (const auto& out_edge : n->out_edges()) {
if (out_edge->IsControlEdge()) {
continue;
}
Node* c = out_edge->dst();
int cid = c->id();
if (closed.contains(cid) || in_queue.contains(cid)) {
continue;
}
if (updated || all_sources_closed(*c, closed, forward, reverse)) {
queue.emplace_back(cid);
in_queue.emplace(cid);
}
}
if (updated && reverse.contains(nid)) {
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(reverse.at(nid).first, &e));
Node* c = e->src();
int cid = c->id();
if (!closed.contains(cid) && !in_queue.contains(cid)) {
queue.emplace_back(cid);
in_queue.emplace(cid);
}
}
}
VLOG(2) << "Done iteration " << i << ", " << closed.size()
<< " nodes closed";
if (open.empty()) {
VLOG(1) << "Finished after " << i + 1 << " iterations; done "
<< closed.size() << " of " << g->num_nodes() << " nodes in "
<< visits << " visits";
break;
} else {
queue.emplace_back(*(open.begin()));
}
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("forward_type_inference_after", *g, flib_def);
}
return absl::OkStatus();
}
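// The solver above is a bounded worklist fixpoint: niladic nodes seed the
// queue, each visit runs the node's forward and (if registered) reverse
// inference functions, and a node closes once all of its type sources are
// closed or it has been visited MAX_VISITS_PER_NODE times. Re-seeding from
// `open` after each drain guarantees progress on cyclic graphs (v1 control
// flow), and the outer loop is capped at num_nodes passes.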
Status WeakTypeInferencePass::Run(
const GraphOptimizationPassOptions& options) {
TypeInferencePass pass;
const auto& pass_status = pass.Run(options);
if (!pass_status.ok()) {
LOG_FIRST_N(WARNING, 1)
<< "Type inference failed. This indicates an "
"invalid graph that escaped type checking. Error message: "
<< pass_status.ToString();
}
return absl::OkStatus();
}
REGISTER_OPTIMIZATION(OptimizationPassRegistry::PRE_PLACEMENT, 99999,
WeakTypeInferencePass);
} | #include "tensorflow/core/common_runtime/type_inference.h"
#include <functional>
#include <string>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
TypeInferencePass pass;
return pass.Run(opt_options);
}
TEST(TypeInferenceTest, BasicStraightline) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Attr("T", DT_VARIANT)
.Finalize(root.graph(), &id));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if ((node->name() == "ds") || ((node->name() == "id"))) {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_DATASET) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, CyclicGraphWithV1ControlFlow) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* enter;
TF_ASSERT_OK(NodeBuilder("enter", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &enter));
Node* loop_cond;
TF_ASSERT_OK(NodeBuilder("loop_cond", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(cond.node())})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &loop_cond));
Node* merge;
TF_ASSERT_OK(
NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(enter), NodeBuilder::NodeOut(enter)})
.Finalize(root.graph(), &merge));
Node* sw;
TF_ASSERT_OK(NodeBuilder("sw", "Switch", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(merge)})
.Input({NodeBuilder::NodeOut(loop_cond)})
.Finalize(root.graph(), &sw));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &id));
Node* next;
TF_ASSERT_OK(NodeBuilder("next", "NextIteration", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(id)})
.Finalize(root.graph(), &next));
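  // Close the loop: route NextIteration's output back into Merge's second
  // input, forming the v1 control-flow cycle.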
TF_ASSERT_OK(root.graph()->UpdateEdge(next, 0, merge, 1));
Node* exit;
TF_ASSERT_OK(NodeBuilder("exit", "Exit", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &exit));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if ((node->name() == "ds") || (node->name() == "id") ||
(node->name() == "enter") || (node->name() == "exit") ||
(node->name() == "sw") || (node->name() == "merge") ||
(node->name() == "next")) {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_DATASET) << node->def().DebugString();
}
}
}
REGISTER_OP("TestSourceOp").Output("o: variant");
REGISTER_OP("TestTensorUnaryOp")
.Input("i: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_TENSOR);
return t;
});
REGISTER_OP("TestArrayUnaryOp")
.Input("i: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
t.add_args()->set_type_id(TFT_ARRAY);
return t;
});
REGISTER_OP("TestMergeOp")
.Input("i1: variant")
.Input("i2: variant")
.Output("o: variant")
.SetForwardTypeFn([](const TypeRefVector& input_types,
const FunctionTypeInferrer& call_infer) {
EXPECT_EQ(input_types.size(), 2);
FullTypeDef t;
t.set_type_id(TFT_PRODUCT);
if ((input_types[0].get().type_id() == TFT_TENSOR) &&
(input_types[1].get().type_id() == TFT_ARRAY)) {
t.add_args()->set_type_id(TFT_ARRAY);
} else {
t.add_args()->set_type_id(TFT_ANY);
}
return t;
});
TEST(TypeInferenceTest, TernaryNodeWithIgnoredInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* id;
TF_ASSERT_OK(NodeBuilder("id", "Identity", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &id));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(id)})
.Finalize(root.graph(), &an));
Node* m;
TF_ASSERT_OK(NodeBuilder("m", "TestMergeOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(tn)})
.Input({NodeBuilder::NodeOut(an)})
.Finalize(root.graph(), &m));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ARRAY) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, BinaryNodeWithUnorderedInputs) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* m;
TF_ASSERT_OK(NodeBuilder("m", "TestMergeOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &m));
TF_ASSERT_OK(root.ToGraph(graph.get()));
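  // ToGraph copied the nodes, so look the copies up by name, then rewire m's
  // inputs out of order to exercise unordered-input handling.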
Node* m_copy = nullptr;
Node* tn_copy = nullptr;
Node* an_copy = nullptr;
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
m_copy = node;
} else if (node->name() == "tn") {
tn_copy = node;
} else if (node->name() == "an") {
an_copy = node;
}
}
TF_ASSERT_OK(graph->UpdateEdge(an_copy, 0, m_copy, 1));
TF_ASSERT_OK(graph->UpdateEdge(tn_copy, 0, m_copy, 0));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "m") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ARRAY) << node->def().DebugString();
}
}
}
TEST(TypeInferenceTest, BinaryNodeWithCycleInput) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* enter;
TF_ASSERT_OK(NodeBuilder("enter", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(an)})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &enter));
Node* loop_cond;
TF_ASSERT_OK(NodeBuilder("loop_cond", "Enter", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(cond.node())})
.Attr("frame_name", "loop")
.Finalize(root.graph(), &loop_cond));
Node* merge;
TF_ASSERT_OK(
NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(enter), NodeBuilder::NodeOut(enter)})
.Finalize(root.graph(), &merge));
Node* sw;
TF_ASSERT_OK(NodeBuilder("sw", "Switch", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(merge)})
.Input({NodeBuilder::NodeOut(loop_cond)})
.Finalize(root.graph(), &sw));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &tn));
Node* next;
TF_ASSERT_OK(NodeBuilder("next", "NextIteration", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(tn)})
.Finalize(root.graph(), &next));
TF_ASSERT_OK(root.graph()->UpdateEdge(next, 0, merge, 1));
Node* exit;
TF_ASSERT_OK(NodeBuilder("exit", "Exit", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(sw)})
.Finalize(root.graph(), &exit));
TF_ASSERT_OK(root.ToGraph(graph.get()));
const auto& status = Rewrite(&graph);
ASSERT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("expected compatible input types"));
}
TEST(WeakTypeInferenceTest, AlwaysSucceeds) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* an;
TF_ASSERT_OK(NodeBuilder("an", "TestArrayUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &an));
Node* tn;
TF_ASSERT_OK(NodeBuilder("tn", "TestTensorUnaryOp", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Finalize(root.graph(), &tn));
Node* merge;
TF_ASSERT_OK(NodeBuilder("merge", "Merge", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(an), NodeBuilder::NodeOut(tn)})
.Finalize(root.graph(), &merge));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(graph->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options;
opt_options.session_options = &session_options;
opt_options.graph = &graph;
opt_options.flib_def = &flib_def;
WeakTypeInferencePass pass;
TF_ASSERT_OK(pass.Run(opt_options));
}
TEST(ReverseTypeInferenceTest, BasicVDependency) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto start = ops::Placeholder(root.WithOpName("start"), DT_INT64);
auto stop = ops::Placeholder(root.WithOpName("stop"), DT_INT64);
auto step = ops::Placeholder(root.WithOpName("step"), DT_INT64);
Node* ds;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(NodeBuilder("ds", "RangeDataset", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(start.node())})
.Input({NodeBuilder::NodeOut(stop.node())})
.Input({NodeBuilder::NodeOut(step.node())})
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &ds));
Node* it;
TF_ASSERT_OK(
NodeBuilder("it", "AnonymousIteratorV2", &root.graph()->flib_def())
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &it));
Node* it_ctor;
TF_ASSERT_OK(NodeBuilder("it_ctor", "MakeIterator", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(ds)})
.Input({NodeBuilder::NodeOut(it)})
.Finalize(root.graph(), &it_ctor));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "it") {
const auto& t = node->def().experimental_type();
ASSERT_EQ(t.type_id(), TFT_PRODUCT) << node->def().DebugString();
EXPECT_EQ(t.args(0).type_id(), TFT_ITERATOR) << node->def().DebugString();
}
}
}
TEST(ReverseTypeInferenceTest, FromUnsetType) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
Node* s;
TF_ASSERT_OK(NodeBuilder("s", "TestSourceOp", &root.graph()->flib_def())
.Finalize(root.graph(), &s));
Node* it;
TensorShapeProto shape;
shape.mutable_dim();
shape.set_unknown_rank(false);
TF_ASSERT_OK(
NodeBuilder("it", "AnonymousIteratorV2", &root.graph()->flib_def())
.Attr("output_types", {DT_INT32})
.Attr("output_shapes", {shape})
.Finalize(root.graph(), &it));
Node* it_ctor;
TF_ASSERT_OK(NodeBuilder("it_ctor", "MakeIterator", &root.graph()->flib_def())
.Input({NodeBuilder::NodeOut(s)})
.Input({NodeBuilder::NodeOut(it)})
.Finalize(root.graph(), &it_ctor));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
for (const auto& node : graph->nodes()) {
if (node->name() == "it") {
ASSERT_FALSE(node->def().has_experimental_type());
}
}
}
}
} |
153 | #ifndef TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_MGR_H_
#define TENSORFLOW_CORE_DISTRIBUTED_RUNTIME_EAGER_REMOTE_MGR_H_
#include <unordered_map>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace eager {
class RemoteMgr {
public:
RemoteMgr(bool is_master, EagerContext* ctx)
: is_master_(is_master), parent_(ctx) {}
~RemoteMgr() {
for (const auto& entry : remote_tensor_handle_map_) {
entry.second->Unref();
}
}
bool IsMaster() { return is_master_; }
void AddOperationOutputs(
const absl::Span<tensorflow::TensorHandle* const> handles,
int64_t operation_id);
void AddOperationOutput(tensorflow::TensorHandle* handle,
int64_t operation_id, int32_t output_num);
Status GetTensorHandle(const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle);
Status DeleteTensorHandle(const RemoteTensorHandleInternal& remote_handle);
uint64 NextOpId() {
DCHECK(is_master_);
mutex_lock l(next_id_mutex_);
return next_op_id_++;
}
Status SerializeRemoteTensorHandle(
TensorHandle* in, const bool wait_until_ready, RemoteTensorHandle* out,
Device* device, absl::string_view device_name = "",
const bool serialize_resource_dtype_and_shape = false);
Status DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
TensorHandle** out);
EagerExecutor& GetOrCreateExecutorForStream(uint64 stream_id);
void DeleteExecutorForStream(uint64 stream_id);
protected:
mutex next_id_mutex_;
uint64 next_op_id_ TF_GUARDED_BY(next_id_mutex_) = 1;
private:
Status GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
const bool wait_until_ready, int64_t* op_id,
int32* output_num)
TF_SHARED_LOCKS_REQUIRED(remote_tensor_handle_mu_);
Status GetTensorHandleImpl(const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle)
TF_SHARED_LOCKS_REQUIRED(remote_tensor_handle_mu_);
Status GetMirroredResourceShape(
const RemoteTensorHandleInternal& remote_handle,
std::vector<DtypeAndPartialTensorShape>* handle);
bool is_master_;
using RemoteTensorHandleMap =
gtl::FlatMap<RemoteTensorHandleInternal, tensorflow::TensorHandle*,
RemoteTensorHandleInternalHash,
RemoteTensorHandleInternalEquals>;
using MirroredResourceShapeMap = gtl::FlatMap<
RemoteTensorHandleInternal, std::vector<DtypeAndPartialTensorShape>,
RemoteTensorHandleInternalHash, RemoteTensorHandleInternalEquals>;
mutex remote_tensor_handle_mu_;
RemoteTensorHandleMap remote_tensor_handle_map_
TF_GUARDED_BY(remote_tensor_handle_mu_);
mutex mirrored_resource_shape_mu_;
MirroredResourceShapeMap mirrored_resource_shape_map_
TF_GUARDED_BY(mirrored_resource_shape_mu_);
EagerContext* parent_;
mutex executor_map_mu_;
std::unordered_map<uint64, EagerExecutor> executor_map_
TF_GUARDED_BY(executor_map_mu_);
};
}
}
#endif
#include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
Status WithErrorSourcePayload(Status error) {
core::platform::ErrorSourceProto error_source_proto;
error_source_proto.set_error_source(
core::platform::ErrorSourceProto::EAGER_REMOTE_MGR);
error.SetPayload(tensorflow::kErrorSource,
absl::Cord(error_source_proto.SerializeAsString()));
return error;
}
}
namespace eager {
void RemoteMgr::AddOperationOutputs(
const absl::Span<tensorflow::TensorHandle* const> handles,
int64_t operation_id) {
mutex_lock l(remote_tensor_handle_mu_);
for (int i = 0, end = handles.size(); i < end; i++) {
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, i), handles[i]);
}
}
void RemoteMgr::AddOperationOutput(tensorflow::TensorHandle* handle,
int64_t operation_id, int32_t output_num) {
mutex_lock l(remote_tensor_handle_mu_);
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, output_num), handle);
}
Status RemoteMgr::GetTensorHandleImpl(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter == remote_tensor_handle_map_.end()) {
std::string error_message = absl::StrCat(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup.");
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
if (result) {
std::string error_message_ext;
absl::StrAppend(
&error_message_ext, error_message,
"Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem.");
return WithErrorSourcePayload(
absl::InvalidArgumentError(error_message_ext));
}
return WithErrorSourcePayload(absl::InvalidArgumentError(error_message));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetTensorHandle(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
tf_shared_lock l(remote_tensor_handle_mu_);
return GetTensorHandleImpl(remote_handle, handle);
}
Status RemoteMgr::GetMirroredResourceShape(
const RemoteTensorHandleInternal& remote_handle,
std::vector<DtypeAndPartialTensorShape>* handle) {
tf_shared_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter == mirrored_resource_shape_map_.end()) {
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup. Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem."));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
const bool wait_until_ready,
int64_t* op_id, int32* output_num) {
TF_RETURN_IF_ERROR(handle->RemoteAddress(handle->device(), wait_until_ready,
op_id, output_num));
tensorflow::TensorHandle* h;
TF_RETURN_IF_ERROR(
GetTensorHandleImpl(RemoteTensorHandleInternal(*op_id, *output_num), &h));
if (handle != h) {
return WithErrorSourcePayload(errors::Internal(
"Found two different tensor handles with the same op_id:", *op_id,
" and output_num:", *output_num));
}
return absl::OkStatus();
}
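// Deletion probes the owned-handle map first (unreffing the handle), then
// the mirrored-resource-shape map; each lock is scoped so neither is held
// while probing the other map.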
Status RemoteMgr::DeleteTensorHandle(
const RemoteTensorHandleInternal& remote_handle) {
{
mutex_lock l(remote_tensor_handle_mu_);
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter != remote_tensor_handle_map_.end()) {
iter->second->Unref();
remote_tensor_handle_map_.erase(iter);
return absl::OkStatus();
}
}
{
mutex_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter != mirrored_resource_shape_map_.end()) {
mirrored_resource_shape_map_.erase(iter);
return absl::OkStatus();
}
}
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num));
}
Status RemoteMgr::SerializeRemoteTensorHandle(
TensorHandle* in, const bool wait_until_ready, RemoteTensorHandle* out,
Device* device, absl::string_view device_name,
const bool serialize_resource_dtype_and_shape) {
int64_t op_id;
int32_t output_num;
auto status =
in->RemoteAddress(device, wait_until_ready, &op_id, &output_num);
if (!status.ok()) {
    LOG(ERROR)
        << "Failed to get remote address for tensor handle on device "
        << device->name() << ": " << status.message();
tf_shared_lock l(remote_tensor_handle_mu_);
TF_RETURN_IF_ERROR(
GetRemoteTensorHandle(in, wait_until_ready, &op_id, &output_num));
}
out->Clear();
out->set_op_id(op_id);
out->set_output_num(output_num);
out->set_op_device(in->op_device() ? in->op_device()->name() : "");
out->set_device(device_name.empty()
? std::string(in->DeviceOrHostCPU(*parent_)->name())
: std::string(device_name));
out->set_dtype(in->dtype);
if (serialize_resource_dtype_and_shape) {
std::vector<DtypeAndPartialTensorShape> resource_dtypes_and_shapes;
TF_RETURN_IF_ERROR(
in->GetResourceHandleDtypesAndShapes(&resource_dtypes_and_shapes));
for (const auto& dtype_and_shape : resource_dtypes_and_shapes) {
ResourceDtypeAndShape* dtype_and_shape_proto =
out->add_resource_dtypes_and_shapes();
dtype_and_shape_proto->set_dtype(dtype_and_shape.dtype);
dtype_and_shape.shape.AsProto(dtype_and_shape_proto->mutable_shape());
}
}
return absl::OkStatus();
}
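// Deserialization prefers a handle already resident on a local device;
// otherwise it creates a lazy remote handle and caches any resource
// dtype/shape info so later deserializations skip reparsing the proto.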
Status RemoteMgr::DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
TensorHandle** out) {
Device* device;
if (parent_->local_device_mgr()->LookupDevice(in.op_device(), &device).ok() ||
parent_->local_device_mgr()->LookupDevice(in.device(), &device).ok()) {
TF_RETURN_IF_ERROR(GetTensorHandle(RemoteTensorHandleInternal(in), out));
(*out)->Ref();
} else {
const string& device_name =
in.op_device().empty() ? in.device() : in.op_device();
TF_RETURN_IF_ERROR(
parent_->FindDeviceFromName(device_name.c_str(), &device));
*out = TensorHandle::CreateLazyRemoteHandle(in.op_id(), in.output_num(),
in.dtype(), device,
true, parent_);
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
if (!GetMirroredResourceShape(RemoteTensorHandleInternal(in),
&dtypes_and_shapes)
.ok()) {
for (const auto& dtype_and_shape_proto :
in.resource_dtypes_and_shapes()) {
dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{
dtype_and_shape_proto.dtype(),
TensorShape(dtype_and_shape_proto.shape())});
}
mutex_lock l(mirrored_resource_shape_mu_);
mirrored_resource_shape_map_.emplace(
RemoteTensorHandleInternal(in.op_id(), in.output_num()),
dtypes_and_shapes);
}
(*out)->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
}
return absl::OkStatus();
}
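// Executors are created lazily, one per stream id. Piecewise construction
// builds the EagerExecutor in place because the map stores it by value; the
// `true` argument presumably selects async execution.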
EagerExecutor& RemoteMgr::GetOrCreateExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
auto it_and_bool = executor_map_.emplace(
std::piecewise_construct, std::forward_as_tuple(stream_id),
std::forward_as_tuple(true));
DCHECK(it_and_bool.second);
it = it_and_bool.first;
}
return it->second;
}
void RemoteMgr::DeleteExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
return;
}
Status s = it->second.ShutDown();
if (!s.ok()) {
LOG(ERROR) << "EagerExecutor shutdown with error " << s.message();
}
executor_map_.erase(it);
}
}
} | #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
namespace tensorflow {
namespace eager {
namespace {
class TestRemoteMgr : public RemoteMgr {
public:
TestRemoteMgr(bool is_master, EagerContext* ctx)
: RemoteMgr(is_master, ctx) {}
uint64 OpId() {
tf_shared_lock l(next_id_mutex_);
return next_op_id_;
}
};
class RemoteMgrTest : public ::testing::Test {
public:
RemoteMgrTest() {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
local_device_ = devices.back().get();
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:worker/replica:0/task:0"));
remote_device_ = devices.back().get();
auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
auto rendezvous = tsl::core::RefCountPtr<tensorflow::Rendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
ctx_ = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr.release(), true, std::move(rendezvous),
nullptr, nullptr, true);
}
~RemoteMgrTest() override { ctx_->Unref(); }
Device* local_device_;
Device* remote_device_;
EagerContext* ctx_;
};
TEST_F(RemoteMgrTest, SerializeLocalTensorHandleWithRemoteMirror) {
RemoteMgr remote_mgr(false, ctx_);
const TensorShape shape({0});
Tensor t(DT_FLOAT, shape);
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, "", ctx_));
TF_ASSERT_OK(
handle->SetRemoteShape(shape, remote_device_, ctx_->GetContextViewId()));
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_,
remote_device_->name()));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, SerializeRemoteTensorHandle) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, InvalidateRemoteMirrorWithClusterUpdate) {
RemoteMgr remote_mgr(false, ctx_);
Tensor t(DT_FLOAT, TensorShape({0}));
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, "", ctx_));
EXPECT_TRUE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
ctx_->IncrementContextViewId();
EXPECT_FALSE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
EXPECT_FALSE(handle
->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId())
.ok());
handle->Unref();
}
TEST_F(RemoteMgrTest, SetRemoteShapeWithClusterUpdate) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
"", DT_FLOAT, remote_device_, ctx_);
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
"", DT_FLOAT, remote_device_, ctx_);
ctx_->IncrementContextViewId();
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
}
TEST_F(RemoteMgrTest, ErrorSourcesShouldExist) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
remote_mgr.AddOperationOutput(handle, op_id, output_num);
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_));
auto remote_handle_internal = RemoteTensorHandleInternal(remote_handle);
TF_ASSERT_OK(remote_mgr.DeleteTensorHandle(remote_handle_internal));
Status s = remote_mgr.DeleteTensorHandle(remote_handle_internal);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
TensorHandle* out;
s = remote_mgr.GetTensorHandle(remote_handle_internal, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
s = remote_mgr.DeserializeRemoteTensorHandle(remote_handle, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
}
}
}
} |
154 | #ifndef TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_POD_STATS_H_
#define TENSORFLOW_CORE_PROFILER_CONVERT_OP_STATS_TO_POD_STATS_H_
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/pod_stats.pb.h"
namespace tensorflow {
namespace profiler {
PodStatsDatabase ConvertOpStatsToPodStats(const OpStats& op_stats);
}
}
#endif
#include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
PodStatsRecord CreatePodStatsRecord(absl::string_view host_name,
const StepInfoResult& step_info) {
PodStatsRecord record;
GenericStepBreakdown generic;
bool success = step_info.step_breakdown().UnpackTo(&generic);
DCHECK(success);
record.set_host_name(string(host_name));
record.set_step_num(step_info.step_num());
record.set_total_duration_us(
tsl::profiler::PicoToMicro(step_info.duration_ps()));
auto& step_breakdown_map = *record.mutable_step_breakdown_us();
std::vector<std::pair<uint64, absl::string_view>> metrics;
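  // Sums the picosecond totals of the listed raw event types into one
  // generic bucket, records the microsecond value in the breakdown map, and
  // keeps the (ps, name) pair for the bottleneck computation below.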
auto add_event = [&](GenericEventType type,
std::initializer_list<EventType> event_list) {
uint64 ps = 0;
for (const auto& event_type : event_list) {
ps += gtl::FindWithDefault(generic.type_ps(), event_type, 0);
}
step_breakdown_map[type] = tsl::profiler::PicoToMicro(ps);
metrics.emplace_back(ps, GetGenericEventTypeStr(type));
};
add_event(kDeviceCompute, {DEVICE_COMPUTE_32, DEVICE_COMPUTE_16});
add_event(kDeviceToDevice, {DEVICE_TO_DEVICE, DEVICE_WAIT_DEVICE});
add_event(kDeviceCollectives, {DEVICE_COLLECTIVES});
add_event(kHostCompute, {HOST_COMPUTE});
add_event(kHostPrepare, {HOST_PREPARE});
add_event(kInput, {HOST_WAIT_INPUT, HOST_TO_DEVICE, DEVICE_WAIT_HOST});
add_event(kOutput, {DEVICE_TO_HOST});
add_event(kCompile, {HOST_COMPILE});
add_event(kAllOthers, {UNKNOWN_TIME});
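  // The bottleneck is the generic category with the largest total duration;
  // an ascending sort puts it at the back.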
std::sort(metrics.begin(), metrics.end());
record.set_bottleneck(metrics.back().second.data(),
metrics.back().second.size());
return record;
}
}
PodStatsDatabase ConvertOpStatsToPodStats(const OpStats& op_stats) {
PodStatsDatabase pod_stats_db;
const auto& core_id_map = op_stats.core_id_to_details();
for (int i = GenericEventType::kFirstGenericEventType;
i <= GenericEventType::kLastGenericEventType; i++) {
auto& event = *pod_stats_db.add_step_breakdown_events();
event.set_id(i);
absl::string_view type_str =
GetGenericEventTypeStr(static_cast<GenericEventType>(i));
event.set_name(type_str.data(), type_str.size());
}
for (const auto& step_sequence : op_stats.step_db().step_sequence()) {
for (const auto& entry : step_sequence.step_info_per_core()) {
if (!core_id_map.contains(entry.first)) {
LOG(WARNING) << "core_id_map does not contain " << entry.first;
continue;
}
const CoreDetails& details = core_id_map.at(entry.first);
*pod_stats_db.add_pod_stats_record() =
CreatePodStatsRecord(details.hostname(), entry.second);
}
}
PopulateStepDiagnostics(op_stats, pod_stats_db.mutable_diagnostics());
return pod_stats_db;
}
}
} | #include "tensorflow/core/profiler/convert/op_stats_to_pod_stats.h"
#include "google/protobuf/any.pb.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/diagnostics.pb.h"
#include "tensorflow/core/profiler/protobuf/op_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/utils/diagnostics.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/math_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
const double kMaxError = 1e-6;
constexpr int kStepNum = 2;
constexpr int kCoreId = 1001;
constexpr int kStepTimePs = 1000;
constexpr int kHostComputePs = 50;
constexpr int kHostCompilePs = 50;
constexpr int kHostToHostPs = 50;
constexpr int kHostToDevicePs = 50;
constexpr int kHostPreparePs = 50;
constexpr int kDeviceCollectivePs = 350;
constexpr int kHostWaitInputPs = 50;
constexpr int kDeviceToDevicePs = 50;
constexpr int kDeviceToHostPs = 50;
constexpr int kDeviceCompute32Ps = 50;
constexpr int kDeviceCompute16Ps = 50;
constexpr int kDeviceWaitDevicePs = 50;
constexpr int kDeviceWaitHostPs = 50;
constexpr int kUnknownTimePs = 50;
static constexpr char kHostname[] = "host:123";
void CreateOpStats(OpStats* op_stats) {
PerCoreStepInfo* info = op_stats->mutable_step_db()->add_step_sequence();
info->set_step_num(kStepNum);
StepInfoResult& step_info = (*info->mutable_step_info_per_core())[kCoreId];
step_info.set_step_num(kStepNum);
step_info.set_duration_ps(kStepTimePs);
GenericStepBreakdown breakdown;
auto& type_ps = *breakdown.mutable_type_ps();
type_ps[HOST_COMPUTE] = kHostComputePs;
type_ps[HOST_COMPILE] = kHostCompilePs;
type_ps[HOST_TO_HOST] = kHostToHostPs;
type_ps[HOST_TO_DEVICE] = kHostToDevicePs;
type_ps[HOST_PREPARE] = kHostPreparePs;
type_ps[DEVICE_COLLECTIVES] = kDeviceCollectivePs;
type_ps[HOST_WAIT_INPUT] = kHostWaitInputPs;
type_ps[DEVICE_TO_DEVICE] = kDeviceToDevicePs;
type_ps[DEVICE_TO_HOST] = kDeviceToHostPs;
type_ps[DEVICE_COMPUTE_32] = kDeviceCompute32Ps;
type_ps[DEVICE_COMPUTE_16] = kDeviceCompute16Ps;
type_ps[DEVICE_WAIT_DEVICE] = kDeviceWaitDevicePs;
type_ps[DEVICE_WAIT_HOST] = kDeviceWaitHostPs;
type_ps[UNKNOWN_TIME] = kUnknownTimePs;
step_info.mutable_step_breakdown()->PackFrom(breakdown);
CoreDetails& details = (*op_stats->mutable_core_id_to_details())[kCoreId];
details.set_hostname(kHostname);
}
TEST(OpStatsToPodStats, GpuPodStats) {
OpStats op_stats;
CreateOpStats(&op_stats);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.pod_stats_record_size());
const PodStatsRecord& record = pod_stats_db.pod_stats_record(0);
EXPECT_EQ(kStepNum, record.step_num());
EXPECT_EQ(kHostname, record.host_name());
EXPECT_NEAR(tsl::profiler::PicoToMicro(kStepTimePs),
record.total_duration_us(), kMaxError);
const auto& breakdown = record.step_breakdown_us();
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceCompute32Ps + kDeviceCompute16Ps),
breakdown.at(kDeviceCompute), kMaxError);
EXPECT_NEAR(
tsl::profiler::PicoToMicro(kDeviceToDevicePs + kDeviceWaitDevicePs),
breakdown.at(kDeviceToDevice), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceCollectivePs),
breakdown.at(kDeviceCollectives), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostComputePs),
breakdown.at(kHostCompute), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostPreparePs),
breakdown.at(kHostPrepare), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostWaitInputPs + kHostToDevicePs +
kDeviceWaitHostPs),
breakdown.at(kInput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kDeviceToHostPs),
breakdown.at(kOutput), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kHostCompilePs),
breakdown.at(kCompile), kMaxError);
EXPECT_NEAR(tsl::profiler::PicoToMicro(kUnknownTimePs),
breakdown.at(kAllOthers), kMaxError);
EXPECT_EQ(GetGenericEventTypeStr(kDeviceCollectives), record.bottleneck());
}
TEST(OpStatsToPodStats, Diagnostics) {
OpStats op_stats;
op_stats.mutable_step_db()->set_use_incomplete_step(true);
PodStatsDatabase pod_stats_db = ConvertOpStatsToPodStats(op_stats);
EXPECT_EQ(1, pod_stats_db.diagnostics().warnings_size());
EXPECT_EQ(kErrorIncompleteStep, pod_stats_db.diagnostics().warnings(0));
}
}
}
} |
155 | #ifndef XLA_SERVICE_ASYNC_COLLECTIVE_CREATOR_H_
#define XLA_SERVICE_ASYNC_COLLECTIVE_CREATOR_H_
#include <functional>
#include <utility>
#include <vector>
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class AsyncCollectiveCreator : public HloModulePass {
public:
using ContextShapeQuery =
std::function<std::vector<Shape>(const HloInstruction *)>;
struct CollectiveCreatorConfig {
HloPredicate convert_all_reduce = HloPredicateFalse;
HloPredicate convert_all_gather = HloPredicateFalse;
HloPredicate convert_collective_broadcast = HloPredicateFalse;
HloPredicate convert_collective_permute = HloPredicateFalse;
HloPredicate convert_all_to_all = HloPredicateFalse;
HloPredicate convert_reduce_scatter = HloPredicateFalse;
ContextShapeQuery get_context_shapes = [](const HloInstruction *) {
return std::vector<Shape>{};
};
};
explicit AsyncCollectiveCreator(CollectiveCreatorConfig creator_config)
: config_(std::move(creator_config)) {}
absl::string_view name() const override { return "async-collective-creator"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) override;
std::vector<HloInstruction *> MatchCollectives(HloComputation *computation);
absl::StatusOr<bool> ReplaceCollectives(
HloComputation *computation,
std::vector<HloInstruction *> &supported_collectives);
const CollectiveCreatorConfig *config() const { return &config_; }
private:
CollectiveCreatorConfig config_;
};
}
#endif
#include "xla/service/async_collective_creator.h"
#include <cstdint>
#include <iterator>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/shape_inference.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
struct ReplacedAsync {
HloInstruction* start;
HloInstruction* done;
};
absl::StatusOr<ReplacedAsync> CreateAsyncAllReduce(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ar = Cast<HloAllReduceInstruction>(instruction);
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllReduceStart(
ar->shape(), ar->operands(), ar->to_apply(), ar->device_list(),
ar->constrain_layout(), ar->channel_id(),
ar->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ar->shape(), HloOpcode::kAllReduceDone, start));
return ReplacedAsync{start, done};
}
absl::StatusOr<ReplacedAsync> CreateAsyncAllGather(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
auto* ag = Cast<HloAllGatherInstruction>(instruction);
std::vector<const Shape*> operand_shapes;
operand_shapes.reserve(ag->operand_count());
for (const HloInstruction* op : ag->operands()) {
operand_shapes.push_back(&op->shape());
}
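  // An all-gather-start's shape is a tuple of (operand shape(s), result
  // shape); multiple operands are themselves wrapped in a nested tuple.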
Shape shape = ShapeUtil::MakeTupleShape(
{ag->operand_count() > 1
? ShapeUtil::MakeTupleShapeWithPtrs(operand_shapes)
: *operand_shapes[0],
ag->shape()});
HloInstruction* start =
computation->AddInstruction(HloInstruction::CreateAllGatherStart(
shape, ag->operands(), ag->all_gather_dimension(), ag->device_list(),
ag->constrain_layout(), ag->channel_id(),
ag->use_global_device_ids()));
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
ag->shape(), HloOpcode::kAllGatherDone, start));
return ReplacedAsync{start, done};
}
absl::StatusOr<ReplacedAsync> CreateAsyncCollectivePermute(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
auto* cp = Cast<HloCollectivePermuteInstruction>(instruction);
HloInstruction* start;
HloInstruction* operand = cp->mutable_operand(0);
if (cp->operand_count() == 1) {
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(
{&operand->shape()}, context_shapes)
.value(),
operand, cp->source_target_pairs(), cp->channel_id()));
} else {
CHECK_EQ(cp->operand_count(), 4);
std::vector<const Shape*> operand_shapes;
absl::c_transform(
cp->operands(), std::back_inserter(operand_shapes),
[](const HloInstruction* operand) { return &(operand->shape()); });
start = computation->AddInstruction(
HloInstruction::CreateCollectivePermuteStart(
ShapeInference::InferCollectivePermuteStartShape(operand_shapes,
context_shapes)
.value(),
operand, cp->mutable_operand(1), cp->mutable_operand(2),
cp->mutable_operand(3), cp->source_target_pairs(),
cp->dynamic_slice_sizes_list(), cp->channel_id()));
if (HasDisjointReadWriteRegionsAttr(cp)) {
SetDisjointReadWriteRegionsAttr(start);
}
}
HloInstruction* done =
computation->AddInstruction(HloInstruction::CreateUnary(
cp->shape(), HloOpcode::kCollectivePermuteDone, start));
return ReplacedAsync{start, done};
}
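// Collective-broadcast, all-to-all and reduce-scatter have no dedicated
// start/done opcodes, so they are wrapped in generic async-start/async-done
// instructions on the main execution thread.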
absl::StatusOr<ReplacedAsync> CreateAsyncStartDone(
HloInstruction* instruction, absl::Span<const Shape> context_shapes) {
HloComputation* computation = instruction->parent();
TF_ASSIGN_OR_RETURN(
HloInstruction * done,
computation->CreateAsyncInstructions(instruction, context_shapes,
HloInstruction::kMainExecutionThread,
false));
HloInstruction* start = done->mutable_operand(0);
return ReplacedAsync{start, done};
}
}
std::vector<HloInstruction*> AsyncCollectiveCreator::MatchCollectives(
HloComputation* computation) {
std::vector<HloInstruction*> supported_collectives;
for (HloInstruction* instruction : computation->instructions()) {
const HloOpcode op = instruction->opcode();
if ((op == HloOpcode::kAllReduce &&
config_.convert_all_reduce(instruction)) ||
(op == HloOpcode::kAllGather &&
config_.convert_all_gather(instruction)) ||
(op == HloOpcode::kCollectiveBroadcast &&
config_.convert_collective_broadcast(instruction)) ||
(op == HloOpcode::kCollectivePermute &&
config_.convert_collective_permute(instruction)) ||
(op == HloOpcode::kAllToAll &&
config_.convert_all_to_all(instruction)) ||
(op == HloOpcode::kReduceScatter &&
config_.convert_reduce_scatter(instruction))) {
supported_collectives.push_back(instruction);
}
}
return supported_collectives;
}
absl::StatusOr<bool> AsyncCollectiveCreator::ReplaceCollectives(
HloComputation* computation,
std::vector<HloInstruction*>& supported_collectives) {
bool changed = false;
HloModule* module = computation->parent();
absl::flat_hash_map<HloInstruction*, ReplacedAsync> replaced_pairs;
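  // If this computation is scheduled, remember each sync->async replacement
  // so the instruction sequence can be patched after the rewrite.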
const bool should_update_schedule =
module->has_schedule() &&
module->schedule().is_computation_scheduled(computation);
for (HloInstruction* instruction : supported_collectives) {
absl::StatusOr<ReplacedAsync> async_pair;
switch (instruction->opcode()) {
case HloOpcode::kAllReduce:
async_pair = CreateAsyncAllReduce(instruction);
break;
case HloOpcode::kAllGather:
async_pair = CreateAsyncAllGather(instruction);
break;
case HloOpcode::kCollectivePermute:
async_pair = CreateAsyncCollectivePermute(
instruction, config_.get_context_shapes(instruction));
break;
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kAllToAll:
case HloOpcode::kReduceScatter:
async_pair = CreateAsyncStartDone(
instruction, config_.get_context_shapes(instruction));
break;
default:
return Internal("Unexpected opcode %s",
HloOpcodeString(instruction->opcode()));
}
TF_RETURN_IF_ERROR(async_pair.status());
async_pair->start->set_metadata(instruction->metadata());
async_pair->start->CopyBackendConfigFrom(instruction);
if (should_update_schedule) {
replaced_pairs[instruction] = *async_pair;
}
TF_RETURN_IF_ERROR(
instruction->CopyAllControlDepsTo(async_pair->start, async_pair->done));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
computation->ReplaceInstruction(instruction, async_pair->done),
"replacing ", instruction->ToShortString());
changed = true;
}
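  // Rebuild the scheduled sequence, expanding each replaced collective into
  // its start/done pair in place.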
if (should_update_schedule) {
std::vector<HloInstruction*> new_sequence;
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
new_sequence.reserve(sequence.size() + replaced_pairs.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_pairs.find(instr);
if (it != replaced_pairs.end()) {
new_sequence.push_back(it->second.start);
new_sequence.push_back(it->second.done);
continue;
}
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
}
return changed;
}
absl::StatusOr<bool> AsyncCollectiveCreator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
int64_t collectives_replaced = 0;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
std::vector<HloInstruction*> supported_collectives =
MatchCollectives(computation);
if (supported_collectives.empty()) {
continue;
}
TF_ASSIGN_OR_RETURN(bool comp_changed,
ReplaceCollectives(computation, supported_collectives));
collectives_replaced += supported_collectives.size();
changed |= comp_changed;
}
VLOG(1) << "Replaced " << collectives_replaced
<< " sync collectives with async versions.";
return changed;
}
} | #include "xla/service/async_collective_creator.h"
#include <string>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::NotNull;
using ::testing::SizeIs;
using AsyncAllReduceCreatorTest = HloTestBase;
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllReduce) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[8] parameter(0)
ROOT ar = f32[8] all-reduce(p0), to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_reduce = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllReduceDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllReduceStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllGather) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ROOT ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAllGatherDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAllGatherStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermute) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleInPlaceCollectivePermute) {
std::string hlo_string = std::string(R"(
HloModule module
ENTRY %module_spmd () -> f32[4,4,128] {
%constant.8 = u32[] constant(0)
%constant.5 = u32[] constant(2)
%tuple.1 = (u32[], u32[], u32[]) tuple(u32[] %constant.8, u32[] %constant.8, u32[] %constant.8)
%tuple = (u32[], u32[], u32[]) tuple(u32[] %constant.5, u32[] %constant.8, u32[] %constant.8)
%custom-call = f32[4,4,128]{2,1,0:T(4,128)} custom-call(), custom_call_target="SomeCustomCall"
ROOT %collective-permute = f32[4,4,128]{2,1,0:T(4,128)} collective-permute(f32[4,4,128]{2,1,0:T(4,128)} %custom-call, f32[4,4,128]{2,1,0:T(4,128)} %custom-call, (u32[], u32[], u32[]) %tuple, (u32[], u32[], u32[]) %tuple.1), channel_id=958, source_target_pairs={{0,4},{4,0},{1,5},{5,1},{2,6},{6,2},{3,7},{7,3}}, slice_sizes={{2,4,128}}
}
)");
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 7);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectivePermuteScheduled) {
constexpr absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY entry {
%p0 = bf16[8]{0} parameter(0)
ROOT %collective-permute.1 = bf16[8]{0} collective-permute(bf16[8]{0} p0), source_target_pairs={{0,1},{1,2},{2,3}}, channel_id=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
const int64_t original_instr_sequence_size =
hlo_module->schedule().sequence(hlo_module->entry_computation()).size();
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_permute = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kCollectivePermuteDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kCollectivePermuteStart);
EXPECT_EQ(
hlo_module->schedule().sequence(hlo_module->entry_computation()).size(),
original_instr_sequence_size + 1);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleCollectiveBroadcast) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT cb = f32[8,16] collective-broadcast(p0), replica_groups={{7,0,1,2,3,4,5,6}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_collective_broadcast = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kCollectiveBroadcast);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleAllToAll) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[8,16] all-to-all(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_to_all = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
XLA_VLOG_LINES(0, hlo_module->ToString());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kAllToAll);
}
TEST_F(AsyncAllReduceCreatorTest, SplitsSingleReduceScatter) {
constexpr absl::string_view hlo_string = R"(
HloModule test
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry {
p0 = f32[8,16] parameter(0)
ROOT ata = f32[1,16] reduce-scatter(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_reduce_scatter = HloPredicateTrue;
TF_ASSERT_OK(AsyncCollectiveCreator(config).Run(hlo_module.get()).status());
XLA_VLOG_LINES(0, hlo_module->ToString());
HloComputation* computation = hlo_module->entry_computation();
ASSERT_THAT(computation, NotNull());
ASSERT_EQ(computation->instruction_count(), 3);
const HloInstruction* done = computation->root_instruction();
EXPECT_EQ(done->opcode(), HloOpcode::kAsyncDone);
ASSERT_THAT(done->operands(), SizeIs(1));
const HloInstruction* start = done->operand(0);
EXPECT_EQ(start->opcode(), HloOpcode::kAsyncStart);
ASSERT_THAT(start->async_wrapped_instruction(), NotNull());
EXPECT_THAT(start->async_wrapped_opcode(), HloOpcode::kReduceScatter);
}
TEST_F(AsyncAllReduceCreatorTest, ControlPredecessor) {
constexpr absl::string_view hlo_string = R"(
HloModule test
ENTRY entry {
p0 = f32[1] parameter(0)
ag = f32[8] all-gather(p0), dimensions={0}, replica_groups={{0,1,2,3,4,5,6,7}}, control-predecessors={p0}
p1 = f32[1] parameter(1), control-predecessors={ag}
ROOT sum = add(ag, ag)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
AsyncCollectiveCreator::CollectiveCreatorConfig config;
config.convert_all_gather = HloPredicateTrue;
TF_ASSERT_OK(
RunHloPass(AsyncCollectiveCreator(config), hlo_module.get()).status());
SCOPED_TRACE(hlo_module->ToString());
HloInstruction* start;
HloInstruction* done;
ASSERT_THAT(
hlo_module->entry_computation()->root_instruction(),
GmockMatch(m::Add(m::Op(),
m::Op(&done)
.WithOpcode(HloOpcode::kAllGatherDone)
.WithOperand(0, m::Op(&start).WithOpcode(
HloOpcode::kAllGatherStart)))));
EXPECT_EQ(start->control_successors().size(), 0);
ASSERT_EQ(start->control_predecessors().size(), 1);
EXPECT_THAT(start->control_predecessors()[0], GmockMatch(m::Parameter(0)));
EXPECT_EQ(done->control_predecessors().size(), 0);
ASSERT_EQ(done->control_successors().size(), 1);
EXPECT_THAT(done->control_successors()[0], GmockMatch(m::Parameter(1)));
}
}
} |
156 | #ifndef TENSORFLOW_TSL_PLATFORM_CLOUD_GOOGLE_AUTH_PROVIDER_H_
#define TENSORFLOW_TSL_PLATFORM_CLOUD_GOOGLE_AUTH_PROVIDER_H_
#include <memory>
#include "tsl/platform/cloud/auth_provider.h"
#include "tsl/platform/cloud/compute_engine_metadata_client.h"
#include "tsl/platform/cloud/oauth_client.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tsl {
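// An AuthProvider that obtains Google OAuth bearer tokens.
//
// Illustrative usage sketch (the HTTP transport wiring is assumed here, not
// part of this header):
//
//   auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
//       http_request_factory);
//   GoogleAuthProvider provider(metadata_client);
//   string token;
//   TF_RETURN_IF_ERROR(provider.GetToken(&token));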
class GoogleAuthProvider : public AuthProvider {
public:
GoogleAuthProvider(std::shared_ptr<ComputeEngineMetadataClient>
compute_engine_metadata_client);
explicit GoogleAuthProvider(std::unique_ptr<OAuthClient> oauth_client,
std::shared_ptr<ComputeEngineMetadataClient>
compute_engine_metadata_client,
Env* env);
virtual ~GoogleAuthProvider() {}
Status GetToken(string* token) override;
private:
Status GetTokenFromFiles() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status GetTokenFromGce() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
Status GetTokenForTesting() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
std::unique_ptr<OAuthClient> oauth_client_;
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client_;
Env* env_;
mutex mu_;
string current_token_ TF_GUARDED_BY(mu_);
uint64 expiration_timestamp_sec_ TF_GUARDED_BY(mu_) = 0;
GoogleAuthProvider(const GoogleAuthProvider&) = delete;
void operator=(const GoogleAuthProvider&) = delete;
};
}
#endif
#include "tsl/platform/cloud/google_auth_provider.h"
#ifndef _WIN32
#include <pwd.h>
#include <unistd.h>
#else
#include <sys/types.h>
#endif
#include <fstream>
#include <utility>
#include "absl/strings/match.h"
#include "json/json.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/retrying_utils.h"
namespace tsl {
namespace {
constexpr char kGoogleApplicationCredentials[] =
"GOOGLE_APPLICATION_CREDENTIALS";
constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING";
constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG";
constexpr char kNoGceCheck[] = "NO_GCE_CHECK";
constexpr char kGCloudConfigFolder[] = ".config/gcloud/";
constexpr char kWellKnownCredentialsFile[] =
"application_default_credentials.json";
constexpr int kExpirationTimeMarginSec = 60;
constexpr char kOAuthV3Url[] = "https://www.googleapis.com/oauth2/v3/token";
constexpr char kOAuthV4Url[] = "https://www.googleapis.com/oauth2/v4/token";
constexpr char kGceTokenPath[] = "instance/service-accounts/default/token";
constexpr char kOAuthScope[] = "https://www.googleapis.com/auth/cloud-platform";
bool IsFile(const string& filename) {
std::ifstream fstream(filename.c_str());
return fstream.good();
}
Status GetEnvironmentVariableFileName(string* filename) {
if (!filename) {
return errors::FailedPrecondition("'filename' cannot be nullptr.");
}
const char* result = std::getenv(kGoogleApplicationCredentials);
if (!result || !IsFile(result)) {
return errors::NotFound(strings::StrCat("$", kGoogleApplicationCredentials,
" is not set or corrupt."));
}
*filename = result;
return OkStatus();
}
Status GetWellKnownFileName(string* filename) {
if (!filename) {
return errors::FailedPrecondition("'filename' cannot be nullptr.");
}
string config_dir;
const char* config_dir_override = std::getenv(kCloudSdkConfig);
if (config_dir_override) {
config_dir = config_dir_override;
} else {
const char* home_dir = std::getenv("HOME");
if (!home_dir) {
return errors::FailedPrecondition("Could not read $HOME.");
}
config_dir = io::JoinPath(home_dir, kGCloudConfigFolder);
}
auto result = io::JoinPath(config_dir, kWellKnownCredentialsFile);
if (!IsFile(result)) {
return errors::NotFound(
"Could not find the credentials file in the standard gcloud location.");
}
*filename = result;
return OkStatus();
}
}
GoogleAuthProvider::GoogleAuthProvider(
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client)
: GoogleAuthProvider(std::unique_ptr<OAuthClient>(new OAuthClient()),
std::move(compute_engine_metadata_client),
Env::Default()) {}
GoogleAuthProvider::GoogleAuthProvider(
std::unique_ptr<OAuthClient> oauth_client,
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client,
Env* env)
: oauth_client_(std::move(oauth_client)),
compute_engine_metadata_client_(
std::move(compute_engine_metadata_client)),
env_(env) {}
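// Returns a cached token while it is still valid (with a safety margin of
// kExpirationTimeMarginSec); otherwise refreshes it by trying, in order: the
// testing override variable, credentials files, and the GCE metadata server.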
Status GoogleAuthProvider::GetToken(string* t) {
mutex_lock lock(mu_);
const uint64 now_sec = env_->NowSeconds();
if (now_sec + kExpirationTimeMarginSec < expiration_timestamp_sec_) {
*t = current_token_;
return OkStatus();
}
if (GetTokenForTesting().ok()) {
*t = current_token_;
return OkStatus();
}
auto token_from_files_status = GetTokenFromFiles();
if (token_from_files_status.ok()) {
*t = current_token_;
return OkStatus();
}
char* no_gce_check_var = std::getenv(kNoGceCheck);
bool skip_gce_check = no_gce_check_var != nullptr &&
absl::EqualsIgnoreCase(no_gce_check_var, "true");
Status token_from_gce_status;
if (skip_gce_check) {
token_from_gce_status =
Status(absl::StatusCode::kCancelled,
strings::StrCat("GCE check skipped due to presence of $",
kNoGceCheck, " environment variable."));
} else {
token_from_gce_status = GetTokenFromGce();
}
if (token_from_gce_status.ok()) {
*t = current_token_;
return OkStatus();
}
if (skip_gce_check) {
LOG(INFO)
<< "Attempting an empty bearer token since no token was retrieved "
<< "from files, and GCE metadata check was skipped.";
} else {
LOG(WARNING)
<< "All attempts to get a Google authentication bearer token failed, "
<< "returning an empty token. Retrieving token from files failed with "
"\""
<< token_from_files_status.ToString() << "\"."
<< " Retrieving token from GCE failed with \""
<< token_from_gce_status.ToString() << "\".";
}
*t = "";
if (skip_gce_check) {
expiration_timestamp_sec_ = 0;
} else {
expiration_timestamp_sec_ = UINT64_MAX;
}
current_token_ = "";
return OkStatus();
}
Status GoogleAuthProvider::GetTokenFromFiles() {
string credentials_filename;
if (!GetEnvironmentVariableFileName(&credentials_filename).ok() &&
!GetWellKnownFileName(&credentials_filename).ok()) {
return errors::NotFound("Could not locate the credentials file.");
}
Json::Value json;
Json::Reader reader;
std::ifstream credentials_fstream(credentials_filename);
if (!reader.parse(credentials_fstream, json)) {
return errors::FailedPrecondition(
"Couldn't parse the JSON credentials file.");
}
if (json.isMember("refresh_token")) {
TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromRefreshTokenJson(
        json, kOAuthV3Url, &current_token_, &expiration_timestamp_sec_));
} else if (json.isMember("private_key")) {
TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromServiceAccountJson(
        json, kOAuthV4Url, kOAuthScope, &current_token_,
&expiration_timestamp_sec_));
} else {
return errors::FailedPrecondition(
"Unexpected content of the JSON credentials file.");
}
return OkStatus();
}
Status GoogleAuthProvider::GetTokenFromGce() {
std::vector<char> response_buffer;
const uint64 request_timestamp_sec = env_->NowSeconds();
TF_RETURN_IF_ERROR(compute_engine_metadata_client_->GetMetadata(
kGceTokenPath, &response_buffer));
StringPiece response =
StringPiece(&response_buffer[0], response_buffer.size());
TF_RETURN_IF_ERROR(oauth_client_->ParseOAuthResponse(
      response, request_timestamp_sec, &current_token_,
&expiration_timestamp_sec_));
return OkStatus();
}
Status GoogleAuthProvider::GetTokenForTesting() {
const char* token = std::getenv(kGoogleAuthTokenForTesting);
if (!token) {
return errors::NotFound("The env variable for testing was not set.");
}
expiration_timestamp_sec_ = UINT64_MAX;
current_token_ = token;
return OkStatus();
}
} | #include "tsl/platform/cloud/google_auth_provider.h"
#include <stdlib.h>
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
string TestData() {
return io::JoinPath(testing::TslSrcRoot(), "platform", "cloud", "testdata");
}
class FakeEnv : public EnvWrapper {
public:
FakeEnv() : EnvWrapper(Env::Default()) {}
uint64 NowSeconds() const override { return now; }
uint64 now = 10000;
};
class FakeOAuthClient : public OAuthClient {
public:
Status GetTokenFromServiceAccountJson(
Json::Value json, StringPiece oauth_server_uri, StringPiece scope,
string* token, uint64* expiration_timestamp_sec) override {
provided_credentials_json = json;
*token = return_token;
*expiration_timestamp_sec = return_expiration_timestamp;
return OkStatus();
}
Status GetTokenFromRefreshTokenJson(
Json::Value json, StringPiece oauth_server_uri, string* token,
uint64* expiration_timestamp_sec) override {
provided_credentials_json = json;
*token = return_token;
*expiration_timestamp_sec = return_expiration_timestamp;
return OkStatus();
}
string return_token;
uint64 return_expiration_timestamp;
Json::Value provided_credentials_json;
};
}
class GoogleAuthProviderTest : public ::testing::Test {
protected:
void SetUp() override { ClearEnvVars(); }
void TearDown() override { ClearEnvVars(); }
void ClearEnvVars() {
unsetenv("CLOUDSDK_CONFIG");
unsetenv("GOOGLE_APPLICATION_CREDENTIALS");
unsetenv("GOOGLE_AUTH_TOKEN_FOR_TESTING");
unsetenv("NO_GCE_CHECK");
}
};
TEST_F(GoogleAuthProviderTest, EnvironmentVariable_Caching) {
setenv("GOOGLE_APPLICATION_CREDENTIALS",
io::JoinPath(TestData(), "service_account_credentials.json").c_str(),
1);
setenv("CLOUDSDK_CONFIG", TestData().c_str(),
1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
oauth_client->return_token = "fake-token";
oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600;
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
EXPECT_EQ("fake_key_id",
oauth_client->provided_credentials_json.get("private_key_id", "")
.asString());
oauth_client->return_token = "new-fake-token";
env.now += 3000;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
env.now += 598;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("new-fake-token", token);
}
TEST_F(GoogleAuthProviderTest, GCloudRefreshToken) {
setenv("CLOUDSDK_CONFIG", TestData().c_str(), 1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
oauth_client->return_token = "fake-token";
oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600;
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
EXPECT_EQ("fake-refresh-token",
oauth_client->provided_credentials_json.get("refresh_token", "")
.asString());
}
TEST_F(GoogleAuthProviderTest, RunningOnGCE) {
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
R"(
{
"access_token":"fake-gce-token",
"expires_in": 3920,
"token_type":"Bearer"
})"),
new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
R"(
{
"access_token":"new-fake-gce-token",
"expires_in": 3920,
"token_type":"Bearer"
})")});
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-gce-token", token);
env.now += 3700;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-gce-token", token);
env.now += 598;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("new-fake-gce-token", token);
}
TEST_F(GoogleAuthProviderTest, OverrideForTesting) {
setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "tokenForTesting", 1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> empty_requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&empty_requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("tokenForTesting", token);
}
TEST_F(GoogleAuthProviderTest, NothingAvailable) {
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
"", errors::NotFound("404"), 404)});
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
}
TEST_F(GoogleAuthProviderTest, NoGceCheckEnvironmentVariable) {
setenv("NO_GCE_CHECK", "True", 1);
auto oauth_client = new FakeOAuthClient;
FakeEnv env;
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
nullptr, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
setenv("NO_GCE_CHECK", "true", 1);
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "newToken", 1);
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("newToken", token);
}
} |
157 | #ifndef TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_
#define TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
using TfLiteDelegatePtr =
std::unique_ptr<TfLiteOpaqueDelegate, void (*)(TfLiteOpaqueDelegate*)>;
class DelegateProvider {
public:
virtual ~DelegateProvider() {}
virtual std::vector<Flag> CreateFlags(ToolParams* params) const = 0;
virtual void LogParams(const ToolParams& params, bool verbose) const = 0;
virtual TfLiteDelegatePtr CreateTfLiteDelegate(
const ToolParams& params) const = 0;
virtual std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
const ToolParams& params) const = 0;
virtual std::string GetName() const = 0;
const ToolParams& DefaultParams() const { return default_params_; }
protected:
template <typename T>
Flag CreateFlag(const char* name, ToolParams* params,
const std::string& usage) const {
return Flag(
name,
[params, name](const T& val, int argv_position) {
params->Set<T>(name, val, argv_position);
},
default_params_.Get<T>(name), usage, Flag::kOptional);
}
ToolParams default_params_;
};
using DelegateProviderPtr = std::unique_ptr<DelegateProvider>;
using DelegateProviderList = std::vector<DelegateProviderPtr>;
class DelegateProviderRegistrar {
public:
template <typename T>
struct Register {
Register() {
auto* const instance = DelegateProviderRegistrar::GetSingleton();
instance->providers_.emplace_back(DelegateProviderPtr(new T()));
}
};
static const DelegateProviderList& GetProviders() {
return GetSingleton()->providers_;
}
private:
DelegateProviderRegistrar() {}
DelegateProviderRegistrar(const DelegateProviderRegistrar&) = delete;
DelegateProviderRegistrar& operator=(const DelegateProviderRegistrar&) =
delete;
static DelegateProviderRegistrar* GetSingleton() {
static auto* instance = new DelegateProviderRegistrar();
return instance;
}
DelegateProviderList providers_;
};
#define REGISTER_DELEGATE_PROVIDER_VNAME(T) gDelegateProvider_##T##_
#define REGISTER_DELEGATE_PROVIDER(T) \
static tflite::tools::DelegateProviderRegistrar::Register<T> \
REGISTER_DELEGATE_PROVIDER_VNAME(T);
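// Illustrative registration sketch (hypothetical provider, not defined in
// this file); the Register<T> constructor runs at static-initialization time
// and appends the provider to the global registrar's list:
//
//   class MyDelegateProvider : public DelegateProvider { ... };
//   REGISTER_DELEGATE_PROVIDER(MyDelegateProvider);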
TfLiteDelegatePtr CreateNullDelegate();
inline const DelegateProviderList& GetRegisteredDelegateProviders() {
return DelegateProviderRegistrar::GetProviders();
}
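// Helper that fans out over all registered providers. Typical flow (sketch):
//
//   ToolParams params;
//   ProvidedDelegateList delegate_list(&params);
//   delegate_list.AddAllDelegateParams();  // Merge per-provider defaults.
//   auto delegates = delegate_list.CreateAllRankedDelegates();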
class ProvidedDelegateList {
public:
struct ProvidedDelegate {
ProvidedDelegate()
: provider(nullptr), delegate(CreateNullDelegate()), rank(0) {}
const DelegateProvider* provider;
TfLiteDelegatePtr delegate;
int rank;
};
  ProvidedDelegateList() : ProvidedDelegateList(nullptr) {}
explicit ProvidedDelegateList(ToolParams* params)
: providers_(GetRegisteredDelegateProviders()), params_(params) {}
const DelegateProviderList& providers() const { return providers_; }
void AddAllDelegateParams() const;
void AppendCmdlineFlags(std::vector<Flag>& flags) const;
void RemoveCmdlineFlag(std::vector<Flag>& flags,
const std::string& name) const;
std::vector<ProvidedDelegate> CreateAllRankedDelegates(
const ToolParams& params) const;
std::vector<ProvidedDelegate> CreateAllRankedDelegates() const {
return CreateAllRankedDelegates(*params_);
}
private:
const DelegateProviderList& providers_;
ToolParams* const params_;
};
}
}
#endif
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace tflite {
namespace tools {
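// Returns an empty delegate with a no-op deleter, used as a placeholder when
// no real delegate is created.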
TfLiteDelegatePtr CreateNullDelegate() {
return TfLiteDelegatePtr(nullptr, [](TfLiteOpaqueDelegate*) {});
}
void ProvidedDelegateList::AddAllDelegateParams() const {
for (const auto& provider : providers_) {
params_->Merge(provider->DefaultParams());
}
}
void ProvidedDelegateList::AppendCmdlineFlags(std::vector<Flag>& flags) const {
for (const auto& provider : providers_) {
auto delegate_flags = provider->CreateFlags(params_);
flags.insert(flags.end(), delegate_flags.begin(), delegate_flags.end());
}
}
void ProvidedDelegateList::RemoveCmdlineFlag(std::vector<Flag>& flags,
const std::string& name) const {
decltype(flags.begin()) it;
for (it = flags.begin(); it < flags.end();) {
if (it->GetFlagName() == name) {
it = flags.erase(it);
} else {
++it;
}
}
}
std::vector<ProvidedDelegateList::ProvidedDelegate>
ProvidedDelegateList::CreateAllRankedDelegates(const ToolParams& params) const {
std::vector<ProvidedDelegateList::ProvidedDelegate> delegates;
for (const auto& provider : providers_) {
auto ptr_rank = provider->CreateRankedTfLiteDelegate(params);
if (ptr_rank.first == nullptr) continue;
static bool already_logged = false;
if (!already_logged) {
TFLITE_LOG(INFO) << provider->GetName() << " delegate created.";
#ifndef NDEBUG
provider->LogParams(params, false);
#endif
already_logged = true;
}
ProvidedDelegateList::ProvidedDelegate info;
info.provider = provider.get();
info.delegate = std::move(ptr_rank.first);
info.rank = ptr_rank.second;
delegates.emplace_back(std::move(info));
}
std::sort(delegates.begin(), delegates.end(),
[](const ProvidedDelegateList::ProvidedDelegate& a,
const ProvidedDelegateList::ProvidedDelegate& b) {
return a.rank < b.rank;
});
return delegates;
}
}
} | #include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/test_util.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
TEST(ProvidedDelegateListTest, AddAllDelegateParams) {
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
EXPECT_TRUE(params.HasParam("use_xnnpack"));
#if !TFLITE_WITH_STABLE_ABI
EXPECT_TRUE(params.HasParam("use_nnapi"));
#endif
}
TEST(ProvidedDelegateListTest, AppendCmdlineFlags) {
std::vector<Flag> flags;
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
providers.AppendCmdlineFlags(flags);
EXPECT_FALSE(flags.empty());
}
TEST(KernelTestDelegateProvidersTest, CreateAllRankedDelegates) {
#if !defined(__Fuchsia__) && !defined(__s390x__) && \
!defined(TFLITE_WITHOUT_XNNPACK)
ToolParams params;
  ProvidedDelegateList providers(&params);
providers.AddAllDelegateParams();
#if TFLITE_WITH_STABLE_ABI
ASSERT_EQ(TfLiteInitializeShimsForTest(), 0);
params.Set<bool>("use_xnnpack", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("XNNPACK", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
#else
params.Set<bool>("use_xnnpack", true, 2);
params.Set<bool>("use_dummy_delegate", true, 1);
auto delegates = providers.CreateAllRankedDelegates();
EXPECT_EQ(2, delegates.size());
EXPECT_EQ("DummyDelegate", delegates.front().provider->GetName());
EXPECT_EQ(1, delegates.front().rank);
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ("XNNPACK", delegates.back().provider->GetName());
EXPECT_NE(nullptr, delegates.back().delegate.get());
EXPECT_EQ(2, delegates.back().rank);
#endif
#endif
}
}
}
} |
158 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_ADD_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_ADD_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewAddNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/add.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class Add : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
auto adds = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
auto scalar = std::get_if<float>(&attr.param);
const auto* hwc_tensor =
std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.param);
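    // Constant HWC tensor case: any dimension of size 1 in the constant is
    // broadcast by pinning the corresponding read coordinate to 0. Channels
    // are stored in groups of 4 (PHWC4), so a single-channel constant is
    // additionally splatted across the vec4 lanes below.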
if (hwc_tensor) {
std::string code;
const std::string x_coord = hwc_tensor->shape.w == 1 ? "0" : "gid.x";
const std::string y_coord = hwc_tensor->shape.h == 1 ? "0" : "gid.y";
const std::string s_coord = hwc_tensor->shape.c == 1 ? "0" : "gid.z";
code = absl::StrCat("vec4 second_val = $hwc_buffer[", x_coord, ", ",
y_coord, ", ", s_coord, "]$;\n");
if (hwc_tensor->shape.c == 1) {
code += " second_val.y = second_val.x;\n";
code += " second_val.z = second_val.x;\n";
code += " second_val.w = second_val.x;\n";
}
code += " value_0 += second_val;\n";
*generated_code = {
{},
{{"hwc_buffer",
MakeReadonlyObject(
uint3(hwc_tensor->shape.w, hwc_tensor->shape.h,
DivideRoundUp(hwc_tensor->shape.c, 4)),
ConvertToPHWC4(
std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param)))}},
{},
uint3(static_cast<int>(ctx.input_shapes[0][2]),
static_cast<int>(ctx.input_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (!adds && !scalar) {
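      // Runtime-tensor case. If the second input is 1x1xC with a matching
      // channel count, broadcast it across the spatial dimensions; otherwise
      // all inputs must have identical shapes and are summed elementwise.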
if (ctx.input_shapes.size() == 2 &&
ctx.input_shapes[0] != ctx.input_shapes[1] &&
ctx.input_shapes[1][1] == 1 && ctx.input_shapes[1][2] == 1 &&
ctx.input_shapes[0][3] == ctx.input_shapes[1][3]) {
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
"value_0 = $input_data_0[gid.x, gid.y, gid.z]$ + "
" $input_data_1[0, 0, gid.z]$;",
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
std::string code = "value_0 = value_0";
for (int index = 1; index < ctx.input_shapes.size(); ++index) {
if (ctx.input_shapes[index] != ctx.input_shapes[0]) {
return absl::InvalidArgumentError("Shapes are not equal");
}
absl::StrAppend(&code, " + value_", index);
}
absl::StrAppend(&code, ";");
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
std::move(code),
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
if (scalar) {
*generated_code = {
{{"scalar", *scalar}},
{},
{},
uint3(),
uint3(),
"value_0 += $scalar$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
*generated_code = {
{},
{{"add_buffer", MakeReadonlyObject(adds->data)}},
{},
uint3(ctx.input_shapes[0][2], ctx.input_shapes[0][1],
DivideRoundUp(ctx.input_shapes[0][3], 4)),
uint3(),
"value_0 += $add_buffer[gid.z]$;",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewAddNodeShader() {
return std::make_unique<Add>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/add.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(AddTest, TwoInputTensorsOfTheSameShape) {
TensorRef<BHWC> augend, addend, output;
augend.type = DataType::FLOAT32;
augend.ref = 0;
augend.shape = BHWC(1, 2, 2, 1);
addend.type = DataType::FLOAT32;
addend.ref = 1;
addend.shape = BHWC(1, 2, 2, 1);
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 1);
ElementwiseAttributes attr;
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)},
{augend, addend}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8}));
ASSERT_TRUE(model.PopulateTensor(1, {0.1, 0.2, 0.3, 0.5}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.9, 0.4, 1.0, 1.3}));
}
TEST(AddTest, InputTensorAndScalar) {
ElementwiseAttributes attr;
attr.param = 0.1f;
TensorRef<BHWC> input, output;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 1, 2);
output.type = DataType::FLOAT32;
output.ref = 1;
output.shape = BHWC(1, 3, 1, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {-1.9, 0.3, 0.8, 0.9, 1.2, 2.1}));
}
TEST(AddTest, InputTensorWithConstantBroadcast) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<Linear, DataType::FLOAT32> tensor;
tensor.shape.v = 2;
tensor.id = 1;
tensor.data.push_back(10.0);
tensor.data.push_back(20.0);
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}));
}
TEST(AddTest, InputTensorWithRuntimeBroadcast) {
TensorRef<BHWC> input1;
input1.type = DataType::FLOAT32;
input1.ref = 0;
input1.shape = BHWC(1, 2, 2, 2);
TensorRef<BHWC> input2;
input2.type = DataType::FLOAT32;
input2.ref = 1;
input2.shape = BHWC(1, 1, 1, 2);
ElementwiseAttributes attr;
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)},
{input1, input2}, {output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_TRUE(model.PopulateTensor(1, {10.0, 20.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6),
{11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}));
}
TEST(AddTest, InputTensorWithConstantHWC) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor;
tensor.shape = HWC(2, 2, 2);
tensor.id = 1;
tensor.data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0};
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0}));
}
TEST(AddTest, InputTensorWithConstantHWCBroadcastChannels) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor;
tensor.shape = HWC(2, 2, 1);
tensor.id = 1;
tensor.data = {1.0, 2.0, 3.0, 4.0};
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.0, 3.0, 5.0, 6.0, 8.0, 9.0, 11.0, 12.0}));
}
TEST(AddTest, InputTensorWithConstantHWCBroadcastWidth) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
ElementwiseAttributes attr;
Tensor<HWC, DataType::FLOAT32> tensor;
tensor.shape = HWC(2, 1, 2);
tensor.id = 1;
tensor.data = {1.0, 2.0, 3.0, 4.0};
attr.param = std::move(tensor);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input},
{output});
ASSERT_TRUE(
model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
ASSERT_OK(model.Invoke(*NewAddNodeShader()));
EXPECT_THAT(
model.GetOutput(0),
Pointwise(FloatNear(1e-6), {2.0, 4.0, 4.0, 6.0, 8.0, 10.0, 10.0, 12.0}));
}
}
}
}
} |
159 | #ifndef TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_GRAPH_ANALYZER_H_
#define TENSORFLOW_CORE_GRAPPLER_GRAPH_ANALYZER_GRAPH_ANALYZER_H_
#include <deque>
#include <set>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/graph_analyzer/map_tools.h"
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
#include "tensorflow/core/grappler/graph_analyzer/subgraph.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
class GraphAnalyzerTest;
}
class GraphAnalyzer {
public:
GraphAnalyzer(const GraphDef& graph, int subgraph_size);
virtual ~GraphAnalyzer();
Status Run();
std::vector<string> DumpSubgraphs();
Status OutputSubgraphs();
private:
GraphAnalyzer() = delete;
GraphAnalyzer(const GraphAnalyzer&) = delete;
void operator=(const GraphAnalyzer&) = delete;
friend class tensorflow::grappler::graph_analyzer::test::GraphAnalyzerTest;
Status BuildMap();
void FindSubgraphs();
void DropInvalidSubgraphs();
Status CollateResult();
std::vector<string> DumpRawSubgraphs();
void ExtendSubgraph(Subgraph* parent);
void ExtendSubgraphAllOrNone(Subgraph* parent, const GenNode* node);
void ExtendSubgraphPortAllOrNone(Subgraph* parent, const GenNode* node,
GenNode::Port port);
void AddExtendedSubgraph(Subgraph* parent, const Subgraph::Identity& id);
bool HasInvalidMultiInputs(Subgraph* sg);
GraphDef graph_;
int subgraph_size_;
GenNodeMap nodes_;
SubgraphPtrSet result_;
SubgraphPtrSet partial_;
std::deque<Subgraph*> todo_;
struct CollationEntry {
std::shared_ptr<Signature> sig;
size_t count = 0;
};
using CollationMap =
std::unordered_map<Signature*, CollationEntry, HashAtPtr<Signature*>,
EqAtPtr<Signature*> >;
CollationMap collation_map_;
struct ReverseLessByCount {
bool operator()(CollationEntry* left, CollationEntry* right) const {
return left->count > right->count;
}
};
using CollationOrderByCount =
std::multiset<CollationEntry*, ReverseLessByCount>;
CollationOrderByCount ordered_collation_;
};
}
}
}
#endif
#include <deque>
#include <iostream>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GraphAnalyzer::GraphAnalyzer(const GraphDef& graph, int subgraph_size)
: graph_(graph), subgraph_size_(subgraph_size) {}
GraphAnalyzer::~GraphAnalyzer() {}
Status GraphAnalyzer::Run() {
if (subgraph_size_ > Signature::kMaxGraphSize) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrFormat("Subgraphs of %d nodes are not supported, "
"the maximal supported node count is %d.",
subgraph_size_, Signature::kMaxGraphSize));
}
Status st = BuildMap();
if (!st.ok()) {
return st;
}
FindSubgraphs();
DropInvalidSubgraphs();
st = CollateResult();
if (!st.ok()) {
return st;
}
return absl::OkStatus();
}
Status GraphAnalyzer::BuildMap() {
nodes_.clear();
return GenNode::BuildGraphInMap(graph_, &nodes_);
}
void GraphAnalyzer::FindSubgraphs() {
result_.clear();
if (subgraph_size_ < 1) {
return;
}
partial_.clear();
todo_.clear();
const Subgraph::Identity empty_parent;
for (const auto& node : nodes_) {
if (subgraph_size_ == 1) {
result_.ExtendParent(empty_parent, node.second.get());
} else {
todo_.push_back(partial_.ExtendParent(empty_parent, node.second.get()));
}
}
while (!todo_.empty()) {
ExtendSubgraph(todo_.front());
todo_.pop_front();
}
partial_.clear();
}
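// Grows `parent` by one neighbor in every legal direction. Nodes whose
// inputs must be taken all-or-none, and multi-input ports, are expanded as
// whole groups; everything else is extended one node at a time. Completed
// subgraphs go to result_, incomplete ones onto the todo_ queue.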
void GraphAnalyzer::ExtendSubgraph(Subgraph* parent) {
const int next_parent_id = parent->id().size() + 1;
bool will_complete = (next_parent_id == subgraph_size_);
SubgraphPtrSet& sg_set = will_complete ? result_ : partial_;
const GenNode* last_all_or_none_node = nullptr;
for (SubgraphIterator sit(parent); !sit.AtEnd(); sit.Next()) {
const GenNode* node = sit.GetNode();
GenNode::Port port = sit.GetPort();
const GenNode::LinkTarget& neighbor = sit.GetNeighbor();
if (node->AllInputsOrNone() && port.IsInbound() && !port.IsControl()) {
if (node != last_all_or_none_node) {
ExtendSubgraphAllOrNone(parent, node);
last_all_or_none_node = node;
}
sit.SkipPort();
} else if (neighbor.node->AllInputsOrNone() && !port.IsInbound() &&
!port.IsControl()) {
if (parent->id().find(neighbor.node) == parent->id().end()) {
ExtendSubgraphAllOrNone(parent, neighbor.node);
}
} else if (node->IsMultiInput(port)) {
ExtendSubgraphPortAllOrNone(parent, node, port);
sit.SkipPort();
} else if (neighbor.node->IsMultiInput(neighbor.port)) {
if (parent->id().find(neighbor.node) != parent->id().end()) {
continue;
}
ExtendSubgraphPortAllOrNone(parent, neighbor.node, neighbor.port);
} else {
Subgraph* sg = sg_set.ExtendParent(parent->id(), neighbor.node);
if (!will_complete && sg != nullptr) {
todo_.push_back(sg);
}
}
}
}
void GraphAnalyzer::ExtendSubgraphAllOrNone(Subgraph* parent,
const GenNode* node) {
Subgraph::Identity id = parent->id();
id.insert(node);
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
}
AddExtendedSubgraph(parent, id);
}
void GraphAnalyzer::ExtendSubgraphPortAllOrNone(Subgraph* parent,
const GenNode* node,
GenNode::Port port) {
auto nbit = node->links().find(port);
if (nbit == node->links().end()) {
return;
}
Subgraph::Identity id = parent->id();
id.insert(node);
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
AddExtendedSubgraph(parent, id);
}
void GraphAnalyzer::AddExtendedSubgraph(Subgraph* parent,
const Subgraph::Identity& id) {
if (id.size() == parent->id().size()) {
return;
}
auto sg = std::make_unique<Subgraph>(id);
SubgraphPtrSet& spec_sg_set =
(id.size() == subgraph_size_) ? result_ : partial_;
if (spec_sg_set.find(sg) != spec_sg_set.end()) {
return;
}
const int id_size = id.size();
if (id_size != subgraph_size_) {
todo_.push_back(sg.get());
}
spec_sg_set.insert(std::move(sg));
}
void GraphAnalyzer::DropInvalidSubgraphs() {
auto resit = result_.begin();
while (resit != result_.end()) {
if (HasInvalidMultiInputs(resit->get())) {
auto delit = resit;
++resit;
result_.erase(delit);
} else {
++resit;
}
}
}
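// A subgraph is invalid if some all-or-none node or multi-input port has a
// link group that is only partially contained in it (some peers inside the
// subgraph, some outside).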
bool GraphAnalyzer::HasInvalidMultiInputs(Subgraph* sg) {
for (auto const& node : sg->id()) {
if (!node->AllInputsOrNone()) {
continue;
}
bool anyIn = false;
bool anyOut = false;
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
if (sg->id().find(link.node) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
}
}
if (anyIn && anyOut) {
return true;
}
}
for (SubgraphIterator sit(sg); !sit.AtEnd(); sit.Next()) {
if (sit.GetNode()->IsMultiInput(sit.GetPort())) {
bool anyIn = false;
bool anyOut = false;
do {
GenNode* peer = sit.GetNeighbor().node;
if (sg->id().find(peer) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
} while (sit.NextIfSamePort());
if (anyIn && anyOut) {
return true;
}
}
}
return false;
}
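// Deduplicates the found subgraphs by their computed signature, counts how
// many instances map to each signature, and orders the collation by
// descending count for reporting.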
Status GraphAnalyzer::CollateResult() {
ordered_collation_.clear();
collation_map_.clear();
for (const auto& it : result_) {
auto sig = std::make_unique<Signature>();
it->ExtractForSignature(&sig->map);
Status status = sig->Compute();
if (!status.ok()) {
return status;
}
auto& coll_entry = collation_map_[sig.get()];
if (coll_entry.sig == nullptr) {
coll_entry.sig = std::move(sig);
}
++coll_entry.count;
}
for (auto& entry : collation_map_) {
ordered_collation_.insert(&entry.second);
}
result_.clear();
return absl::OkStatus();
}
std::vector<string> GraphAnalyzer::DumpRawSubgraphs() {
std::vector<string> result;
for (const auto& it : result_) {
result.emplace_back(it->Dump());
}
return result;
}
std::vector<string> GraphAnalyzer::DumpSubgraphs() {
std::vector<string> result;
for (auto ptr : ordered_collation_) {
result.emplace_back(
absl::StrFormat("%d %s", ptr->count, ptr->sig->ToString()));
}
return result;
}
Status GraphAnalyzer::OutputSubgraphs() {
size_t total = 0;
for (auto ptr : ordered_collation_) {
std::cout << ptr->count << ' ' << ptr->sig->ToString() << '\n';
total += ptr->count;
}
std::cout << "Total: " << total << '\n';
if (std::cout.fail()) {
return Status(absl::StatusCode::kDataLoss, "Failed to write to stdout");
} else {
return absl::OkStatus();
}
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class GraphAnalyzerTest : public ::testing::Test, protected TestGraphs {
protected:
Status BuildMap() { return gran_->BuildMap(); }
void FindSubgraphs() { gran_->FindSubgraphs(); }
void DropInvalidSubgraphs() { gran_->DropInvalidSubgraphs(); }
Status CollateResult() { return gran_->CollateResult(); }
void ExtendSubgraph(Subgraph* parent) { gran_->ExtendSubgraph(parent); }
void ExtendSubgraphPortAllOrNone(Subgraph* parent, GenNode* node,
GenNode::Port port) {
gran_->ExtendSubgraphPortAllOrNone(parent, node, port);
}
void ExtendSubgraphAllOrNone(Subgraph* parent, GenNode* node) {
gran_->ExtendSubgraphAllOrNone(parent, node);
}
std::vector<string> DumpRawSubgraphs() { return gran_->DumpRawSubgraphs(); }
std::vector<string> DumpPartials() {
std::vector<string> result;
for (const auto& it : gran_->partial_) {
result.emplace_back(it->Dump());
}
return result;
}
const GenNodeMap& GetNodes() { return gran_->nodes_; }
GenNode* GetNode(const string& name) { return gran_->nodes_.at(name).get(); }
SubgraphPtrSet& GetResult() { return gran_->result_; }
SubgraphPtrSet& GetPartial() { return gran_->partial_; }
std::deque<Subgraph*>& GetTodo() { return gran_->todo_; }
std::unique_ptr<GraphAnalyzer> gran_;
};
TEST_F(GraphAnalyzerTest, BuildMap) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
EXPECT_THAT(st, Eq(absl::OkStatus()));
auto& map = GetNodes();
EXPECT_THAT(map.find("node1"), Ne(map.end()));
EXPECT_THAT(map.find("node2"), Ne(map.end()));
EXPECT_THAT(map.find("node3"), Ne(map.end()));
}
TEST_F(GraphAnalyzerTest, BuildMapError) {
(*graph_3n_self_control_.add_node()) = MakeNodeConst("node1");
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs0) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 0);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(0));
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs1) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(3));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: BroadcastGradientArgs(node3)",
"1: Const(node1)",
"1: Sub(node2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphsTooLarge) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseIn) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto parent = std::make_unique<Subgraph>(Subgraph::Identity());
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(parent.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsIncomplete) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("add2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsMulti) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("add1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add1"),
GetNode("add2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("add1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add1), AddN(add2), Sub(sub)",
"1: AddN(add1), Const(const1_1), Const(const1_2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsNoControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass1"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSeparateControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("pass1"),
GenNode::Port(true, -1));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("pass2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: IdentityN(pass2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)",
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsAllOrNone) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("pass1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass1"),
GetNode("pass2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("pass1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: IdentityN(pass1), IdentityN(pass2), Sub(sub)",
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
}
}
}
} |
160 | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONST_OP_SIZE_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_TENSORFLOW_CC_CONST_OP_SIZE_H_
#include <cstdint>
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
int64_t GetSizeInBytes(TF::ConstOp const_op);
}
}
#endif
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include <climits>
#include "absl/algorithm/container.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Types.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace quant {
namespace {
constexpr int64_t kAssumedNumBytesPerElem = 4;
int64_t GetSizeOfIntOrFloatConst(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
const ElementsAttr const_value = const_op.getValue();
const auto bytes_per_elem =
static_cast<int64_t>(dtype.getIntOrFloatBitWidth() / CHAR_BIT);
return bytes_per_elem * const_value.getNumElements();
}
int64_t GetSizeOfStringConst(TF::ConstOp const_op) {
const ElementsAttr const_value = const_op.getValue();
const auto str_attr = cast<DenseStringElementsAttr>(const_value);
return absl::c_accumulate(
str_attr.getRawStringData(), 0,
[](int64_t acc, const StringRef str_value) -> int64_t {
return acc + str_value.size();
});
}
int64_t GetSizeOfUnsupportedTypeConst(TF::ConstOp const_op) {
return kAssumedNumBytesPerElem * const_op.getValue().getNumElements();
}
}
int64_t GetSizeInBytes(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
if (dtype.isIntOrFloat()) {
return GetSizeOfIntOrFloatConst(const_op);
} else if (isa<TF::StringType>(dtype)) {
return GetSizeOfStringConst(const_op);
} else {
return GetSizeOfUnsupportedTypeConst(const_op);
}
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::Eq;
class GetSizeInBytesTest : public ::testing::Test {
protected:
GetSizeInBytesTest() : ctx_() { ctx_.loadDialect<TF::TensorFlowDialect>(); }
MLIRContext ctx_;
};
TF::ConstOp ParseConstOp(const absl::string_view const_op_str, Block& block,
MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(const_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto const_op = dyn_cast_or_null<TF::ConstOp>(block.front());
EXPECT_TRUE(const_op);
return const_op;
}
TEST_F(GetSizeInBytesTest, Int32ScalarConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
TEST_F(GetSizeInBytesTest, Int32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<2xi32>} : () -> tensor<2xi32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(8));
}
TEST_F(GetSizeInBytesTest, Int8ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<2> : tensor<3xi8>} : () -> tensor<3xi8>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(3));
}
TEST_F(GetSizeInBytesTest, Float32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<4xf32>} : () -> tensor<4xf32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Float64ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<2xf64>} : () -> tensor<2xf64>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Bfloat16ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<1.0> : tensor<7xbf16>} : () -> tensor<7xbf16>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(14));
}
TEST_F(GetSizeInBytesTest, TfStringConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<["Hello World", "Quantization"]> : tensor<2x!tf_type.string>} : () -> tensor<2x!tf_type.string>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(23));
}
TEST_F(GetSizeInBytesTest, ConstOpWithUnknownSizeAssumes4BytesPerElement) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = #tf_type<tensor_proto : "0xDEADBAAD"> : tensor<!tf_type.variant>} : () -> tensor<!tf_type.variant>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
}
}
} |
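As a usage sketch (not part of the sources above), GetSizeInBytes can be folded over a whole module to estimate its total constant footprint. `mlir::ModuleOp::walk` is standard MLIR; the wrapper function name is hypothetical.

#include <cstdint>
#include "mlir/IR/BuiltinOps.h"  // mlir::ModuleOp
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

// Hypothetical helper: sums GetSizeInBytes over every tf.Const in a module.
int64_t GetTotalConstSizeInBytes(mlir::ModuleOp module) {
  int64_t total_bytes = 0;
  // walk() visits every nested tf.Const op exactly once.
  module.walk([&](mlir::TF::ConstOp const_op) {
    total_bytes += mlir::quant::GetSizeInBytes(const_op);
  });
  return total_bytes;
}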
161 | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
#include <cstdint>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_integer_ops {
template <typename T>
inline void Dequantize(const tflite::DequantizationParams& op_params,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, float* output_data) {
  const int32_t zero_point = op_params.zero_point;
const double scale = op_params.scale;
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
    const int32_t val = static_cast<int32_t>(input_data[i]);
const float result = static_cast<float>(scale * (val - zero_point));
output_data[i] = result;
}
}
}
}
#endif
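Before the kernel wrapper below, a minimal sketch of calling the reference Dequantize directly; the buffer contents, scale, and zero point are assumptions for illustration.

#include <cstdint>
#include "tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h"
#include "tensorflow/lite/kernels/internal/types.h"

void DequantizeSketch() {
  const int8_t input[4] = {-128, -1, 0, 127};
  float output[4];
  tflite::DequantizationParams params;
  params.zero_point = -1;  // assumed zero point
  params.scale = 0.5;      // assumed scale
  const tflite::RuntimeShape shape({4});
  // Computes output[i] = scale * (input[i] - zero_point) for each element.
  tflite::reference_integer_ops::Dequantize(params, shape, input, shape,
                                            output);
}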
#include "tensorflow/lite/kernels/dequantize.h"
#include <stddef.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dequantize {
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
bool float_dequantized_weights_initialized;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->float_dequantized_weights_initialized = false;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type == kTfLiteInt4 ||
op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16 ||
op_context.input->type == kTfLiteFloat16);
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
}
op_context.output->type = kTfLiteFloat32;
if (IsConstantTensor(op_context.input)) {
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
}
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
if (IsConstantTensor(op_context.input) &&
op_data->float_dequantized_weights_initialized) {
return kTfLiteOk;
}
auto status = DequantizeImpl<kernel_type>(context, node, op_context.input,
op_context.output);
if (status != kTfLiteOk) {
return status;
}
if (IsConstantTensor(op_context.input)) {
op_data->float_dequantized_weights_initialized = true;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DEQUANTIZE_OPT() {
static TfLiteRegistration r = {
dequantize::Init, dequantize::Free, dequantize::Prepare,
dequantize::Eval<dequantize::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_DEQUANTIZE_REF() {
static TfLiteRegistration r = {dequantize::Init, dequantize::Free,
dequantize::Prepare,
dequantize::Eval<dequantize::kReference>};
return &r;
}
TfLiteRegistration* Register_DEQUANTIZE() {
#ifdef USE_NEON
return Register_DEQUANTIZE_OPT();
#else
return Register_DEQUANTIZE_REF();
#endif
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "Eigen/Core"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEQUANTIZE();
}
}
namespace {
using ::testing::ElementsAreArray;
class DequantizeOpModel : public SingleOpModel {
public:
explicit DequantizeOpModel() {}
DequantizeOpModel(TensorType type, std::initializer_list<int> shape,
float scale, int32_t zero_point, int version) {
const TensorData input_tensor_data = {type, shape, 0, 0, scale, zero_point};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
template <typename T>
void SetInputInt4(int input, const std::vector<T> data) {
    // Copy-convert the input values to int8 storage before 4-bit packing.
    std::vector<int8_t> data_int8(data.begin(), data.end());
PopulateTensor4bit(input, 0, data_int8.data(),
data_int8.data() + data_int8.size());
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
TEST(DequantizeOpTest, Int4) {
DequantizeOpModel m(TensorType_INT4, {2, 2}, 0.5, -1, 6);
m.SetInputInt4<int8_t>(0, {7, 6, -7, -8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({4, 3.5, -3, -3.5})));
}
TEST(DequantizeOpTest, Uint8) {
DequantizeOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Int8) {
DequantizeOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Float16) {
DequantizeOpModel m(TensorType_FLOAT16, {2, 3}, 1.0f, 0, 3);
std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
Eigen::half{-1.0f}, Eigen::half{0.f},
Eigen::half{1.0f}, Eigen::half{100.32f}};
m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f},
0.1f)));
}
TEST(DequantizeOpTest, Int16) {
DequantizeOpModel m(TensorType_INT16, {2, 5}, 0.5, 0, 4);
m.SetInput<int16_t>({-129, -126, -125, -124, -123, 124, 125, 126, 127, 131});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5})));
}
class DequantizePerChannelOpModel : public DequantizeOpModel {
public:
DequantizePerChannelOpModel(TensorType type, std::initializer_list<int> shape,
std::initializer_list<float> scales,
std::initializer_list<int64_t> zero_points,
int channel_dim, int version) {
std::vector<float> per_channel_scales(scales);
std::vector<int64_t> input_offsets(zero_points);
const TensorData input_tensor_data = {
type, shape, 0, 0, 0.0f, 0, true, per_channel_scales,
input_offsets, channel_dim};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
};
TEST(DequantizePerChannelOpTest, Uint8) {
DequantizePerChannelOpModel m(TensorType_UINT8, {2, 5}, {0.5, 0.5},
{127, 127}, 0, 5);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizePerChannelOpTest, Int8) {
DequantizePerChannelOpModel m(TensorType_INT8, {2, 5}, {0.5, 0.5}, {-1, -1},
0, 5);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
}
} |
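A hedged sketch of wiring the registration above into a resolver; `MutableOpResolver::AddBuiltin` is the standard TFLite API, and the wrapper function is hypothetical.

#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEQUANTIZE();
}
}
}

void AddDequantize(tflite::MutableOpResolver& resolver) {
  // Register_DEQUANTIZE() resolves to the NEON-optimized kernel when built
  // with USE_NEON and to the reference kernel otherwise.
  resolver.AddBuiltin(tflite::BuiltinOperator_DEQUANTIZE,
                      tflite::ops::builtin::Register_DEQUANTIZE());
}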
162 | #ifndef XLA_STREAM_EXECUTOR_DATA_TYPE_H_
#define XLA_STREAM_EXECUTOR_DATA_TYPE_H_
#include <complex>
#include <cstdint>
#include "tsl/platform/ml_dtypes.h"
#include "tsl/protobuf/dnn.pb.h"
namespace Eigen {
struct bfloat16;
struct half;
}
namespace stream_executor {
namespace dnn {
template <typename T>
struct ToDataType;
template <>
struct ToDataType<tsl::float8_e4m3fn> {
static constexpr DataType value = DataType::kF8E4M3FN;
};
template <>
struct ToDataType<tsl::float8_e5m2> {
static constexpr DataType value = DataType::kF8E5M2;
};
template <>
struct ToDataType<tsl::float8_e4m3fnuz> {
static constexpr DataType value = DataType::kF8E4M3FNUZ;
};
template <>
struct ToDataType<tsl::float8_e5m2fnuz> {
static constexpr DataType value = DataType::kF8E5M2FNUZ;
};
template <>
struct ToDataType<float> {
static constexpr DataType value = DataType::kFloat;
};
template <>
struct ToDataType<double> {
static constexpr DataType value = DataType::kDouble;
};
template <>
struct ToDataType<Eigen::half> {
static constexpr DataType value = DataType::kHalf;
};
template <>
struct ToDataType<Eigen::bfloat16> {
static constexpr DataType value = DataType::kBF16;
};
template <>
struct ToDataType<int8_t> {
static constexpr DataType value = DataType::kInt8;
};
template <>
struct ToDataType<int32_t> {
static constexpr DataType value = DataType::kInt32;
};
template <>
struct ToDataType<int64_t> {
static constexpr DataType value = DataType::kInt64;
};
template <>
struct ToDataType<std::complex<float>> {
static constexpr DataType value = DataType::kComplexFloat;
};
template <>
struct ToDataType<std::complex<double>> {
static constexpr DataType value = DataType::kComplexDouble;
};
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <stddef.h>
#include <string>
#include "absl/strings/str_cat.h"
namespace tflite {
namespace gpu {
namespace {
std::string ToGlslType(const std::string& scalar_type,
const std::string& vec_type, int vec_size) {
return vec_size == 1 ? scalar_type : absl::StrCat(vec_type, vec_size);
}
std::string GetGlslPrecisionModifier(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
return "lowp ";
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return "mediump ";
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return "highp ";
case DataType::BOOL:
return "";
default:
return "";
}
}
}
size_t SizeOf(DataType data_type) {
switch (data_type) {
case DataType::UINT8:
case DataType::INT8:
case DataType::BOOL:
return 1;
case DataType::FLOAT16:
case DataType::INT16:
case DataType::UINT16:
return 2;
case DataType::FLOAT32:
case DataType::INT32:
case DataType::UINT32:
return 4;
case DataType::FLOAT64:
case DataType::INT64:
case DataType::UINT64:
return 8;
case DataType::UNKNOWN:
return 0;
}
return 0;
}
std::string ToString(DataType data_type) {
switch (data_type) {
case DataType::FLOAT16:
return "float16";
case DataType::FLOAT32:
return "float32";
case DataType::FLOAT64:
return "float64";
case DataType::INT16:
return "int16";
case DataType::INT32:
return "int32";
case DataType::INT64:
return "int64";
case DataType::INT8:
return "int8";
case DataType::UINT16:
return "uint16";
case DataType::UINT32:
return "uint32";
case DataType::UINT64:
return "uint64";
case DataType::UINT8:
return "uint8";
case DataType::BOOL:
return "bool";
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
std::string ToCLDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
std::string ToMetalDataType(DataType data_type, int vec_size) {
const std::string postfix = vec_size == 1 ? "" : std::to_string(vec_size);
switch (data_type) {
case DataType::FLOAT16:
return "half" + postfix;
case DataType::FLOAT32:
return "float" + postfix;
case DataType::FLOAT64:
return "double" + postfix;
case DataType::INT16:
return "short" + postfix;
case DataType::INT32:
return "int" + postfix;
case DataType::INT64:
return "long" + postfix;
case DataType::INT8:
return "char" + postfix;
case DataType::UINT16:
return "ushort" + postfix;
case DataType::UINT32:
return "uint" + postfix;
case DataType::UINT64:
return "ulong" + postfix;
case DataType::UINT8:
return "uchar" + postfix;
case DataType::BOOL:
return "bool" + postfix;
case DataType::UNKNOWN:
return "unknown";
}
return "undefined";
}
DataType ToMetalTextureType(DataType data_type) {
switch (data_type) {
case DataType::FLOAT32:
case DataType::FLOAT16:
case DataType::INT32:
case DataType::INT16:
case DataType::UINT32:
case DataType::UINT16:
return data_type;
case DataType::INT8:
return DataType::INT16;
case DataType::UINT8:
case DataType::BOOL:
return DataType::UINT16;
default:
return DataType::UNKNOWN;
}
}
std::string ToGlslShaderDataType(DataType data_type, int vec_size,
bool add_precision, bool explicit_fp16) {
const std::string precision_modifier =
add_precision ? GetGlslPrecisionModifier(data_type) : "";
switch (data_type) {
case DataType::FLOAT16:
if (explicit_fp16) {
return ToGlslType("float16_t", "f16vec", vec_size);
} else {
return precision_modifier + ToGlslType("float", "vec", vec_size);
}
case DataType::FLOAT32:
return precision_modifier + ToGlslType("float", "vec", vec_size);
case DataType::FLOAT64:
return precision_modifier + ToGlslType("double", "dvec", vec_size);
case DataType::INT8:
case DataType::INT16:
case DataType::INT32:
case DataType::INT64:
return precision_modifier + ToGlslType("int", "ivec", vec_size);
case DataType::UINT8:
case DataType::UINT16:
case DataType::UINT32:
case DataType::UINT64:
return precision_modifier + ToGlslType("uint", "uvec", vec_size);
case DataType::BOOL:
return ToGlslType("bool", "bvec", vec_size);
case DataType::UNKNOWN:
return "unknown";
}
return "unknown";
}
}
} | #include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include <gtest/gtest.h>
namespace tflite {
namespace gpu {
namespace {
TEST(DataTypeTest, GlslShaderDataTypes) {
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT16));
EXPECT_EQ("mediump float",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
false));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, false,
true));
EXPECT_EQ("float16_t",
ToGlslShaderDataType(DataType::FLOAT16, 1, true,
true));
EXPECT_EQ("vec4", ToGlslShaderDataType(DataType::FLOAT16, 4));
EXPECT_EQ("mediump vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
false));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, false,
true));
EXPECT_EQ("f16vec4",
ToGlslShaderDataType(DataType::FLOAT16, 4, true,
true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32));
EXPECT_EQ("highp float",
ToGlslShaderDataType(DataType::FLOAT32, 1, true));
EXPECT_EQ("float", ToGlslShaderDataType(DataType::FLOAT32, 1,
false));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2));
EXPECT_EQ("highp vec2",
ToGlslShaderDataType(DataType::FLOAT32, 2, true));
EXPECT_EQ("vec2", ToGlslShaderDataType(DataType::FLOAT32, 2,
false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT32, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT16, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT8, 1, false));
EXPECT_EQ("int",
ToGlslShaderDataType(DataType::INT64, 1, true));
EXPECT_EQ("highp int",
ToGlslShaderDataType(DataType::INT32, 1, true));
EXPECT_EQ("mediump int",
ToGlslShaderDataType(DataType::INT16, 1, true));
EXPECT_EQ("lowp int",
ToGlslShaderDataType(DataType::INT8, 1, true));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT32, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT16, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT8, 1, false));
EXPECT_EQ("uint",
ToGlslShaderDataType(DataType::UINT64, 1, true));
EXPECT_EQ("highp uint",
ToGlslShaderDataType(DataType::UINT32, 1, true));
EXPECT_EQ("mediump uint",
ToGlslShaderDataType(DataType::UINT16, 1, true));
EXPECT_EQ("lowp uint",
ToGlslShaderDataType(DataType::UINT8, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL));
EXPECT_EQ("bvec4", ToGlslShaderDataType(DataType::BOOL, 4));
EXPECT_EQ("bool",
ToGlslShaderDataType(DataType::BOOL, 1, true));
EXPECT_EQ("bool", ToGlslShaderDataType(DataType::BOOL, 1,
false));
}
}
}
} |
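A small usage sketch of the helpers above; the element count is an assumption for the example.

#include <cstddef>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/data_type.h"

void DataTypeSketch() {
  using tflite::gpu::DataType;
  // 1024 fp16 elements occupy 2048 bytes.
  const size_t bytes = tflite::gpu::SizeOf(DataType::FLOAT16) * 1024;
  // Vectorized type names for kernel codegen: "half4" and "mediump vec4".
  const std::string cl = tflite::gpu::ToCLDataType(DataType::FLOAT16, 4);
  const std::string glsl = tflite::gpu::ToGlslShaderDataType(
      DataType::FLOAT16, 4, /*add_precision=*/true, /*explicit_fp16=*/false);
  (void)bytes;
  (void)cl;
  (void)glsl;
}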
163 | #ifndef XLA_SERVICE_SPMD_COLLECTIVE_PERMUTE_MOTION_H_
#define XLA_SERVICE_SPMD_COLLECTIVE_PERMUTE_MOTION_H_
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class CollectivePermuteMotion : public HloModulePass {
public:
CollectivePermuteMotion() = default;
absl::string_view name() const override {
return "collective-permute-motion";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/spmd/collective_permute_motion.h"
#include <cstdint>
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape_util.h"
namespace xla {
absl::flat_hash_set<HloInstruction*> FindLoopConsts(HloComputation* body) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
absl::flat_hash_set<HloInstruction*> loop_consts;
for (int64_t i = 0; i < root->operand_count(); ++i) {
HloInstruction* output = root->mutable_operand(i);
while (output->opcode() == HloOpcode::kReshape ||
output->opcode() == HloOpcode::kCopy) {
output = output->mutable_operand(0);
}
if (output->opcode() == HloOpcode::kGetTupleElement &&
output->tuple_index() == i &&
output->operand(0) == body->parameter_instruction(0)) {
loop_consts.insert(output);
}
}
for (HloInstruction* inst : body->MakeInstructionPostOrder()) {
if (inst->IsConstant() || inst->opcode() == HloOpcode::kIota ||
inst->opcode() == HloOpcode::kReplicaId ||
inst->opcode() == HloOpcode::kPartitionId) {
loop_consts.insert(inst);
continue;
}
if (!inst->IsElementwise() && inst->opcode() != HloOpcode::kBroadcast &&
inst->opcode() != HloOpcode::kReduce &&
inst->opcode() != HloOpcode::kReshape &&
inst->opcode() != HloOpcode::kDynamicSlice &&
inst->opcode() != HloOpcode::kTranspose) {
continue;
}
if (inst->HasSideEffectNoRecurse()) {
continue;
}
if (absl::c_all_of(inst->operands(), [&](const HloInstruction* operand) {
return loop_consts.contains(operand);
})) {
loop_consts.insert(inst);
}
}
return loop_consts;
}
constexpr int64_t kMaxMovableClusterSize = 8;
struct MovableCluster {
int64_t root_tuple_index;
std::vector<HloInstruction*> reverse_order_instructions;
HloInstruction* collective_permute = nullptr;
};
std::optional<MovableCluster> FindMovableClusterAtBodyRoot(
HloComputation* body, int64_t root_tuple_index,
const absl::flat_hash_set<HloInstruction*>& loop_consts) {
HloInstruction* root = body->root_instruction();
CHECK_EQ(root->opcode(), HloOpcode::kTuple);
MovableCluster cluster;
cluster.root_tuple_index = root_tuple_index;
std::deque<HloInstruction*> queue;
queue.push_back(root->mutable_operand(root_tuple_index));
while (!queue.empty()) {
HloInstruction* visiting = queue.front();
queue.pop_front();
if (cluster.reverse_order_instructions.size() >= kMaxMovableClusterSize) {
VLOG(2) << "Cannot move: too many instructions to move";
return std::nullopt;
}
if (visiting->user_count() > 1) {
VLOG(2) << "Cannot move: " << visiting->name() << " used multiple times";
return std::nullopt;
}
cluster.reverse_order_instructions.push_back(visiting);
if (visiting->opcode() == HloOpcode::kCollectivePermute) {
if (cluster.collective_permute != nullptr) {
VLOG(2) << "Cannot move: " << visiting->name()
<< " multiple collective permutes";
return std::nullopt;
}
cluster.collective_permute = visiting;
continue;
}
if (!visiting->IsElementwise() || visiting->HasSideEffectNoRecurse()) {
VLOG(2) << "Cannot move: " << visiting->name() << " unsupported op";
return std::nullopt;
}
for (HloInstruction* operand : visiting->mutable_operands()) {
if (!loop_consts.contains(operand)) {
queue.push_back(operand);
}
}
}
if (cluster.collective_permute == nullptr) {
return std::nullopt;
}
return cluster;
}
absl::flat_hash_set<int64_t> FindIndicesUnusedAfterLoop(HloInstruction* loop) {
absl::flat_hash_set<int64_t> indices;
int64_t count = loop->shape().tuple_shapes_size();
for (int64_t i = 0; i < count; ++i) {
indices.insert(i);
}
for (HloInstruction* user : loop->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
indices.clear();
break;
}
indices.erase(user->tuple_index());
}
return indices;
}
absl::StatusOr<bool> MoveCollectivePermutes(HloComputation* computation,
HloInstruction* loop) {
HloComputation* body = loop->while_body();
HloInstruction* root = body->root_instruction();
if (root->opcode() != HloOpcode::kTuple ||
loop->operand(0)->opcode() != HloOpcode::kTuple) {
return false;
}
auto maybe_induction_var_idx = GetLoopInductionVarTupleIdx(loop);
if (!maybe_induction_var_idx.has_value()) {
VLOG(2) << "Skip " << loop->name() << ", no induction var";
return false;
}
absl::flat_hash_map<const HloInstruction*, int64_t> output_appear_counts;
for (const HloInstruction* operand : root->operands()) {
auto res = output_appear_counts.emplace(operand, 1);
if (!res.second) {
res.first->second++;
}
}
absl::flat_hash_set<int64_t> unused_indices_after_loop =
FindIndicesUnusedAfterLoop(loop);
const absl::flat_hash_set<HloInstruction*> loop_consts = FindLoopConsts(body);
int64_t induction_var_idx = *maybe_induction_var_idx;
std::vector<HloInstruction*> input_gtes(root->operand_count(), nullptr);
absl::flat_hash_set<int64_t> multi_use_indices;
for (HloInstruction* user : body->parameter_instruction(0)->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(2) << "Skip " << loop->name() << ", non-GTE input use";
return false;
}
if (multi_use_indices.contains(user->tuple_index())) {
continue;
}
if (input_gtes[user->tuple_index()] != nullptr) {
multi_use_indices.insert(user->tuple_index());
input_gtes[user->tuple_index()] = nullptr;
} else {
input_gtes[user->tuple_index()] = user;
}
}
HloInstruction* ind_var = input_gtes[induction_var_idx];
if (ind_var == nullptr || ind_var->shape().rank() > 0) {
VLOG(2) << "Skip " << loop->name() << ", non-scalar induction var";
return false;
}
if (root->operand(induction_var_idx)->opcode() != HloOpcode::kAdd &&
root->operand(induction_var_idx)->opcode() != HloOpcode::kSubtract) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub induction var";
return false;
}
if (root->operand(induction_var_idx)->operand(0) == ind_var) {
if (!root->operand(induction_var_idx)->operand(1)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else if (root->operand(induction_var_idx)->operand(1) == ind_var) {
if (!root->operand(induction_var_idx)->operand(0)->IsConstant()) {
VLOG(2) << "Skip " << loop->name() << ", non-add/sub const induction var";
return false;
}
} else {
return false;
}
HloInstruction* ind_var_orig =
loop->mutable_operand(0)->mutable_operand(induction_var_idx);
if (!ind_var_orig->IsConstant()) {
VLOG(2) << "Skip " << loop->name()
<< ", non-constant initial induction var";
return false;
}
bool changed = false;
std::vector<MovableCluster> movable_outputs;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (output_appear_counts[root->operand(i)] > 1) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " appears multiple times in output.";
continue;
}
if (!unused_indices_after_loop.contains(i)) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " used after loop.";
continue;
}
auto cluster = FindMovableClusterAtBodyRoot(body, i, loop_consts);
if (!cluster.has_value()) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " did not find a movable cluster.";
continue;
}
HloInstruction* input = input_gtes[cluster->root_tuple_index];
HloInstruction* cp = cluster->collective_permute;
if (input == nullptr || cp->operand(0) == input) {
VLOG(2) << "Skip " << loop->name() << " index " << i
<< " collective-permute already at top.";
continue;
}
const std::vector<HloInstruction*> original_input_users = input->users();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> replacement;
replacement[cp->operand(0)] = input;
for (auto it = cluster->reverse_order_instructions.rbegin();
it != cluster->reverse_order_instructions.rend(); ++it) {
HloInstruction* inst = *it;
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : inst->mutable_operands()) {
auto rit = replacement.find(operand);
if (rit != replacement.end()) {
new_operands.push_back(rit->second);
} else {
new_operands.push_back(operand);
}
}
HloInstruction* clone = body->AddInstruction(
inst->CloneWithNewOperands(inst->shape(), new_operands));
replacement[inst] = clone;
}
HloInstruction* new_input =
replacement[cluster->reverse_order_instructions[0]];
if (ind_var_orig->parent() != body) {
ind_var_orig = body->AddInstruction(ind_var_orig->Clone());
}
HloInstruction* is_first_iter =
body->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(new_input->shape(), PRED),
body->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeScalarShape(PRED), ind_var, ind_var_orig,
Comparison::Direction::kEq)),
{}));
new_input = body->AddInstruction(
HloInstruction::CreateTernary(new_input->shape(), HloOpcode::kSelect,
is_first_iter, input, new_input));
for (HloInstruction* user : original_input_users) {
TF_RETURN_IF_ERROR(input->ReplaceUseWith(user, new_input));
}
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(cluster->root_tuple_index,
cp->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstructionAndUnusedOperands(
cluster->reverse_order_instructions[0]));
VLOG(2) << "Moved " << loop->name() << " index " << i;
changed = true;
}
return changed;
}
absl::StatusOr<bool> CollectivePermuteMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
TF_ASSIGN_OR_RETURN(bool moved,
MoveCollectivePermutes(computation, instr));
changed |= moved;
}
}
}
return changed;
}
} | #include "xla/service/spmd/collective_permute_motion.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
using CollectivePermuteMotionTest = HloTestBase;
namespace op = xla::testing::opcode_matchers;
TEST_F(CollectivePermuteMotionTest, SimpleMove) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto cp = op::CollectivePermute(input);
auto select = op::Select(op::Broadcast(op::Compare()), input, cp);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, NoCollectivePermute) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[], f32[]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[] get-tuple-element(loop_var), index=1
constant.4 = f32[] constant(4.0)
ROOT tuple = (s32[], f32[], f32[]) tuple(add, constant.4, gte1)
}
cond {
loop_var = (s32[], f32[], f32[]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[] parameter(0)
param.1 = f32[] parameter(1)
tuple.1 = (s32[], f32[], f32[]) tuple(constant.2, param, param.1)
while = (s32[], f32[], f32[]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, MoveWithElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
broadcast = f32[4,4] broadcast(constant.4), dimensions={}
add1 = f32[4,4] add(cp, broadcast)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_TRUE(pass.Run(&*module).value());
VLOG(1) << module->ToString();
const HloInstruction* loop = FindInstruction(module.get(), "while");
const HloInstruction* output =
loop->while_body()->root_instruction()->operand(1);
auto input =
AllOf(op::Shape("f32[4,4]"), op::GetTupleElement(op::Parameter(0)));
auto moved =
op::Add(op::CollectivePermute(input), op::Broadcast(op::Constant()));
auto select = op::Select(op::Broadcast(op::Compare()), input, moved);
EXPECT_THAT(output, op::Multiply(select, select));
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveWithNonConstElementwise) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
constant.4 = f32[] constant(1)
nonconst = f32[4,4] custom-call(), custom_call_target="unknown"
add1 = f32[4,4] add(cp, nonconst)
ROOT tuple = (s32[], f32[4,4]) tuple(add, add1)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfOutputUsed) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(add, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = f32[4,4] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfInductionVarUnknown) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
custom = s32[] custom-call(gte0, constant.1), custom_call_target="unknown"
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4]) tuple(custom, cp)
}
cond {
loop_var = (s32[], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4]) tuple(constant.2, param)
while = (s32[], f32[4,4]) while(tuple.1), condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
TEST_F(CollectivePermuteMotionTest, DoNotMoveIfMultiOutput) {
absl::string_view hlo_string = R"(
HloModule test
body {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
constant.1 = s32[] constant(1)
gte0 = s32[] get-tuple-element(loop_var), index=0
add = s32[] add(gte0, constant.1)
gte1 = f32[4,4] get-tuple-element(loop_var), index=1
mul = f32[4,4] multiply(gte1, gte1)
cp = f32[4,4] collective-permute(mul), source_target_pairs={{0,1},{1,2}}
ROOT tuple = (s32[], f32[4,4], f32[4,4]) tuple(add, cp, cp)
}
cond {
loop_var = (s32[], f32[4,4], f32[4,4]) parameter(0)
gte.cond = s32[] get-tuple-element(loop_var), index=0
constant.3 = s32[] constant(5)
ROOT lt = pred[] compare(gte.cond, constant.3), direction=LT
}
ENTRY main {
constant.2 = s32[] constant(0)
param = f32[4,4] parameter(0)
tuple.1 = (s32[], f32[4,4], f32[4,4]) tuple(constant.2, param, param)
while = (s32[], f32[4,4], f32[4,4]) while(tuple.1),
condition=cond, body=body
ROOT result = s32[] get-tuple-element(while), index=0
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
CollectivePermuteMotion pass;
ASSERT_FALSE(pass.Run(&*module).value());
}
}
} |
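For reference, a minimal sketch of invoking the pass outside the test fixture; the module pointer is assumed to come from an HLO parser, as in the tests above.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/spmd/collective_permute_motion.h"

absl::StatusOr<bool> RunCollectivePermuteMotion(xla::HloModule* module) {
  xla::CollectivePermuteMotion pass;
  // Returns true if any collective-permute was hoisted from a while-loop
  // body root to the input of the next iteration.
  return pass.Run(module, /*execution_threads=*/{});
}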
164 | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPE_FACTORY_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPE_FACTORY_H_
#include "absl/strings/string_view.h"
#include "common/memory.h"
#include "common/sized_input_view.h"
#include "common/type.h"
namespace cel {
namespace common_internal {
class PiecewiseValueManager;
}
class TypeFactory {
public:
virtual ~TypeFactory() = default;
virtual MemoryManagerRef GetMemoryManager() const = 0;
ListType CreateListType(TypeView element);
MapType CreateMapType(TypeView key, TypeView value);
StructType CreateStructType(absl::string_view name);
OpaqueType CreateOpaqueType(absl::string_view name,
const SizedInputView<TypeView>& parameters);
OptionalType CreateOptionalType(TypeView parameter);
ListTypeView GetDynListType();
MapTypeView GetDynDynMapType();
MapTypeView GetStringDynMapType();
OptionalTypeView GetDynOptionalType();
NullType GetNullType() { return NullType{}; }
ErrorType GetErrorType() { return ErrorType{}; }
DynType GetDynType() { return DynType{}; }
AnyType GetAnyType() { return AnyType{}; }
BoolType GetBoolType() { return BoolType{}; }
IntType GetIntType() { return IntType{}; }
UintType GetUintType() { return UintType{}; }
DoubleType GetDoubleType() { return DoubleType{}; }
StringType GetStringType() { return StringType{}; }
BytesType GetBytesType() { return BytesType{}; }
DurationType GetDurationType() { return DurationType{}; }
TimestampType GetTimestampType() { return TimestampType{}; }
TypeType GetTypeType() { return TypeType{}; }
UnknownType GetUnknownType() { return UnknownType{}; }
BoolWrapperType GetBoolWrapperType() { return BoolWrapperType{}; }
BytesWrapperType GetBytesWrapperType() { return BytesWrapperType{}; }
DoubleWrapperType GetDoubleWrapperType() { return DoubleWrapperType{}; }
IntWrapperType GetIntWrapperType() { return IntWrapperType{}; }
StringWrapperType GetStringWrapperType() { return StringWrapperType{}; }
UintWrapperType GetUintWrapperType() { return UintWrapperType{}; }
Type GetJsonValueType() { return DynType{}; }
ListType GetJsonListType() { return ListType(GetDynListType()); }
MapType GetJsonMapType() { return MapType(GetStringDynMapType()); }
protected:
friend class common_internal::PiecewiseValueManager;
virtual ListType CreateListTypeImpl(TypeView element) = 0;
virtual MapType CreateMapTypeImpl(TypeView key, TypeView value) = 0;
virtual StructType CreateStructTypeImpl(absl::string_view name) = 0;
virtual OpaqueType CreateOpaqueTypeImpl(
absl::string_view name, const SizedInputView<TypeView>& parameters) = 0;
};
}
#endif
#include "common/type_factory.h"
#include "absl/base/attributes.h"
#include "absl/log/absl_check.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/sized_input_view.h"
#include "common/type.h"
#include "common/type_kind.h"
#include "common/types/type_cache.h"
#include "internal/names.h"
namespace cel {
namespace {
using common_internal::ListTypeCacheMap;
using common_internal::MapTypeCacheMap;
using common_internal::OpaqueTypeCacheMap;
using common_internal::ProcessLocalTypeCache;
using common_internal::StructTypeCacheMap;
bool IsValidMapKeyType(TypeView type) {
switch (type.kind()) {
case TypeKind::kDyn:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kError:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kBool:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kInt:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kUint:
ABSL_FALLTHROUGH_INTENDED;
case TypeKind::kString:
return true;
default:
return false;
}
}
}
ListType TypeFactory::CreateListType(TypeView element) {
if (auto list_type = ProcessLocalTypeCache::Get()->FindListType(element);
list_type.has_value()) {
return ListType(*list_type);
}
return CreateListTypeImpl(element);
}
MapType TypeFactory::CreateMapType(TypeView key, TypeView value) {
ABSL_DCHECK(IsValidMapKeyType(key)) << key;
if (auto map_type = ProcessLocalTypeCache::Get()->FindMapType(key, value);
map_type.has_value()) {
return MapType(*map_type);
}
return CreateMapTypeImpl(key, value);
}
StructType TypeFactory::CreateStructType(absl::string_view name) {
ABSL_DCHECK(internal::IsValidRelativeName(name)) << name;
return CreateStructTypeImpl(name);
}
OpaqueType TypeFactory::CreateOpaqueType(
absl::string_view name, const SizedInputView<TypeView>& parameters) {
ABSL_DCHECK(internal::IsValidRelativeName(name)) << name;
if (auto opaque_type =
ProcessLocalTypeCache::Get()->FindOpaqueType(name, parameters);
opaque_type.has_value()) {
return OpaqueType(*opaque_type);
}
return CreateOpaqueTypeImpl(name, parameters);
}
OptionalType TypeFactory::CreateOptionalType(TypeView parameter) {
return Cast<OptionalType>(CreateOpaqueType(OptionalType::kName, {parameter}));
}
ListTypeView TypeFactory::GetDynListType() {
return ProcessLocalTypeCache::Get()->GetDynListType();
}
MapTypeView TypeFactory::GetDynDynMapType() {
return ProcessLocalTypeCache::Get()->GetDynDynMapType();
}
MapTypeView TypeFactory::GetStringDynMapType() {
return ProcessLocalTypeCache::Get()->GetStringDynMapType();
}
OptionalTypeView TypeFactory::GetDynOptionalType() {
return ProcessLocalTypeCache::Get()->GetDynOptionalType();
}
} | #include "common/type_factory.h"
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include "absl/types/optional.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/type.h"
#include "common/type_introspector.h"
#include "common/type_manager.h"
#include "common/types/type_cache.h"
#include "internal/testing.h"
namespace cel {
namespace {
using common_internal::ProcessLocalTypeCache;
using testing::_;
using testing::Eq;
using testing::Ne;
using testing::TestParamInfo;
using testing::TestWithParam;
enum class ThreadSafety {
kCompatible,
kSafe,
};
std::ostream& operator<<(std::ostream& out, ThreadSafety thread_safety) {
switch (thread_safety) {
    case ThreadSafety::kCompatible:
      return out << "THREAD_COMPATIBLE";
    case ThreadSafety::kSafe:
      return out << "THREAD_SAFE";
}
}
class TypeFactoryTest
: public common_internal::ThreadCompatibleMemoryTest<ThreadSafety> {
public:
void SetUp() override {
ThreadCompatibleMemoryTest::SetUp();
switch (thread_safety()) {
case ThreadSafety::kCompatible:
type_manager_ = NewThreadCompatibleTypeManager(
memory_manager(),
NewThreadCompatibleTypeIntrospector(memory_manager()));
break;
case ThreadSafety::kSafe:
type_manager_ = NewThreadSafeTypeManager(
memory_manager(), NewThreadSafeTypeIntrospector(memory_manager()));
break;
}
}
void TearDown() override { Finish(); }
void Finish() {
type_manager_.reset();
ThreadCompatibleMemoryTest::Finish();
}
TypeFactory& type_factory() const { return **type_manager_; }
ThreadSafety thread_safety() const { return std::get<1>(GetParam()); }
static std::string ToString(
TestParamInfo<std::tuple<MemoryManagement, ThreadSafety>> param) {
std::ostringstream out;
out << std::get<0>(param.param) << "_" << std::get<1>(param.param);
return out.str();
}
private:
absl::optional<Shared<TypeManager>> type_manager_;
};
TEST_P(TypeFactoryTest, ListType) {
auto list_type1 = type_factory().CreateListType(StringType());
EXPECT_THAT(type_factory().CreateListType(StringType()), Eq(list_type1));
EXPECT_THAT(type_factory().CreateListType(BytesType()), Ne(list_type1));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto list_type2 = type_factory().CreateListType(struct_type1);
EXPECT_THAT(type_factory().CreateListType(struct_type1), Eq(list_type2));
EXPECT_THAT(type_factory().CreateListType(struct_type2), Ne(list_type2));
EXPECT_EQ(type_factory().GetDynListType(),
ProcessLocalTypeCache::Get()->GetDynListType());
}
TEST_P(TypeFactoryTest, MapType) {
auto map_type1 = type_factory().CreateMapType(StringType(), BytesType());
EXPECT_THAT(type_factory().CreateMapType(StringType(), BytesType()),
Eq(map_type1));
EXPECT_THAT(type_factory().CreateMapType(StringType(), StringType()),
Ne(map_type1));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto map_type2 = type_factory().CreateMapType(StringType(), struct_type1);
EXPECT_THAT(type_factory().CreateMapType(StringType(), struct_type1),
Eq(map_type2));
EXPECT_THAT(type_factory().CreateMapType(StringType(), struct_type2),
Ne(map_type2));
EXPECT_EQ(type_factory().GetDynDynMapType(),
ProcessLocalTypeCache::Get()->GetDynDynMapType());
EXPECT_EQ(type_factory().GetStringDynMapType(),
ProcessLocalTypeCache::Get()->GetStringDynMapType());
}
TEST_P(TypeFactoryTest, MapTypeInvalidKeyType) {
EXPECT_DEBUG_DEATH(type_factory().CreateMapType(DoubleType(), BytesType()),
_);
}
TEST_P(TypeFactoryTest, StructType) {
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
EXPECT_THAT(type_factory().CreateStructType("test.Struct1"),
Eq(struct_type1));
EXPECT_THAT(type_factory().CreateStructType("test.Struct2"),
Ne(struct_type1));
}
TEST_P(TypeFactoryTest, StructTypeBadName) {
EXPECT_DEBUG_DEATH(type_factory().CreateStructType("test.~"), _);
}
TEST_P(TypeFactoryTest, OpaqueType) {
auto opaque_type1 =
type_factory().CreateOpaqueType("test.Struct1", {BytesType()});
EXPECT_THAT(type_factory().CreateOpaqueType("test.Struct1", {BytesType()}),
Eq(opaque_type1));
EXPECT_THAT(type_factory().CreateOpaqueType("test.Struct2", {}),
Ne(opaque_type1));
}
TEST_P(TypeFactoryTest, OpaqueTypeBadName) {
EXPECT_DEBUG_DEATH(type_factory().CreateOpaqueType("test.~", {}), _);
}
TEST_P(TypeFactoryTest, OptionalType) {
auto optional_type1 = type_factory().CreateOptionalType(StringType());
EXPECT_THAT(type_factory().CreateOptionalType(StringType()),
Eq(optional_type1));
EXPECT_THAT(type_factory().CreateOptionalType(BytesType()),
Ne(optional_type1));
auto struct_type1 = type_factory().CreateStructType("test.Struct1");
auto struct_type2 = type_factory().CreateStructType("test.Struct2");
auto optional_type2 = type_factory().CreateOptionalType(struct_type1);
EXPECT_THAT(type_factory().CreateOptionalType(struct_type1),
Eq(optional_type2));
EXPECT_THAT(type_factory().CreateOptionalType(struct_type2),
Ne(optional_type2));
EXPECT_EQ(type_factory().GetDynOptionalType(),
ProcessLocalTypeCache::Get()->GetDynOptionalType());
}
INSTANTIATE_TEST_SUITE_P(
TypeFactoryTest, TypeFactoryTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
::testing::Values(ThreadSafety::kCompatible,
ThreadSafety::kSafe)),
TypeFactoryTest::ToString);
}
} |
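An illustrative sketch of the factory API exercised by the tests above; the factory reference is assumed to come from a TypeManager, as in the fixture.

#include "common/type.h"
#include "common/type_factory.h"

void TypeFactorySketch(cel::TypeFactory& factory) {
  // Composite types backed by the process-local cache compare equal when
  // built from equal components.
  cel::ListType list_type = factory.CreateListType(cel::StringType());
  cel::MapType map_type =
      factory.CreateMapType(cel::StringType(), cel::DynType());
  cel::OptionalType optional_type =
      factory.CreateOptionalType(cel::IntType());
  (void)list_type;
  (void)map_type;
  (void)optional_type;
}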
165 | #ifndef AROLLA_EXPR_OPERATORS_WHILE_LOOP_WHILE_LOOP_IMPL_H_
#define AROLLA_EXPR_OPERATORS_WHILE_LOOP_WHILE_LOOP_IMPL_H_
#include <functional>
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
namespace arolla::expr_operators::while_loop_impl {
absl::StatusOr<std::pair<expr::ExprNodePtr, NamedExpressions>>
ExtractImmutables(
const expr::ExprNodePtr& expr,
std::function<std::string(const expr::ExprNodePtr& node)> naming_function);
}
#endif
#include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include <algorithm>
#include <functional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators::while_loop_impl {
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::Placeholder;
absl::StatusOr<std::pair<ExprNodePtr, NamedExpressions>> ExtractImmutables(
const ExprNodePtr& expr, std::function<std::string(const ExprNodePtr& node)>
immutable_naming_function) {
NamedExpressions immutables;
struct Visit {
ExprNodePtr expr;
bool has_placeholder_dep;
bool has_leaf_dep;
};
ASSIGN_OR_RETURN(
(auto [converted_expr, has_placeholder_dep, has_leaf_dep]),
expr::PostOrderTraverse(
expr,
[&](const ExprNodePtr& node,
absl::Span<const Visit* const> visits) -> absl::StatusOr<Visit> {
if (node->is_placeholder()) {
return Visit{.expr = node,
.has_placeholder_dep = true,
.has_leaf_dep = false};
}
if (node->is_leaf()) {
return Visit{.expr = node,
.has_placeholder_dep = false,
.has_leaf_dep = true};
}
bool has_placeholder_dep = std::any_of(
visits.begin(), visits.end(),
[](const auto& v) { return v->has_placeholder_dep; });
bool has_leaf_dep =
std::any_of(visits.begin(), visits.end(),
[](const auto& v) { return v->has_leaf_dep; });
if (!has_placeholder_dep) {
return Visit{.expr = node,
.has_placeholder_dep = false,
.has_leaf_dep = has_leaf_dep};
}
std::vector<ExprNodePtr> new_deps;
new_deps.reserve(visits.size());
for (const auto& visit : visits) {
if (visit->has_placeholder_dep || !visit->has_leaf_dep) {
new_deps.push_back(visit->expr);
} else {
auto placeholder_key = immutable_naming_function(visit->expr);
new_deps.emplace_back(Placeholder(placeholder_key));
immutables.emplace(std::move(placeholder_key), visit->expr);
}
}
ASSIGN_OR_RETURN(auto new_node, expr::WithNewDependencies(
node, std::move(new_deps)));
return Visit{.expr = new_node,
.has_placeholder_dep = true,
.has_leaf_dep = has_leaf_dep};
}));
if (!has_placeholder_dep) {
DCHECK(immutables.empty());
auto placeholder_key = immutable_naming_function(converted_expr);
immutables.emplace(placeholder_key, converted_expr);
converted_expr = Placeholder(placeholder_key);
}
return {{std::move(converted_expr), std::move(immutables)}};
}
} | #include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include <cstdint>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr_operators::while_loop_impl {
namespace {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::IsOkAndHolds;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
class WhileLoopImplTest : public ::testing::Test {
protected:
void SetUp() override { ASSERT_OK(InitArolla()); }
};
TEST_F(WhileLoopImplTest, ExtractImmutables) {
absl::flat_hash_map<Fingerprint, std::string> immutable_names;
auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string {
if (auto it = immutable_names.find(node->fingerprint());
it != immutable_names.end()) {
return it->second;
}
std::string name = absl::StrFormat("_immutable_%d", immutable_names.size());
immutable_names.emplace(node->fingerprint(), name);
return name;
};
{
auto expr = Literal(int64_t{1});
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(Placeholder("_immutable_0")),
UnorderedElementsAre(Pair(
"_immutable_0", EqualsExpr(Literal<int64_t>(1)))))));
}
{
auto expr = Leaf("fifty");
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(Placeholder("_immutable_1")),
UnorderedElementsAre(Pair(
"_immutable_1", EqualsExpr(Leaf("fifty")))))));
}
{
auto expr = Placeholder("seven");
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(expr), IsEmpty())));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{Leaf("two"),
CallOp("math.add", {Placeholder("fifty"), Leaf("seven")})}));
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add",
{Placeholder("_immutable_3"),
CallOp("math.add", {Placeholder("fifty"),
Placeholder("_immutable_2")})})),
UnorderedElementsAre(
Pair("_immutable_3", EqualsExpr(Leaf("two"))),
Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {Placeholder("fifty"),
Literal<int64_t>(7)}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(EqualsExpr(CallOp("math.add", {Placeholder("fifty"),
Literal<int64_t>(7)})),
IsEmpty())));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr57, CallOp("math.add", {Leaf("fifty"), Literal<int64_t>(7)}));
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp("math.add", {expr57, Placeholder("two")}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add", {Placeholder("_immutable_4"), Placeholder("two")})),
UnorderedElementsAre(Pair("_immutable_4", EqualsExpr(expr57))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {Placeholder("fifty"), Leaf("seven")}),
Leaf("seven")}));
EXPECT_THAT(
ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp(
"math.add", {CallOp("math.add", {Placeholder("fifty"),
Placeholder("_immutable_2")}),
Placeholder("_immutable_2")})),
UnorderedElementsAre(
Pair("_immutable_2", EqualsExpr(Leaf("seven")))))));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr,
CallOp("math.add",
{CallOp("math.add", {Literal<int64_t>(1), Leaf("fifty")}),
Placeholder("seven")}));
EXPECT_THAT(ExtractImmutables(expr, immutable_naming_function),
IsOkAndHolds(Pair(
EqualsExpr(CallOp("math.add", {Placeholder("_immutable_5"),
Placeholder("seven")})),
UnorderedElementsAre(Pair(
"_immutable_5",
EqualsExpr(CallOp("math.add", {Literal<int64_t>(1),
Leaf("fifty")})))))));
}
}
}
} |
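For orientation, here is a minimal usage sketch of ExtractImmutables, mirroring the tests above. The ASSIGN_OR_RETURN wrapper function and the fixed placeholder name are illustrative assumptions, not part of the API:

// Hoist the leaf-only subtree L.step out of a loop-body expression.
absl::Status ExtractImmutablesSketch() {
  using ::arolla::expr::CallOp;
  using ::arolla::expr::ExprNodePtr;
  using ::arolla::expr::Leaf;
  using ::arolla::expr::Placeholder;
  ASSIGN_OR_RETURN(ExprNodePtr body,
                   CallOp("math.add", {Placeholder("acc"), Leaf("step")}));
  ASSIGN_OR_RETURN((auto [new_body, immutables]),
                   ExtractImmutables(body, [](const ExprNodePtr&) {
                     // A real naming function must produce unique names.
                     return std::string("_immutable_0");
                   }));
  // new_body is math.add(P.acc, P._immutable_0), and immutables maps
  // "_immutable_0" -> L.step, so the leaf is evaluated once outside the loop.
  return absl::OkStatus();
}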
166 | #ifndef AROLLA_EXPR_EXPR_ATTRIBUTES_H_
#define AROLLA_EXPR_EXPR_ATTRIBUTES_H_
#include <iosfwd>
#include <optional>
#include <ostream>
#include <utility>
#include "absl/log/check.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
class ExprAttributes {
public:
ExprAttributes() noexcept = default;
ExprAttributes(ExprAttributes&&) noexcept = default;
ExprAttributes& operator=(ExprAttributes&&) noexcept = default;
ExprAttributes(const ExprAttributes&) noexcept = default;
ExprAttributes& operator=(const ExprAttributes&) noexcept = default;
explicit ExprAttributes(const QType* qtype) : qtype_(qtype) {}
explicit ExprAttributes(TypedRef qvalue)
: qtype_(qvalue.GetType()), qvalue_(qvalue) {}
explicit ExprAttributes(TypedValue&& qvalue)
: qtype_(qvalue.GetType()), qvalue_(std::move(qvalue)) {}
explicit ExprAttributes(const TypedValue& qvalue)
: qtype_(qvalue.GetType()), qvalue_(qvalue) {}
ExprAttributes(QTypePtr qtype, TypedValue&& qvalue)
: qtype_(qtype), qvalue_(std::move(qvalue)) {
DCHECK_EQ(qtype_, qvalue_->GetType());
}
ExprAttributes(QTypePtr qtype, const TypedValue& qvalue)
: qtype_(qtype), qvalue_(qvalue) {
DCHECK_EQ(qtype_, qvalue_->GetType());
}
ExprAttributes(const QType* qtype,
std::optional<TypedValue>&& qvalue)
: qtype_(qtype), qvalue_(std::move(qvalue)) {
if (qvalue_.has_value()) {
DCHECK_EQ(qtype_, qvalue_->GetType());
}
}
ExprAttributes(const QType* qtype,
const std::optional<TypedValue>& qvalue)
: qtype_(qtype), qvalue_(qvalue) {
if (qvalue_.has_value()) {
DCHECK_EQ(qtype_, qvalue_->GetType());
}
}
const QType* qtype() const { return qtype_; }
const std::optional<TypedValue>& qvalue() const { return qvalue_; }
bool IsEmpty() const { return qtype_ == nullptr; }
bool IsIdenticalTo(const ExprAttributes& other) const {
if (qtype_ != other.qtype_) {
return false;
}
if (qvalue_.has_value() != other.qvalue_.has_value()) {
return false;
}
if (!qvalue_.has_value() || !other.qvalue_.has_value()) {
return true;
}
return qvalue_->GetFingerprint() == other.qvalue_->GetFingerprint();
}
bool IsSubsetOf(const ExprAttributes& other) const {
if (qtype_ != nullptr && qtype_ != other.qtype_) {
return false;
}
if (!qvalue_.has_value()) {
return true;
}
return (other.qvalue_.has_value() &&
qvalue_->GetFingerprint() == other.qvalue_->GetFingerprint());
}
private:
const QType* qtype_ = nullptr;
std::optional<TypedValue> qvalue_;
};
std::ostream& operator<<(std::ostream& ostream, const ExprAttributes& attr);
}
namespace arolla {
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(expr::ExprAttributes);
}
#endif
#include "arolla/expr/expr_attributes.h"
#include <ostream>
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
std::ostream& operator<<(std::ostream& ostream, const ExprAttributes& attr) {
if (attr.qvalue()) {
ostream << "Attr(qvalue=" << attr.qvalue()->Repr() << ")";
} else if (attr.qtype()) {
ostream << "Attr(qtype=" << attr.qtype()->name() << ")";
} else {
ostream << "Attr{}";
}
return ostream;
}
}
namespace arolla {
void FingerprintHasherTraits<expr::ExprAttributes>::operator()(
FingerprintHasher* hasher, const expr::ExprAttributes& attr) const {
hasher->Combine(attr.qtype());
hasher->Combine(attr.qvalue().has_value() ? attr.qvalue()->GetFingerprint()
: Fingerprint{});
}
} | #include "arolla/expr/expr_attributes.h"
#include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/testing/status_matchers_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::IsOkAndHolds;
using ::testing::PrintToString;
using Attr = ::arolla::expr::ExprAttributes;
class ExprAttributesTest : public ::testing::Test {
protected:
void SetUp() override { ASSERT_OK(InitArolla()); }
};
TEST_F(ExprAttributesTest, Default) {
const Attr attr;
EXPECT_EQ(attr.qtype(), nullptr);
EXPECT_EQ(attr.qvalue(), std::nullopt);
EXPECT_EQ(PrintToString(attr), "Attr{}");
}
TEST_F(ExprAttributesTest, QTypeNullptr) {
const Attr attr(nullptr);
EXPECT_EQ(attr.qtype(), nullptr);
EXPECT_EQ(attr.qvalue(), std::nullopt);
EXPECT_EQ(PrintToString(attr), "Attr{}");
}
TEST_F(ExprAttributesTest, QType) {
const Attr attr(GetQTypeQType());
EXPECT_EQ(attr.qtype(), GetQTypeQType());
EXPECT_EQ(attr.qvalue(), std::nullopt);
EXPECT_EQ(PrintToString(attr), "Attr(qtype=QTYPE)");
}
TEST_F(ExprAttributesTest, QValue) {
const Attr attr(TypedValue::FromValue(GetNothingQType()));
EXPECT_EQ(attr.qtype(), GetQTypeQType());
EXPECT_THAT(attr.qvalue()->As<QTypePtr>(), IsOkAndHolds(GetNothingQType()));
EXPECT_EQ(PrintToString(attr), "Attr(qvalue=NOTHING)");
}
TEST_F(ExprAttributesTest, NoQTypeNoQValue) {
const Attr attr(nullptr, std::nullopt);
EXPECT_EQ(attr.qtype(), nullptr);
EXPECT_EQ(attr.qvalue(), std::nullopt);
EXPECT_EQ(PrintToString(attr), "Attr{}");
}
TEST_F(ExprAttributesTest, QTypeNoQValue) {
const Attr attr(GetQTypeQType(), std::nullopt);
EXPECT_EQ(attr.qtype(), GetQTypeQType());
EXPECT_EQ(attr.qvalue(), std::nullopt);
EXPECT_EQ(PrintToString(attr), "Attr(qtype=QTYPE)");
}
TEST_F(ExprAttributesTest, QValueQValue) {
std::optional<TypedValue> qvalue = TypedValue::FromValue(GetNothingQType());
const Attr attr(GetQTypeQType(), qvalue);
EXPECT_EQ(attr.qtype(), GetQTypeQType());
EXPECT_THAT(attr.qvalue()->As<QTypePtr>(), IsOkAndHolds(GetNothingQType()));
EXPECT_EQ(PrintToString(attr), "Attr(qvalue=NOTHING)");
}
TEST_F(ExprAttributesTest, Fingerprints) {
absl::flat_hash_set<Fingerprint> fingerprints;
EXPECT_TRUE(
fingerprints
.insert(FingerprintHasher("").Combine(ExprAttributes()).Finish())
.second);
EXPECT_FALSE(
fingerprints
.insert(FingerprintHasher("").Combine(ExprAttributes()).Finish())
.second);
EXPECT_TRUE(fingerprints
.insert(FingerprintHasher("")
.Combine(ExprAttributes(GetQType<int64_t>()))
.Finish())
.second);
EXPECT_FALSE(fingerprints
.insert(FingerprintHasher("")
.Combine(ExprAttributes(GetQType<int64_t>()))
.Finish())
.second);
EXPECT_TRUE(fingerprints
.insert(FingerprintHasher("")
.Combine(ExprAttributes(
TypedValue::FromValue<int64_t>(57)))
.Finish())
.second);
EXPECT_FALSE(fingerprints
.insert(FingerprintHasher("")
.Combine(ExprAttributes(
TypedValue::FromValue<int64_t>(57)))
.Finish())
.second);
}
TEST_F(ExprAttributesTest, IsIdenticalToEmpty) {
const Attr attr1;
const Attr attr2;
EXPECT_TRUE(attr1.IsIdenticalTo(attr1));
EXPECT_TRUE(attr1.IsIdenticalTo(attr2));
EXPECT_TRUE(attr2.IsIdenticalTo(attr2));
}
TEST_F(ExprAttributesTest, IsIdenticalToGeneral) {
const Attr attr0;
const Attr attr1(GetQTypeQType());
EXPECT_FALSE(attr0.IsIdenticalTo(attr1));
const Attr attr2(TypedValue::FromValue(GetNothingQType()));
EXPECT_FALSE(attr0.IsIdenticalTo(attr2));
EXPECT_FALSE(attr1.IsIdenticalTo(attr2));
const Attr attr3(GetQTypeQType(), TypedValue::FromValue(GetNothingQType()));
EXPECT_FALSE(attr0.IsIdenticalTo(attr3));
EXPECT_FALSE(attr1.IsIdenticalTo(attr3));
EXPECT_TRUE(attr2.IsIdenticalTo(attr3));
const Attr attr4(TypedValue::FromValue(GetQType<int64_t>()));
EXPECT_FALSE(attr0.IsIdenticalTo(attr4));
EXPECT_FALSE(attr1.IsIdenticalTo(attr4));
EXPECT_FALSE(attr2.IsIdenticalTo(attr4));
EXPECT_FALSE(attr3.IsIdenticalTo(attr4));
}
TEST_F(ExprAttributesTest, IsSubsetOfEmpty) {
const Attr attr1;
const Attr attr2;
EXPECT_TRUE(attr1.IsSubsetOf(attr1));
EXPECT_TRUE(attr1.IsSubsetOf(attr2));
EXPECT_TRUE(attr2.IsSubsetOf(attr2));
}
TEST_F(ExprAttributesTest, IsSubsetOf) {
const Attr attr0;
const Attr attr1(GetQTypeQType());
const Attr attr2(TypedValue::FromValue(GetNothingQType()));
const Attr attr3(TypedValue::FromValue(GetQTypeQType()));
EXPECT_TRUE(attr0.IsSubsetOf(attr0));
EXPECT_TRUE(attr0.IsSubsetOf(attr1));
EXPECT_TRUE(attr0.IsSubsetOf(attr2));
EXPECT_TRUE(attr0.IsSubsetOf(attr3));
EXPECT_FALSE(attr1.IsSubsetOf(attr0));
EXPECT_TRUE(attr1.IsSubsetOf(attr1));
EXPECT_TRUE(attr1.IsSubsetOf(attr2));
EXPECT_TRUE(attr1.IsSubsetOf(attr3));
EXPECT_FALSE(attr2.IsSubsetOf(attr0));
EXPECT_FALSE(attr2.IsSubsetOf(attr1));
EXPECT_TRUE(attr2.IsSubsetOf(attr2));
EXPECT_FALSE(attr2.IsSubsetOf(attr3));
EXPECT_FALSE(attr3.IsSubsetOf(attr0));
EXPECT_FALSE(attr3.IsSubsetOf(attr1));
EXPECT_FALSE(attr3.IsSubsetOf(attr2));
EXPECT_TRUE(attr3.IsSubsetOf(attr3));
}
}
} |
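A small sketch of the IsSubsetOf() partial order exercised by the tests above: a less specified attribute is a subset of any attribute that agrees with it and knows at least as much. Assumes InitArolla() has run, as in the fixture:

#include <cassert>
#include <cstdint>
#include "arolla/expr/expr_attributes.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_value.h"

void IsSubsetOfSketch() {
  using ::arolla::expr::ExprAttributes;
  ExprAttributes unknown;                              // nothing known
  ExprAttributes typed(arolla::GetQType<int64_t>());   // qtype only
  ExprAttributes valued(arolla::TypedValue::FromValue<int64_t>(57));
  assert(unknown.IsSubsetOf(typed));  // no constraints at all
  assert(typed.IsSubsetOf(valued));   // same qtype, no value constraint
  assert(!valued.IsSubsetOf(typed));  // a value is stricter than its type
}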
167 | #ifndef QUICHE_QUIC_CORE_HTTP_WEB_TRANSPORT_HTTP3_H_
#define QUICHE_QUIC_CORE_HTTP_WEB_TRANSPORT_HTTP3_H_
#include <memory>
#include <optional>
#include <string>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_set.h"
#include "absl/time/time.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/http/web_transport_stream_adapter.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/web_transport_interface.h"
#include "quiche/quic/core/web_transport_stats.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/web_transport/web_transport.h"
#include "quiche/spdy/core/http2_header_block.h"
namespace quic {
class QuicSpdySession;
class QuicSpdyStream;
enum class WebTransportHttp3RejectionReason {
kNone,
kNoStatusCode,
kWrongStatusCode,
kMissingDraftVersion,
kUnsupportedDraftVersion,
};
class QUICHE_EXPORT WebTransportHttp3
: public WebTransportSession,
public QuicSpdyStream::Http3DatagramVisitor {
public:
WebTransportHttp3(QuicSpdySession* session, QuicSpdyStream* connect_stream,
WebTransportSessionId id);
void HeadersReceived(const spdy::Http2HeaderBlock& headers);
void SetVisitor(std::unique_ptr<WebTransportVisitor> visitor) {
visitor_ = std::move(visitor);
}
WebTransportSessionId id() { return id_; }
bool ready() { return ready_; }
void AssociateStream(QuicStreamId stream_id);
void OnStreamClosed(QuicStreamId stream_id) { streams_.erase(stream_id); }
void OnConnectStreamClosing();
size_t NumberOfAssociatedStreams() { return streams_.size(); }
void CloseSession(WebTransportSessionError error_code,
absl::string_view error_message) override;
void OnCloseReceived(WebTransportSessionError error_code,
absl::string_view error_message);
void OnConnectStreamFinReceived();
void CloseSessionWithFinOnlyForTests();
WebTransportStream* AcceptIncomingBidirectionalStream() override;
WebTransportStream* AcceptIncomingUnidirectionalStream() override;
bool CanOpenNextOutgoingBidirectionalStream() override;
bool CanOpenNextOutgoingUnidirectionalStream() override;
WebTransportStream* OpenOutgoingBidirectionalStream() override;
WebTransportStream* OpenOutgoingUnidirectionalStream() override;
webtransport::Stream* GetStreamById(webtransport::StreamId id) override;
webtransport::DatagramStatus SendOrQueueDatagram(
absl::string_view datagram) override;
QuicByteCount GetMaxDatagramSize() const override;
void SetDatagramMaxTimeInQueue(absl::Duration max_time_in_queue) override;
webtransport::DatagramStats GetDatagramStats() override {
return WebTransportDatagramStatsForQuicSession(*session_);
}
webtransport::SessionStats GetSessionStats() override {
return WebTransportStatsForQuicSession(*session_);
}
void NotifySessionDraining() override;
void SetOnDraining(quiche::SingleUseCallback<void()> callback) override {
drain_callback_ = std::move(callback);
}
void OnHttp3Datagram(QuicStreamId stream_id,
absl::string_view payload) override;
  void OnUnknownCapsule(QuicStreamId /*stream_id*/,
                        const quiche::UnknownCapsule& /*capsule*/) override {}
bool close_received() const { return close_received_; }
WebTransportHttp3RejectionReason rejection_reason() const {
return rejection_reason_;
}
void OnGoAwayReceived();
void OnDrainSessionReceived();
private:
void MaybeNotifyClose();
QuicSpdySession* const session_;
QuicSpdyStream* const connect_stream_;
const WebTransportSessionId id_;
bool ready_ = false;
std::unique_ptr<WebTransportVisitor> visitor_;
absl::flat_hash_set<QuicStreamId> streams_;
quiche::QuicheCircularDeque<QuicStreamId> incoming_bidirectional_streams_;
quiche::QuicheCircularDeque<QuicStreamId> incoming_unidirectional_streams_;
bool close_sent_ = false;
bool close_received_ = false;
bool close_notified_ = false;
quiche::SingleUseCallback<void()> drain_callback_ = nullptr;
WebTransportHttp3RejectionReason rejection_reason_ =
WebTransportHttp3RejectionReason::kNone;
bool drain_sent_ = false;
WebTransportSessionError error_code_ = 0;
std::string error_message_ = "";
};
class QUICHE_EXPORT WebTransportHttp3UnidirectionalStream : public QuicStream {
public:
WebTransportHttp3UnidirectionalStream(PendingStream* pending,
QuicSpdySession* session);
WebTransportHttp3UnidirectionalStream(QuicStreamId id,
QuicSpdySession* session,
WebTransportSessionId session_id);
void WritePreamble();
void OnDataAvailable() override;
void OnCanWriteNewData() override;
void OnClose() override;
void OnStreamReset(const QuicRstStreamFrame& frame) override;
bool OnStopSending(QuicResetStreamError error) override;
void OnWriteSideInDataRecvdState() override;
WebTransportStream* interface() { return &adapter_; }
void SetUnblocked() { sequencer()->SetUnblocked(); }
private:
QuicSpdySession* session_;
WebTransportStreamAdapter adapter_;
std::optional<WebTransportSessionId> session_id_;
bool needs_to_send_preamble_;
bool ReadSessionId();
void MaybeCloseIncompleteStream();
};
QUICHE_EXPORT std::optional<WebTransportStreamError> Http3ErrorToWebTransport(
uint64_t http3_error_code);
QUICHE_EXPORT WebTransportStreamError
Http3ErrorToWebTransportOrDefault(uint64_t http3_error_code);
QUICHE_EXPORT uint64_t
WebTransportErrorToHttp3(WebTransportStreamError webtransport_error_code);
}
#endif
#include "quiche/quic/core/http/web_transport_http3.h"
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/http/quic_spdy_stream.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/capsule.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/web_transport/web_transport.h"
#define ENDPOINT \
(session_->perspective() == Perspective::IS_SERVER ? "Server: " : "Client: ")
namespace quic {
namespace {
class NoopWebTransportVisitor : public WebTransportVisitor {
void OnSessionReady() override {}
  void OnSessionClosed(WebTransportSessionError /*error_code*/,
                       const std::string& /*error_message*/) override {}
  void OnIncomingBidirectionalStreamAvailable() override {}
  void OnIncomingUnidirectionalStreamAvailable() override {}
  void OnDatagramReceived(absl::string_view /*datagram*/) override {}
void OnCanCreateNewOutgoingBidirectionalStream() override {}
void OnCanCreateNewOutgoingUnidirectionalStream() override {}
};
}
WebTransportHttp3::WebTransportHttp3(QuicSpdySession* session,
QuicSpdyStream* connect_stream,
WebTransportSessionId id)
: session_(session),
connect_stream_(connect_stream),
id_(id),
visitor_(std::make_unique<NoopWebTransportVisitor>()) {
QUICHE_DCHECK(session_->SupportsWebTransport());
QUICHE_DCHECK(IsValidWebTransportSessionId(id, session_->version()));
QUICHE_DCHECK_EQ(connect_stream_->id(), id);
connect_stream_->RegisterHttp3DatagramVisitor(this);
}
void WebTransportHttp3::AssociateStream(QuicStreamId stream_id) {
streams_.insert(stream_id);
ParsedQuicVersion version = session_->version();
if (QuicUtils::IsOutgoingStreamId(version, stream_id,
session_->perspective())) {
return;
}
if (QuicUtils::IsBidirectionalStreamId(stream_id, version)) {
incoming_bidirectional_streams_.push_back(stream_id);
visitor_->OnIncomingBidirectionalStreamAvailable();
} else {
incoming_unidirectional_streams_.push_back(stream_id);
visitor_->OnIncomingUnidirectionalStreamAvailable();
}
}
void WebTransportHttp3::OnConnectStreamClosing() {
std::vector<QuicStreamId> streams(streams_.begin(), streams_.end());
streams_.clear();
for (QuicStreamId id : streams) {
session_->ResetStream(id, QUIC_STREAM_WEBTRANSPORT_SESSION_GONE);
}
connect_stream_->UnregisterHttp3DatagramVisitor();
MaybeNotifyClose();
}
void WebTransportHttp3::CloseSession(WebTransportSessionError error_code,
absl::string_view error_message) {
if (close_sent_) {
QUIC_BUG(WebTransportHttp3 close sent twice)
<< "Calling WebTransportHttp3::CloseSession() more than once is not "
"allowed.";
return;
}
close_sent_ = true;
if (close_received_) {
QUIC_DLOG(INFO) << "Not sending CLOSE_WEBTRANSPORT_SESSION as we've "
"already sent one from peer.";
return;
}
error_code_ = error_code;
error_message_ = std::string(error_message);
QuicConnection::ScopedPacketFlusher flusher(
connect_stream_->spdy_session()->connection());
connect_stream_->WriteCapsule(
quiche::Capsule::CloseWebTransportSession(error_code, error_message),
true);
}
void WebTransportHttp3::OnCloseReceived(WebTransportSessionError error_code,
absl::string_view error_message) {
if (close_received_) {
QUIC_BUG(WebTransportHttp3 notified of close received twice)
<< "WebTransportHttp3::OnCloseReceived() may be only called once.";
}
close_received_ = true;
if (close_sent_) {
QUIC_DLOG(INFO) << "Ignoring received CLOSE_WEBTRANSPORT_SESSION as we've "
"already sent our own.";
return;
}
error_code_ = error_code;
error_message_ = std::string(error_message);
connect_stream_->WriteOrBufferBody("", true);
MaybeNotifyClose();
}
void WebTransportHttp3::OnConnectStreamFinReceived() {
if (close_received_) {
return;
}
close_received_ = true;
if (close_sent_) {
QUIC_DLOG(INFO) << "Ignoring received FIN as we've already sent our close.";
return;
}
connect_stream_->WriteOrBufferBody("", true);
MaybeNotifyClose();
}
void WebTransportHttp3::CloseSessionWithFinOnlyForTests() {
QUICHE_DCHECK(!close_sent_);
close_sent_ = true;
if (close_received_) {
return;
}
connect_stream_->WriteOrBufferBody("", true);
}
void WebTransportHttp3::HeadersReceived(const spdy::Http2HeaderBlock& headers) {
if (session_->perspective() == Perspective::IS_CLIENT) {
int status_code;
if (!QuicSpdyStream::ParseHeaderStatusCode(headers, &status_code)) {
QUIC_DVLOG(1) << ENDPOINT
<< "Received WebTransport headers from server without "
"a valid status code, rejecting.";
rejection_reason_ = WebTransportHttp3RejectionReason::kNoStatusCode;
return;
}
bool valid_status = status_code >= 200 && status_code <= 299;
if (!valid_status) {
QUIC_DVLOG(1) << ENDPOINT
<< "Received WebTransport headers from server with "
"status code "
<< status_code << ", rejecting.";
rejection_reason_ = WebTransportHttp3RejectionReason::kWrongStatusCode;
return;
}
}
QUIC_DVLOG(1) << ENDPOINT << "WebTransport session " << id_ << " ready.";
ready_ = true;
visitor_->OnSessionReady();
session_->ProcessBufferedWebTransportStreamsForSession(this);
}
WebTransportStream* WebTransportHttp3::AcceptIncomingBidirectionalStream() {
while (!incoming_bidirectional_streams_.empty()) {
QuicStreamId id = incoming_bidirectional_streams_.front();
incoming_bidirectional_streams_.pop_front();
QuicSpdyStream* stream = session_->GetOrCreateSpdyDataStream(id);
if (stream == nullptr) {
continue;
}
return stream->web_transport_stream();
}
return nullptr;
}
WebTransportStream* WebTransportHttp3::AcceptIncomingUnidirectionalStream() {
while (!incoming_unidirectional_streams_.empty()) {
QuicStreamId id = incoming_unidirectional_streams_.front();
incoming_unidirectional_streams_.pop_front();
QuicStream* stream = session_->GetOrCreateStream(id);
if (stream == nullptr) {
continue;
}
return static_cast<WebTransportHttp3UnidirectionalStream*>(stream)
->interface();
}
return nullptr;
}
bool WebTransportHttp3::CanOpenNextOutgoingBidirectionalStream() {
return session_->CanOpenOutgoingBidirectionalWebTransportStream(id_);
}
bool WebTransportHttp3::CanOpenNextOutgoingUnidirectionalStream() {
return session_->CanOpenOutgoingUnidirectionalWebTransportStream(id_);
}
WebTransportStream* WebTransportHttp3::OpenOutgoingBidirectionalStream() {
QuicSpdyStream* stream =
session_->CreateOutgoingBidirectionalWebTransportStream(this);
if (stream == nullptr) {
return nullptr;
}
return stream->web_transport_stream();
}
WebTransportStream* WebTransportHttp3::OpenOutgoingUnidirectionalStream() {
WebTransportHttp3UnidirectionalStream* stream =
session_->CreateOutgoingUnidirectionalWebTransportStream(this);
if (stream == nullptr) {
return nullptr;
}
return stream->interface();
}
webtransport::Stream* WebTransportHttp3::GetStreamById(
webtransport::StreamId id) {
if (!streams_.contains(id)) {
return nullptr;
}
QuicStream* stream = session_->GetActiveStream(id);
const bool bidi = QuicUtils::IsBidirectionalStreamId(
id, ParsedQuicVersion::RFCv1());
if (bidi) {
return static_cast<QuicSpdyStream*>(stream)->web_transport_stream();
} else {
return static_cast<WebTransportHttp3UnidirectionalStream*>(stream)
->interface();
}
}
webtransport::DatagramStatus WebTransportHttp3::SendOrQueueDatagram(
absl::string_view datagram) {
return MessageStatusToWebTransportStatus(
connect_stream_->SendHttp3Datagram(datagram));
}
QuicByteCount WebTransportHttp3::GetMaxDatagramSize() const {
return connect_stream_->GetMaxDatagramSize();
}
void WebTransportHttp3::SetDatagramMaxTimeInQueue(
absl::Duration max_time_in_queue) {
connect_stream_->SetMaxDatagramTimeInQueue(QuicTimeDelta(max_time_in_queue));
}
void WebTransportHttp3::NotifySessionDraining() {
if (!drain_sent_) {
connect_stream_->WriteCapsule(
quiche::Capsule(quiche::DrainWebTransportSessionCapsule()));
drain_sent_ = true;
}
}
void WebTransportHttp3::OnHttp3Datagram(QuicStreamId stream_id,
absl::string_view payload) {
QUICHE_DCHECK_EQ(stream_id, connect_stream_->id());
visitor_->OnDatagramReceived(payload);
}
void WebTransportHttp3::MaybeNotifyClose() {
if (close_notified_) {
return;
}
close_notified_ = true;
visitor_->OnSessionClosed(error_code_, error_message_);
}
void WebTransportHttp3::OnGoAwayReceived() {
if (drain_callback_ != nullptr) {
std::move(drain_callback_)();
drain_callback_ = nullptr;
}
}
void WebTransportHttp3::OnDrainSessionReceived() { OnGoAwayReceived(); }
WebTransportHttp3UnidirectionalStream::WebTransportHttp3UnidirectionalStream(
PendingStream* pending, QuicSpdySession* session)
: QuicStream(pending, session, false),
session_(session),
adapter_(session, this, sequencer(), std::nullopt),
needs_to_send_preamble_(false) {
sequencer()->set_level_triggered(true);
}
WebTransportHttp3UnidirectionalStream::WebTransportHttp3UnidirectionalStream(
QuicStreamId id, QuicSpdySession* session, WebTransportSessionId session_id)
: QuicStream(id, session, false, WRITE_UNIDIRECTIONAL),
session_(session),
adapter_(session, this, sequencer(), session_id),
session_id_(session_id),
needs_to_send_preamble_(true) {}
void WebTransportHttp3UnidirectionalStream::WritePreamble() {
if (!needs_to_send_preamble_ || !session_id_.has_value()) {
QUIC_BUG(WebTransportHttp3UnidirectionalStream duplicate preamble)
<< ENDPOINT << "Sending preamble on stream ID " << id()
<< " at the wrong time.";
OnUnrecoverableError(QUIC_INTERNAL_ERROR,
"Attempting to send a WebTransport unidirectional "
"stream preamble at the wrong time.");
return;
}
QuicConnection::ScopedPacketFlusher flusher(session_->connection());
char buffer[sizeof(uint64_t) * 2];
QuicDataWriter writer(sizeof(buffer), buffer);
bool success = true;
success = success && writer.WriteVarInt62(kWebTransportUnidirectionalStream);
success = success && writer.WriteVarInt62(*session_id_);
QUICHE_DCHECK(success);
WriteOrBufferData(absl::string_view(buffer, writer.length()), false,
nullptr);
QUIC_DVLOG(1) << ENDPOINT << "Sent stream type and session ID ("
<< *session_id_ << ") on WebTransport stream " << id();
needs_to_send_preamble_ = false;
}
bool WebTransportHttp3UnidirectionalStream::ReadSessionId() {
iovec iov;
if (!sequencer()->GetReadableRegion(&iov)) {
return false;
}
QuicDataReader reader(static_cast<const char*>(iov.iov_base), iov.iov_len);
WebTransportSessionId session_id;
uint8_t session_id_length = reader.PeekVarInt62Length();
if (!reader.ReadVarInt62(&session_id)) {
if (sequencer()->IsAllDataAvailable()) {
QUIC_DLOG(WARNING)
<< ENDPOINT << "Failed to associate WebTransport stream " << id()
<< " with a session because the stream ended prematurely.";
sequencer()->MarkConsumed(sequencer()->NumBytesBuffered());
}
return false;
}
sequencer()->MarkConsumed(session_id_length);
session_id_ = session_id;
adapter_.SetSessionId(session_id);
session_->AssociateIncomingWebTransportStreamWithSession(session_id, id());
return true;
}
void WebTransportHttp3UnidirectionalStream::OnDataAvailable() {
if (!session_id_.has_value()) {
if (!ReadSessionId()) {
return;
}
}
adapter_.OnDataAvailable();
}
void WebTransportHttp3UnidirectionalStream::OnCanWriteNewData() {
adapter_.OnCanWriteNewData();
}
void WebTransportHttp3UnidirectionalStream::OnClose() {
QuicStream::OnClose();
if (!session_id_.has_value()) {
return;
}
WebTransportHttp3* session = session_->GetWebTransportSession(*session_id_);
if (session == nullptr) {
QUIC_DLOG(WARNING) << ENDPOINT << "WebTransport stream " << id()
<< " attempted to notify parent session " << *session_id_
<< ", but the session could not be found.";
return;
}
session->OnStreamClosed(id());
}
void WebTransportHttp3UnidirectionalStream::OnStreamReset(
const QuicRstStreamFrame& frame) {
if (adapter_.visitor() != nullptr) {
adapter_.visitor()->OnResetStreamReceived(
Http3ErrorToWebTransportOrDefault(frame.ietf_error_code));
}
QuicStream::OnStreamReset(frame);
}
bool WebTransportHttp3UnidirectionalStream::OnStopSending(
QuicResetStreamError error) {
if (adapter_.visitor() != nullptr) {
adapter_.visitor()->OnStopSendingReceived(
Http3ErrorToWebTransportOrDefault(error.ietf_application_code()));
}
return QuicStream::OnStopSending(error);
}
void WebTransportHttp3UnidirectionalStream::OnWriteSideInDataRecvdState() {
if (adapter_.visitor() != nullptr) {
adapter_.visitor()->OnWriteSideInDataRecvdState();
}
QuicStream::OnWriteSideInDataRecvdState();
}
namespace {
constexpr uint64_t kWebTransportMappedErrorCodeFirst = 0x52e4a40fa8db;
constexpr uint64_t kWebTransportMappedErrorCodeLast = 0x52e5ac983162;
constexpr WebTransportStreamError kDefaultWebTransportError = 0;
}
std::optional<WebTransportStreamError> Http3ErrorToWebTransport(
uint64_t http3_error_code) {
if (http3_error_code < kWebTransportMappedErrorCodeFirst ||
http3_error_code > kWebTransportMappedErrorCodeLast) {
return std::nullopt;
}
  if ((http3_error_code - 0x21) % 0x1f == 0) {
    // HTTP/3 GREASE codepoints (0x21 + 0x1f * N, RFC 9114) are reserved, so
    // they never map back to a WebTransport error.
    return std::nullopt;
  }
uint64_t shifted = http3_error_code - kWebTransportMappedErrorCodeFirst;
uint64_t result = shifted - shifted / 0x1f;
QUICHE_DCHECK_LE(result,
std::numeric_limits<webtransport::StreamErrorCode>::max());
return static_cast<WebTransportStreamError>(result);
}
WebTransportStreamError Http3ErrorToWebTransportOrDefault(
uint64_t http3_error_code) {
std::optional<WebTransportStreamError> result =
Http3ErrorToWebTransport(http3_error_code);
return result.has_value() ? *result : kDefaultWebTransportError;
}
uint64_t WebTransportErrorToHttp3(
WebTransportStreamError webtransport_error_code) {
return kWebTransportMappedErrorCodeFirst + webtransport_error_code +
webtransport_error_code / 0x1e;
}
} | #include "quiche/quic/core/http/web_transport_http3.h"
#include <cstdint>
#include <limits>
#include <optional>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace {
using ::testing::Optional;
TEST(WebTransportHttp3Test, ErrorCodesToHttp3) {
EXPECT_EQ(0x52e4a40fa8dbu, WebTransportErrorToHttp3(0x00));
EXPECT_EQ(0x52e4a40fa9e2u, WebTransportErrorToHttp3(0xff));
EXPECT_EQ(0x52e5ac983162u, WebTransportErrorToHttp3(0xffffffff));
EXPECT_EQ(0x52e4a40fa8f7u, WebTransportErrorToHttp3(0x1c));
EXPECT_EQ(0x52e4a40fa8f8u, WebTransportErrorToHttp3(0x1d));
EXPECT_EQ(0x52e4a40fa8fau, WebTransportErrorToHttp3(0x1e));
}
TEST(WebTransportHttp3Test, ErrorCodesToWebTransport) {
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8db), Optional(0x00));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa9e2), Optional(0xff));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e5ac983162u), Optional(0xffffffff));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f7), Optional(0x1cu));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f8), Optional(0x1du));
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8f9), std::nullopt);
EXPECT_THAT(Http3ErrorToWebTransport(0x52e4a40fa8fa), Optional(0x1eu));
EXPECT_EQ(Http3ErrorToWebTransport(0), std::nullopt);
EXPECT_EQ(Http3ErrorToWebTransport(std::numeric_limits<uint64_t>::max()),
std::nullopt);
}
TEST(WebTransportHttp3Test, ErrorCodeRoundTrip) {
for (int error = 0; error <= 65536; error++) {
uint64_t http_error = WebTransportErrorToHttp3(error);
std::optional<WebTransportStreamError> mapped_back =
quic::Http3ErrorToWebTransport(http_error);
ASSERT_THAT(mapped_back, Optional(error));
}
for (int64_t error = 0; error < std::numeric_limits<uint32_t>::max();
error += 65537) {
uint64_t http_error = WebTransportErrorToHttp3(error);
std::optional<WebTransportStreamError> mapped_back =
quic::Http3ErrorToWebTransport(http_error);
ASSERT_THAT(mapped_back, Optional(error));
}
}
}
} |
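A worked example of the error-code mapping verified by the tests above. WebTransportErrorToHttp3 inserts one gap every 0x1e codes so that the result never lands on an HTTP/3 GREASE codepoint (0x21 + 0x1f * N); the constants below are taken directly from the unit test:

#include <cassert>
#include <cstdint>

int main() {
  auto to_http3 = [](uint64_t wt) -> uint64_t {
    constexpr uint64_t kFirst = 0x52e4a40fa8db;  // image of WebTransport code 0
    return kFirst + wt + wt / 0x1e;
  };
  assert(to_http3(0x1d) == 0x52e4a40fa8f8);     // last code before the first gap
  assert(to_http3(0x1e) == 0x52e4a40fa8fa);     // jumps over the GREASE slot
  assert((0x52e4a40fa8f9 - 0x21) % 0x1f == 0);  // the skipped value is GREASE
  return 0;
}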
168 | #ifndef XLA_SERVICE_GPU_ALIAS_PASSTHROUGH_PARAMS_H_
#define XLA_SERVICE_GPU_ALIAS_PASSTHROUGH_PARAMS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class AliasPassthroughParams : public HloModulePass {
public:
AliasPassthroughParams() = default;
~AliasPassthroughParams() override = default;
absl::string_view name() const override { return "alias_passthrough_params"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/alias_passthrough_params.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> AliasPassthroughParams::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const HloInstruction* root = module->entry_computation()->root_instruction();
if (module->entry_computation()->num_parameters() == 0 ||
root->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_set<int64_t> used_params;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (root->operand(i)->opcode() == HloOpcode::kParameter &&
used_params.count(root->operand(i)->parameter_number()) == 0) {
VLOG(2) << "Parameter " << root->operand(i)->parameter_number()
<< " with shape " << root->operand(i)->shape().ToString()
<< " in module " << module->name()
<< " is passed-through to root tuple element " << i << ": "
<< root->shape().ToString();
if (module->input_output_alias_config().OutputHasAlias({i}) ||
module->input_output_alias_config().ParameterHasAlias(
root->operand(i)->parameter_number(), {})) {
VLOG(2) << "Skip setting the above pass-through alias as an alias may"
<< " have been set up for alising resource update.";
continue;
}
      TF_RETURN_IF_ERROR(module->input_output_alias_config().SetUpAlias(
          /*output_index=*/{i},
          /*param_number=*/root->operand(i)->parameter_number(),
          /*param_index=*/{}));
used_params.insert(root->operand(i)->parameter_number());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/alias_passthrough_params.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
class AliasPassthroughParamsTest : public HloTestBase {};
TEST_F(AliasPassthroughParamsTest, AliasPassThroughParams) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
EXPECT_EQ(1, alias_config.GetAliasedParameter({2})->parameter_number);
}
TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p0)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
}
TEST_F(AliasPassthroughParamsTest, PresetAliases) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
auto& preset_alias = module->input_output_alias_config();
TF_EXPECT_OK(preset_alias.SetUpAlias({1},
0,
{}));
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_result = module->input_output_alias_config();
EXPECT_EQ(1, alias_result.GetAliasedParameter({2})->parameter_number);
EXPECT_FALSE(alias_result.OutputHasAlias({0}));
}
}
} |
169 | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_UTILS_TRT_ALLOCATOR_H_
#include <unordered_map>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/mutex.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
#endif
namespace tensorflow {
namespace tensorrt {
void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space);
}
}
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
class TRTBaseAllocator : public nvinfer1::IGpuAllocator {
public:
virtual ~TRTBaseAllocator() = default;
};
class TRTDeviceAllocator : public TRTBaseAllocator {
public:
TRTDeviceAllocator(Allocator* allocator);
virtual ~TRTDeviceAllocator() {
VLOG(1) << "Destroying allocator attached to " << allocator_->Name();
}
void* allocate(uint64_t size, uint64_t alignment,
uint32_t flags) noexcept override;
void free(void* memory) noexcept override;
private:
mutex mu_;
Allocator* allocator_;
std::unordered_map<void*, void*> mem_map_ TF_GUARDED_BY(mu_);
};
}
}
#endif
#endif
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/platform/logging.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#endif
namespace tensorflow {
namespace tensorrt {
void* Align(uint64_t alignment, uint64_t size, void*& ptr, uint64_t& space) {
QCHECK_GT(alignment, 0ul) << "alignment must be greater than 0.";
  QCHECK_EQ(0, alignment & (alignment - 1)) << "Alignment must be a power of 2.";
QCHECK_GT(size, 0ul) << "size must be greater than 0.";
QCHECK(ptr) << "ptr must not be nullptr.";
QCHECK_GT(space, 0ul) << "space must be greater than 0.";
const uintptr_t ptr_val = reinterpret_cast<uintptr_t>(ptr);
QCHECK_GE(ptr_val + space, ptr_val) << "Provided space overflows.";
if (size > space) return nullptr;
const uintptr_t aligned_ptr_val = ((ptr_val + alignment - 1) & -alignment);
if (aligned_ptr_val > ptr_val + space - size) return nullptr;
ptr = reinterpret_cast<void*>(aligned_ptr_val);
const uintptr_t diff = aligned_ptr_val - ptr_val;
space -= diff;
return ptr;
}
}
}
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
void* TRTDeviceAllocator::allocate(uint64_t size, uint64_t alignment,
uint32_t flags) noexcept {
if (size == 0) return nullptr;
  // This allocator ignores the requested alignment and always aligns to 512
  // bytes; the extra `alignment` bytes allocated below guarantee that an
  // aligned block of `size` bytes fits.
  alignment = 512;
assert((alignment & (alignment - 1)) == 0);
uint64_t total_size = size + alignment;
AllocationAttributes attributes;
attributes.retry_on_failure = false;
void* mem = allocator_->AllocateRaw(alignment, total_size, attributes);
if (!mem) return nullptr;
void* alloc_mem = mem;
QCHECK(Align(alignment, size, mem, total_size));
mutex_lock lock(mu_);
if (mem != alloc_mem) {
QCHECK(mem_map_.insert({mem, alloc_mem}).second);
}
VLOG(2) << "Allocated " << total_size << " bytes memory @" << alloc_mem
<< "; aligned to " << size << " bytes @" << mem << " with alignment "
<< alignment;
return mem;
}
TRTDeviceAllocator::TRTDeviceAllocator(Allocator* allocator)
: allocator_(allocator) {
VLOG(1) << "Using " << allocator->Name() << " allocator from TensorFlow";
}
void TRTDeviceAllocator::free(void* memory) noexcept {
mutex_lock lock(mu_);
VLOG(2) << "Deallocating @ " << memory;
if (memory) {
auto alloc_mem = mem_map_.find(memory);
if (alloc_mem != mem_map_.end()) {
memory = alloc_mem->second;
mem_map_.erase(alloc_mem->first);
}
allocator_->DeallocateRaw(memory);
}
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tensorrt {
bool RunTest(const uint64_t alignment, const uint64_t size,
const intptr_t orig_ptr_val, const uint64_t orig_space) {
void* const orig_ptr = reinterpret_cast<void*>(orig_ptr_val);
void* ptr = orig_ptr;
uint64_t space = orig_space;
void* result = Align(alignment, size, ptr, space);
if (result == nullptr) {
EXPECT_EQ(orig_ptr, ptr);
EXPECT_EQ(orig_space, space);
return false;
} else {
EXPECT_EQ(result, ptr);
const intptr_t ptr_val = reinterpret_cast<intptr_t>(ptr);
EXPECT_EQ(0, ptr_val % alignment);
EXPECT_GE(ptr_val, orig_ptr_val);
EXPECT_GE(space, size);
EXPECT_LE(space, orig_space);
EXPECT_EQ(ptr_val + space, orig_ptr_val + orig_space);
return true;
}
}
TEST(TRTAllocatorTest, Align) {
for (const uint64_t space :
{1ul, 2ul, 3ul, 4ul, 7ul, 8ul, 9ul, 10ul, 16ul, 32ul, 511ul, 512ul,
513ul, 700ul, 12345ul, 1ul << 32}) {
for (uint64_t alignment = 1; alignment <= space * 4; alignment *= 2) {
for (const uintptr_t ptr_val :
{static_cast<uint64_t>(1),
alignment == 1 ? static_cast<uint64_t>(1) : alignment - 1,
alignment, alignment + 1, alignment + (alignment / 2)}) {
if (ptr_val % alignment == 0) {
for (const uint64_t size :
{static_cast<uint64_t>(1),
space == 1 ? static_cast<uint64_t>(1) : space - 1, space,
space + 1}) {
EXPECT_EQ(space >= size, RunTest(alignment, size, ptr_val, space));
}
} else {
EXPECT_FALSE(RunTest(alignment, space, ptr_val, space));
const uint64_t diff = alignment - ptr_val % alignment;
if (space > diff) {
EXPECT_TRUE(
RunTest(alignment, space - diff, ptr_val + diff, space - diff));
for (const uint64_t size :
{static_cast<uint64_t>(1),
space - diff > 1 ? space - diff - 1
: static_cast<uint64_t>(1),
space - diff, space - diff + 1, space - 1}) {
EXPECT_EQ(space - diff >= size,
RunTest(alignment, size, ptr_val, space));
}
} else {
EXPECT_FALSE(RunTest(alignment, 1, ptr_val, space));
}
}
}
}
}
}
}
} |
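A worked example of Align() above: with 512-byte alignment, a pointer at address 1000 and 600 bytes of space, the first aligned address is 1024, and the 24 skipped bytes come out of the remaining space. A minimal sketch, assuming the translation unit links against the allocator target:

#include <cassert>
#include <cstdint>
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"

int main() {
  void* ptr = reinterpret_cast<void*>(uintptr_t{1000});
  uint64_t space = 600;
  void* result = tensorflow::tensorrt::Align(/*alignment=*/512, /*size=*/100,
                                             ptr, space);
  assert(result == reinterpret_cast<void*>(uintptr_t{1024}));
  assert(ptr == result);  // Align() advances ptr in place
  assert(space == 576);   // 600 - (1024 - 1000)
  return 0;
}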
170 | #ifndef I18N_ADDRESSINPUT_FAKE_STORAGE_H_
#define I18N_ADDRESSINPUT_FAKE_STORAGE_H_
#include <libaddressinput/storage.h>
#include <map>
#include <string>
namespace i18n {
namespace addressinput {
class FakeStorage : public Storage {
public:
FakeStorage(const FakeStorage&) = delete;
FakeStorage& operator=(const FakeStorage&) = delete;
FakeStorage();
~FakeStorage() override;
void Put(const std::string& key, std::string* data) override;
void Get(const std::string& key, const Callback& data_ready) const override;
private:
std::map<std::string, std::string*> data_;
};
}
}
#endif
#include "fake_storage.h"
#include <cassert>
#include <cstddef>
#include <string>
namespace i18n {
namespace addressinput {
FakeStorage::FakeStorage() = default;
FakeStorage::~FakeStorage() {
for (const auto& pair : data_) {
delete pair.second;
}
}
void FakeStorage::Put(const std::string& key, std::string* data) {
assert(data != nullptr);
auto result = data_.emplace(key, data);
if (!result.second) {
delete result.first->second;
result.first->second = data;
}
}
void FakeStorage::Get(const std::string& key,
const Callback& data_ready) const {
auto data_it = data_.find(key);
bool success = data_it != data_.end();
data_ready(success, key,
success ? new std::string(*data_it->second) : nullptr);
}
}
} | #include "fake_storage.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/storage.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::FakeStorage;
using i18n::addressinput::Storage;
class FakeStorageTest : public testing::Test {
public:
FakeStorageTest(const FakeStorageTest&) = delete;
FakeStorageTest& operator=(const FakeStorageTest&) = delete;
protected:
FakeStorageTest()
: storage_(),
success_(false),
key_(),
data_(),
data_ready_(BuildCallback(this, &FakeStorageTest::OnDataReady)) {}
FakeStorage storage_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const Storage::Callback> data_ready_;
private:
void OnDataReady(bool success, const std::string& key, std::string* data) {
ASSERT_FALSE(success && data == nullptr);
success_ = success;
key_ = key;
if (data != nullptr) {
data_ = *data;
delete data;
}
}
};
TEST_F(FakeStorageTest, GetWithoutPutReturnsEmptyData) {
storage_.Get("key", *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ("key", key_);
EXPECT_TRUE(data_.empty());
}
TEST_F(FakeStorageTest, GetReturnsWhatWasPut) {
storage_.Put("key", new std::string("value"));
storage_.Get("key", *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ("key", key_);
EXPECT_EQ("value", data_);
}
TEST_F(FakeStorageTest, SecondPutOverwritesData) {
storage_.Put("key", new std::string("bad-value"));
storage_.Put("key", new std::string("good-value"));
storage_.Get("key", *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ("key", key_);
EXPECT_EQ("good-value", data_);
}
} |
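The ownership contract exercised above is easy to get wrong, so a brief sketch: Put() takes ownership of the heap-allocated value and deletes any previous value stored under the key, while Get() hands the callback a fresh copy that the receiver must delete (the fixture's OnDataReady does exactly that):

#include <string>
#include "fake_storage.h"

void PutTwiceDoesNotLeak() {
  i18n::addressinput::FakeStorage storage;
  storage.Put("key", new std::string("old"));
  storage.Put("key", new std::string("new"));  // "old" is deleted by Put()
}  // ~FakeStorage() deletes "new"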
171 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONV_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONV_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewConvolutionNodeShader();
std::unique_ptr<NodeShader> NewConvolution1x1NodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/conv.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
#include "tensorflow/lite/delegates/gpu/gl/workgroups/ideal_workgroup_picker.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class Convolution : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const Convolution2DAttributes&>(ctx.op_attr);
if (attr.groups != 1) {
return absl::UnimplementedError(
"Convolution does not support more than 1 group");
}
auto weights = attr.weights.shape;
const int offsets_count = weights.h * weights.w;
const bool offsets_count_too_large = offsets_count > kMaxConstArraySize;
std::vector<Variable> parameters;
if (offsets_count_too_large) {
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"padding_w", attr.padding.prepended.w},
{"padding_h", attr.padding.prepended.h},
{"dilation_w", attr.dilations.w},
{"dilation_h", attr.dilations.h},
{"kernel_w", weights.w},
{"kernel_h", weights.h},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
} else {
std::vector<int2> offsets;
for (int h = 0; h < weights.h; ++h) {
for (int w = 0; w < weights.w; ++w) {
offsets.emplace_back(w * attr.dilations.w - attr.padding.prepended.w,
h * attr.dilations.h - attr.padding.prepended.h);
}
}
parameters = {
{"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])},
{"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])},
{"offsets_count", offsets_count},
{"offsets", offsets},
{"src_depth", DivideRoundUp(weights.i, 4)},
{"stride", int2(attr.strides.w, attr.strides.h)},
};
}
bool non_empty_padding =
attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0;
std::vector<std::pair<std::string, Object>> objects = {
{"weights", MakeReadonlyObject(Get3DSizeForPHWO4I4(attr.weights.shape),
ConvertToPHWO4I4(attr.weights))}};
std::string source;
if (offsets_count_too_large) {
source = R"(
int i = 0;
for (int ky = 0; ky < $kernel_h$; ky++) {
for (int kx = 0; kx < $kernel_w$; kx++, i++) {
ivec2 coord = gid.xy * $stride$ + ivec2(kx * $dilation_w$ - $padding_w$, ky * $dilation_h$ - $padding_h$);)";
} else {
source = R"(
for (int i = 0; i < $offsets_count$; ++i) {
ivec2 coord = gid.xy * $stride$ + $offsets[i]$;)";
}
if (non_empty_padding) {
source += R"(
if (coord.x < 0 || coord.y < 0 || coord.x >= $input_data_0_w$ || coord.y >= $input_data_0_h$) {
continue;
})";
}
source += R"(
for (int l = 0; l < $src_depth$; ++l) {
vec4 input_ = $input_data_0[coord.x, coord.y, l]$;
value_0.x += dot(input_, $weights[l * 4 + 0, i, gid.z]$);
value_0.y += dot(input_, $weights[l * 4 + 1, i, gid.z]$);
value_0.z += dot(input_, $weights[l * 4 + 2, i, gid.z]$);
value_0.w += dot(input_, $weights[l * 4 + 3, i, gid.z]$);
}
}
)";
if (offsets_count_too_large) {
source += R"(
}
)";
}
if (!attr.bias.data.empty()) {
source += "value_0 += $bias[gid.z]$;\n";
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::CONVOLUTION_2D,
HW(weights.h, weights.w), attr.strides, uint3(0, 0, 0),
OHWI(weights.o, ctx.input_shapes[0][1], ctx.input_shapes[0][2],
ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
int SelectMultiplier(int32_t input_width,
const NodeShader::GenerationContext& ctx) {
std::vector<int> multipliers = {4, 2};
if (ctx.gpu_info->IsAMD()) {
return 1;
}
if (!ctx.compiler_options.allow_precision_loss && ctx.gpu_info->IsMali()) {
multipliers = {2};
}
for (int i : multipliers) {
if (input_width % i == 0) {
return i;
}
}
return 1;
}
class Convolution1x1 : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
if (ctx.input_shapes.size() != 1) {
return absl::UnimplementedError(
"Convolution does not support more than 1 runtime tensor");
}
const auto& attr =
std::any_cast<const Convolution2DAttributes&>(ctx.op_attr);
if (attr.weights.shape.h != 1 || attr.weights.shape.w != 1) {
return absl::UnimplementedError("Height and width should be 1.");
}
if (attr.dilations.h != 1 || attr.dilations.w != 1) {
return absl::UnimplementedError("Dilations are not supported.");
}
if (attr.strides.h != 1 || attr.strides.w != 1) {
return absl::UnimplementedError("Strides are not supported.");
}
if (attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0) {
return absl::UnimplementedError("Padding is not supported.");
}
int multiplier = SelectMultiplier(ctx.input_shapes[0][2], ctx);
std::vector<Variable> parameters = {
{"src_depth",
DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)},
};
std::vector<std::pair<std::string, Object>> objects = {
{"weights",
MakeReadonlyObject(uint3(4, DivideRoundUp(attr.weights.shape.i, 4),
DivideRoundUp(attr.weights.shape.o, 4)),
ConvertToPHWO4I4(attr.weights))}};
std::string source;
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, "highp vec4 result", i, " = vec4(0);\n");
}
absl::StrAppend(&source, "vec4 f;\n");
absl::StrAppend(&source, "for (int l = 0; l < $src_depth$; ++l) {\n");
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, " vec4 input", i, " = $input_data_0[gid.x * ",
multiplier, " + ", i, ",gid.y,l]$;\n");
}
for (int k = 0; k < 4; k++) {
absl::StrAppend(&source, " f = $weights[", k, ", l, gid.z]$;\n");
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, " result", i, "[", k, "] += dot(input", i,
", f);\n");
}
}
absl::StrAppend(&source, "}\n");
if (!attr.bias.data.empty()) {
objects.push_back({"bias", MakeReadonlyObject(attr.bias.data)});
absl::StrAppend(&source, "vec4 b = $bias[gid.z]$;\n");
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, "result", i, " += b;\n");
}
}
if (multiplier != 1) {
for (int i = 0; i < multiplier; i++) {
absl::StrAppend(&source, "$inplace_update:result", i, "$\n");
absl::StrAppend(&source, "$output_data_0[gid.x * ", multiplier, " + ",
i, ",gid.y,gid.z] = result", i, "$;\n");
}
} else {
absl::StrAppend(&source, "value_0 = result0;\n");
}
auto dst_depth = DivideRoundUp(ctx.output_shapes[0][3], 4);
uint3 workgroup = uint3(16, 16, 1);
if (ctx.gpu_info->IsAdreno()) {
if (dst_depth >= 2) {
workgroup = uint3(8, 8, 2);
}
if (dst_depth >= 4) {
workgroup = uint3(4, 8, 4);
}
if (dst_depth >= 8) {
workgroup = uint3(4, 4, 8);
}
if (dst_depth >= 32) {
workgroup = uint3(4, 4, 16);
}
if (dst_depth >= 64) {
workgroup = uint3(2, 8, 16);
}
} else {
if (dst_depth >= 2) {
workgroup = uint3(16, 8, 2);
}
if (dst_depth >= 4) {
workgroup = uint3(16, 4, 4);
}
if (dst_depth >= 8) {
workgroup = uint3(8, 4, 8);
}
if (dst_depth >= 32) {
workgroup = uint3(8, 4, 8);
}
if (dst_depth >= 64) {
workgroup = uint3(8, 4, 8);
}
}
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(ctx.output_shapes[0][2] / multiplier, ctx.output_shapes[0][1],
DivideRoundUp(ctx.output_shapes[0][3], 4)),
GetIdealWorkgroupIfPossible(
*ctx.gpu_info, OperationType::CONVOLUTION_2D,
HW(attr.weights.shape.h, attr.weights.shape.w), attr.strides,
workgroup,
OHWI(attr.weights.shape.o, ctx.input_shapes[0][1],
ctx.input_shapes[0][2], ctx.input_shapes[0][3])),
std::move(source),
IOStructure::ONLY_DEFINITIONS,
multiplier == 1 ? IOStructure::AUTO
: IOStructure::ONLY_DEFINITIONS,
};
return absl::OkStatus();
}
};
}
std::unique_ptr<NodeShader> NewConvolutionNodeShader() {
return std::make_unique<Convolution>();
}
std::unique_ptr<NodeShader> NewConvolution1x1NodeShader() {
return std::make_unique<Convolution1x1>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/conv.h"
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(ConvTest, O2H2W1I1Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data = {1, 1};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 2, 1, 1);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(1, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {4, 8, 4, 8, 2, 4, 2, 4}));
}
TEST(ConvTest, O1H2W2I1Stride1x1Dilation2x2) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data.push_back(0.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 2, 2, 1);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.dilations = HW(2, 2);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 1, 1, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {10}));
}
TEST(ConvTest, O1H3W3I1Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 1;
bias.id = 1;
bias.data.push_back(1.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 3, 3, 1);
weights.id = 2;
weights.data = {1, 2, 3, 1, 2, 3, 1, 2, 3};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(1, 1);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 1, 1, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {11}));
}
TEST(ConvTest, O2H1W1I2Stride1x1Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 1, 2);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data = {1, 1};
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(2, 1, 1, 2);
weights.id = 2;
weights.data = {1, 2, 3, 4};
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(1, 1);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 1, 2);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 1, 1, 1}));
ASSERT_OK(model.Invoke(*NewConvolution1x1NodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {4, 8, 4, 8}));
}
TEST(ConvTest, O1H1W1I1Stride2x2Dilation1x1) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 3, 3, 1);
Convolution2DAttributes attr;
Tensor<Linear, DataType::FLOAT32> bias;
bias.shape.v = 2;
bias.id = 1;
bias.data.push_back(0.0);
attr.bias = std::move(bias);
Tensor<OHWI, DataType::FLOAT32> weights;
weights.shape = OHWI(1, 1, 1, 1);
weights.id = 2;
weights.data.push_back(2.0);
attr.weights = std::move(weights);
attr.dilations = HW(1, 1);
attr.padding.prepended = HW(0, 0);
attr.padding.appended = HW(0, 0);
attr.strides = HW(2, 2);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 3;
output.shape = BHWC(1, 2, 2, 1);
SingleOpModel model(
{ToString(OperationType::CONVOLUTION_2D), std::move(attr)}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
ASSERT_OK(model.Invoke(*NewConvolutionNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2, 4, 8, 16}));
}
}
}
}
} |
172 | #ifndef XLA_SERVICE_GPU_CONV_ALGORITHM_PICKER_H_
#define XLA_SERVICE_GPU_CONV_ALGORITHM_PICKER_H_
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
class GpuConvAlgorithmPicker : public HloModulePass {
public:
explicit GpuConvAlgorithmPicker(AutotuneConfig config) : config_(config) {}
absl::string_view name() const override {
return "gpu-conv-algorithm-picker";
}
static bool IsEnabled(const HloModule* module) {
return module->config().debug_options().xla_gpu_autotune_level() != 0;
}
static bool IsCandidate(const HloInstruction* instr) {
return IsCustomCallToDnnConvolution(*instr);
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
absl::StatusOr<bool> RunOnInstruction(HloInstruction* instr);
absl::StatusOr<AutotuneResult> PickBestAlgorithm(
const HloCustomCallInstruction* instr);
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
struct ReferenceResult {
stream_executor::dnn::AlgorithmDesc algorithm;
std::vector<stream_executor::DeviceMemoryBase> buffers;
};
struct AutotuneRuntimeArguments {
const HloModuleConfig hlo_module_config;
RedzoneBuffers rz_buffers;
const GpuConvConfig gpu_conv_config;
std::optional<std::string> canonical_hlo;
static absl::StatusOr<AutotuneRuntimeArguments> FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options);
};
absl::StatusOr<AutotuneResult> AutotuneOneConvRunner(
GenericConvRunner* runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const stream_executor::dnn::AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments);
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCacheCuda(
const HloCustomCallInstruction* instr);
#endif
absl::StatusOr<AutotuneResult> PickBestAlgorithmNoCacheRocm(
const HloCustomCallInstruction* instr);
private:
AutotuneConfig config_;
};
}
}
#endif
#include "xla/service/gpu/conv_algorithm_picker.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_autotuning.pb.h"
#include "xla/service/gpu/gpu_conv_runner.h"
#include "xla/service/gpu/hlo_algorithm_denylist.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/slow_operation_alarm.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/cuda/cuda_platform_id.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/lazy_op_runner.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/rocm/rocm_platform_id.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tsl/util/env_var.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/numbers.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
#include "third_party/gpus/cudnn/cudnn.h"
#include "third_party/gpus/cudnn/cudnn_version.h"
#if CUDNN_VERSION >= 90000
#include "third_party/gpus/cudnn/cudnn_ops.h"
#else
#include "third_party/gpus/cudnn/cudnn_ops_infer.h"
#endif
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#endif
namespace xla {
namespace gpu {
namespace {
using se::DeviceMemoryBase;
using se::dnn::AlgorithmDesc;
using std::optional;
class ScratchAllocator : public se::ScratchAllocator {
public:
ScratchAllocator(int device_ordinal,
se::DeviceMemoryAllocator* memory_allocator)
: device_ordinal_(device_ordinal), memory_allocator_(memory_allocator) {}
int64_t GetMemoryLimitInBytes() override {
return ScratchAllocator::GetDefaultMemoryLimitInBytes();
}
int64_t TotalAllocatedBytes() { return total_allocated_bytes_; }
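// Default workspace limit is 1 << 12 MB (4 GiB), overridable via the
// TF_CUDNN_WORKSPACE_LIMIT_IN_MB environment variable.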
static int64_t GetDefaultMemoryLimitInBytes() {
int64_t value;
TF_CHECK_OK(tsl::ReadInt64FromEnvVar("TF_CUDNN_WORKSPACE_LIMIT_IN_MB",
1LL << 12, &value));
return value * (1LL << 20);
}
absl::StatusOr<se::DeviceMemory<uint8_t>> AllocateBytes(
int64_t byte_size) override;
template <typename T>
absl::StatusOr<se::DeviceMemory<T>> Allocate(int64_t num_elements) {
TF_ASSIGN_OR_RETURN(se::DeviceMemory<uint8_t> bytes,
AllocateBytes(num_elements * sizeof(T)));
return se::DeviceMemory<T>(bytes);
}
private:
const int device_ordinal_;
se::DeviceMemoryAllocator* memory_allocator_;
std::vector<se::OwningDeviceMemory> allocated_buffers_;
int64_t total_allocated_bytes_ = 0;
};
absl::StatusOr<se::DeviceMemory<uint8_t>> ScratchAllocator::AllocateBytes(
int64_t byte_size) {
CHECK_GE(byte_size, 0) << "byte_size must be non-negative.";
if (byte_size > GetMemoryLimitInBytes()) {
return absl::ResourceExhaustedError(absl::StrFormat(
"Allocating %d bytes exceeds the memory limit of %d bytes.", byte_size,
GetMemoryLimitInBytes()));
}
TF_ASSIGN_OR_RETURN(se::OwningDeviceMemory allocated_buffer,
memory_allocator_->Allocate(device_ordinal_, byte_size,
false));
total_allocated_bytes_ += byte_size;
se::DeviceMemoryBase buffer_addr = *allocated_buffer;
allocated_buffers_.push_back(std::move(allocated_buffer));
return se::DeviceMemory<uint8_t>(buffer_addr);
}
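// Enumerates candidate convolution runners for `config` through the DNN
// backend, dispatching on the convolution kind (fused, graph, or plain).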
absl::StatusOr<std::vector<GenericConvRunner>> GetAlgorithms(
const GpuConvConfig& config, se::Stream* stream, bool use_cudnn_frontend,
bool use_fallback, const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType input_type,
GetDNNDataTypeFromPrimitiveType(config.input_type));
TF_ASSIGN_OR_RETURN(se::dnn::DataType output_type,
GetDNNDataTypeFromPrimitiveType(config.output_type));
se::StreamExecutor* stream_exec = stream->parent();
std::vector<GenericConvRunner> result;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
switch (kind) {
default:
return Internal("Unknown ConvolutionKind %d", kind);
case se::dnn::ConvolutionKind::FORWARD_BIAS_ACTIVATION: {
if (!config.fusion) {
return Internal(
"GpuConvConfig had fusion ConvolutionKind but no FusionConfig.");
}
std::vector<std::unique_ptr<const se::dnn::FusedConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetFusedConvolveRunners(
use_cudnn_frontend,
se::dnn::ConvolutionKind::FORWARD, input_type,
BiasTypeForInputType(input_type), output_type,
config.conv_result_scale,
config.fusion->side_input_scale,
config.fusion->leakyrelu_alpha, stream,
config.input_descriptor, config.filter_descriptor,
config.bias_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, config.fusion->mode, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::FusedConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD_GRAPH: {
std::vector<std::unique_ptr<const se::dnn::GraphConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetGraphConvolveRunners(
kind, input_type, output_type, stream, config.input_descriptor,
config.filter_descriptor, config.output_descriptor, config.conv_desc,
use_fallback, numeric_options, &runners, config.serialized_graph));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::GraphConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
case se::dnn::ConvolutionKind::FORWARD:
case se::dnn::ConvolutionKind::BACKWARD_DATA:
case se::dnn::ConvolutionKind::BACKWARD_FILTER: {
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
use_cudnn_frontend, kind, input_type, output_type, stream,
config.input_descriptor,
DeviceMemoryBase(nullptr),
config.filter_descriptor,
DeviceMemoryBase(nullptr),
config.output_descriptor,
DeviceMemoryBase(nullptr), config.conv_desc,
use_fallback, nullptr, numeric_options, &runners));
for (auto& runner : runners) {
TF_ASSIGN_OR_RETURN(
auto runner_cache,
se::dnn::LazyOpRunner<se::dnn::ConvOp>::FromOpRunner(
std::move(runner)));
result.emplace_back(std::move(runner_cache));
}
break;
}
}
return result;
}
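// ROCm path: unlike GetAlgorithms above, MIOpen enumeration is given the
// actual operand/result buffers and a scratch allocator rather than null
// placeholders.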
absl::StatusOr<std::vector<std::unique_ptr<const se::dnn::ConvRunner>>>
GetMIOpenAlgorithms(const HloCustomCallInstruction* instr,
absl::Span<se::DeviceMemoryBase> operand_buffers,
absl::Span<se::DeviceMemoryBase> result_buffers,
se::StreamExecutor* stream_exec,
ScratchAllocator* scratch_allocator, se::Stream* stream,
const se::NumericOptions& numeric_options) {
TF_ASSIGN_OR_RETURN(GpuConvConfig config, GetGpuConvConfig(instr));
TF_ASSIGN_OR_RETURN(se::dnn::ConvolutionKind kind,
GetDNNConvKindFromCudnnConvKind(config.kind));
TF_ASSIGN_OR_RETURN(se::dnn::DataType dtype,
GetDNNDataTypeFromPrimitiveType(config.output_type));
TF_ASSIGN_OR_RETURN(
GpuConvParams params,
GetGpuConvParams(config, operand_buffers, result_buffers));
std::vector<std::unique_ptr<const se::dnn::ConvRunner>> runners;
auto dnn = stream_exec->AsDnn();
if (dnn == nullptr) {
return absl::InvalidArgumentError("No DNN in stream executor.");
}
TF_RETURN_IF_ERROR(dnn->GetConvolveRunners(
false, kind, dtype, dtype, stream,
params.config->input_descriptor, params.input_buf,
params.config->filter_descriptor, params.filter_buf,
params.config->output_descriptor, params.output_buf,
params.config->conv_desc,
false, scratch_allocator, numeric_options,
&runners));
return runners;
}
std::string NumBytesToString(int64_t bytes) {
return absl::StrCat(tsl::strings::HumanReadableNumBytes(bytes), " (", bytes,
"B)");
}
CudnnVersion GetCudnnVersion(se::StreamExecutor* stream_executor) {
se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(stream_executor);
CudnnVersion cudnn_version;
cudnn_version.set_major(version.major_version());
cudnn_version.set_minor(version.minor_version());
cudnn_version.set_patch(version.patch());
return cudnn_version;
}
ComputeCapability GetComputeCapability(se::StreamExecutor* stream_executor) {
ComputeCapability cc;
se::CudaComputeCapability se_cc =
stream_executor->GetDeviceDescription().cuda_compute_capability();
cc.set_major(se_cc.major);
cc.set_minor(se_cc.minor);
return cc;
}
void PrintPlatformInfo(const se::Stream* stream) {
auto* se = stream->parent();
const auto& desc = se->GetDeviceDescription();
LOG(ERROR) << "Device: " << desc.name();
LOG(ERROR) << "Platform: " << desc.platform_version();
LOG(ERROR) << "Driver: " << desc.driver_version();
LOG(ERROR) << "Runtime: " << desc.runtime_version();
auto dnn_version = GetDnnVersionInfo(se);
if (dnn_version.ok()) {
auto v = dnn_version.value();
LOG(ERROR) << "cudnn version: " << v.major_version() << "."
<< v.minor_version() << "." << v.patch();
}
}
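// Returns true if the redzones around the autotuning buffers are intact;
// otherwise records a REDZONE_MODIFIED failure on `result` and returns false.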
absl::StatusOr<bool> CheckRedzones(const se::RedzoneAllocator& allocator,
se::Stream* stream, absl::string_view name,
std::string_view instr_str,
AutotuneResult* result) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("CudnnConvAlgorithmPicker checking redzones",
2);
using RedzoneCheckStatus = se::RedzoneAllocator::RedzoneCheckStatus;
TF_ASSIGN_OR_RETURN(RedzoneCheckStatus redzone_check,
allocator.CheckRedzones());
if (redzone_check.ok()) {
return true;
}
auto* fail = result->mutable_failure();
fail->set_kind(AutotuneResult::REDZONE_MODIFIED);
*fail->mutable_msg() = redzone_check.RedzoneFailureMsg();
fail->set_buffer_address(
reinterpret_cast<uint64_t>(redzone_check.user_buffer_address));
LOG(ERROR) << absl::StreamFormat(
"Detected cudnn out-of-bounds write in conv %s buffer! This is likely a "
"cudnn bug. We will skip this algorithm in the future, but your GPU "
"state may already be corrupted, leading to incorrect results. Within "
"Google, no action is needed on your part. Outside of Google, please "
"ensure you're running the latest version of cudnn. If that doesn't fix "
"the problem, please file a bug with this full error message and we'll "
"contact nvidia.",
name);
LOG(ERROR) << redzone_check.RedzoneFailureMsg();
LOG(ERROR) << "HloInstruction " << instr_str;
PrintPlatformInfo(stream);
return false;
}
}
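// Autotune level >= 2 initializes conv buffers with data; level >= 4
// additionally enables result checking (e.g. the redzone checks above).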
bool ShouldInitConvData(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 2;
}
bool ShouldCheckConv(const HloModuleConfig& hlo_module_config) {
const int32_t conv_autotune_level =
hlo_module_config.debug_options().xla_gpu_autotune_level();
return conv_autotune_level >= 4;
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithm(
const HloCustomCallInstruction* instr) {
return AutotunerUtil::Autotune(
instr, config_, [&] { return PickBestAlgorithmNoCache(instr); });
}
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::PickBestAlgorithmNoCache(
const HloCustomCallInstruction* instr) {
if (config_.IsDeviceless()) {
AutotuneResult result;
result.mutable_algorithm()->set_algo_id(-1);
return result;
}
se::StreamExecutor* stream_exec = config_.GetExecutor();
absl::MutexLock lock(&GetGpuMutex(stream_exec));
if (!stream_exec->SynchronizeAllActivity()) {
return Internal(
"Failed to synchronize GPU for autotuning conv instruction");
}
absl::StatusOr<AutotuneResult> result_or(Internal("Unknown platform."));
se::Platform::Id platform_id = stream_exec->GetPlatform()->id();
if (platform_id == se::rocm::kROCmPlatformId) {
result_or = PickBestAlgorithmNoCacheRocm(instr);
} else if (platform_id == se::cuda::kCudaPlatformId) {
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
result_or = PickBestAlgorithmNoCacheCuda(instr);
#endif
}
return result_or;
}
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA)
absl::StatusOr<GpuConvAlgorithmPicker::AutotuneRuntimeArguments>
GpuConvAlgorithmPicker::AutotuneRuntimeArguments::FromInstruction(
const HloCustomCallInstruction* instr, const AutotuneConfig& config,
const DebugOptions& debug_options) {
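// Gather everything needed to profile this conv independently of the
// original instruction: redzone-backed buffers, the conv config, and a
// canonicalized HLO string used as part of the autotune cache key.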
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*instr, config, debug_options,
RedzoneBuffers::kAllInputsOutputsNoScratch));
std::string canonical_hlo(
AutotuneCacheKey(config.GetExecutor()->GetDeviceDescription().model_str(),
*instr)
.GetHlo());
TF_ASSIGN_OR_RETURN(GpuConvConfig gpu_conv_config, GetGpuConvConfig(instr));
GpuConvAlgorithmPicker::AutotuneRuntimeArguments runtime_arguments = {
instr->GetModule()->config(),
std::move(rz_buffers),
std::move(gpu_conv_config),
{canonical_hlo}};
return runtime_arguments;
}
struct CudnnVersionRange {
using TupleVersion = std::tuple<int, int, int>;
TupleVersion begin;
TupleVersion end;
bool IsInRange(const CudnnVersion& other) const {
TupleVersion other_version{other.major(), other.minor(), other.patch()};
return begin <= other_version && other_version < end;
}
CudnnVersionRange(const CudnnVersion& begin, const CudnnVersion& end)
: begin(begin.major(), begin.minor(), begin.patch()),
end(end.major(), end.minor(), end.patch()) {}
CudnnVersionRange(const TupleVersion& begin, const TupleVersion& end)
: begin(begin), end(end) {}
};
struct ComputeCapabilityRange {
using TupleComputeCapability = std::tuple<int, int>;
TupleComputeCapability begin;
TupleComputeCapability end;
bool IsInRange(const ComputeCapability& other) const {
TupleComputeCapability other_cc{other.major(), other.minor()};
return begin <= other_cc && other_cc < end;
}
};
struct DisabledAlgorithm {
CudnnVersionRange cudnn_version_range;
ComputeCapabilityRange compute_capability_range;
int algo_id;
};
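// Denylist of known-bad algorithms: cuDNN 9.x disables algo 14 on compute
// capabilities in [6.0, 8.0).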
static const DisabledAlgorithm kDisabledAlgorithms[] = {
{{{9, 0, 0}, {10, 0, 0}},
{{6, 0}, {8, 0}},
14}};
absl::StatusOr<AutotuneResult> GpuConvAlgorithmPicker::AutotuneOneConvRunner(
GenericConvRunner* const runner,
std::optional<ReferenceResult>* reference_result,
absl::Span<const AlgorithmDesc> disabled_algos,
std::optional<AutotuneCacheKey> instruction_info,
const AutotuneRuntimeArguments& runtime_arguments) {
auto alg = runner->ToAlgorithmDesc();
se::StreamExecutor* stream_exec = config_.GetExecutor();
XLA_SCOPED_LOGGING_TIMER_LEVEL(
absl::StrCat("CudnnConvAlgorithmPicker::PickBestAlgorithm algo ",
alg.ToString()),
2);
auto make_failure = [&alg](AutotuneResult::FailureKind kind,
absl::string_view msg) {
AutotuneResult result;
*result.mutable_algorithm() = alg.ToProto();
result.mutable_failure()->set_kind(kind);
result.mutable_failure()->set_msg(msg.data(), msg.size());
return result;
};
AlgorithmDesc alg_key(alg.algo_id(), alg.tensor_ops_enabled(), std::nullopt);
std::string instr_str = instruction_info.has_value()
? std::string(instruction_info->GetHlo())
: "<unknown>";
for (const auto& disabled_algo : kDisabledAlgorithms) {
if (disabled_algo.cudnn_version_range.IsInRange(
GetCudnnVersion(stream_exec)) &&
disabled_algo.compute_capability_range.IsInRange(
GetComputeCapability(stream_exec)) &&
disabled_algo.algo_id == alg.algo_id()) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
}
if (absl::c_linear_search(disabled_algos, alg_key)) {
LOG(INFO) << "Omitted potentially buggy algorithm " << alg.ToString()
<< " for conv " << instr_str;
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for being known-buggy.");
}
GpuConvConfig config = runtime_arguments.gpu_conv_config;
auto activation_mode =
config.fusion ? config.fusion->mode : se::dnn::ActivationMode::kNone;
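// Legacy (pre-frontend) cuDNN fused-conv algorithms other than
// IMPLICIT_PRECOMP_GEMM implicitly apply ReLU, which is wrong when the
// requested activation is kNone.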
if (!alg.is_cudnn_frontend() &&
config.kind == CudnnConvKind::kForwardActivation &&
activation_mode == se::dnn::ActivationMode::kNone &&
alg.algo_id() != CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM) {
return make_failure(AutotuneResult::DISQUALIFIED,
"Disqualified for implicit RELU.");
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator scratch_allocator,
AutotunerUtil::CreateRedzoneAllocator(
config_, runtime_arguments.hlo_module_config.debug_options()));
se::dnn::ProfileResult profile_result;
VLOG(4) << "Trying algorithm " << alg.ToString() << " for " << instr_str;
SlowOperationAlarm alarm(absl::Seconds(1), [&] {
return absl::StrFormat(
"Trying algorithm %s for conv %s is taking a while...", alg.ToString(),
instr_str);
});
std::optional<size_t> workspace_size =
runner->ToAlgorithmDesc().workspace_size();
if (!workspace_size) {
return make_failure(AutotuneResult::UNKNOWN,
"Internal error: missing workspace size from "
"OpRunner::ToAlgorithmDesc()");
}
auto scratch_or = scratch_allocator.AllocateBytes(*workspace_size);
if (!scratch_or.ok()) {
return make_failure(AutotuneResult::DISQUALIFIED,
absl::StrCat("Scratch allocation failed: ",
scratch_or.status().ToString()));
}
se::DeviceMemoryBase scratch_memory = scratch_or.value();
RunConvOptions options;
options.runner_cache = runner;
float max_time = 0;
float min_time = std::numeric_limits<float>::max();
absl::Status launch_status;
std::vector<se::DeviceMemoryBase> operand_buffers =
runtime_arguments.rz_buffers.input_buffers();
std::vector<se::DeviceMemoryBase> result_buffers =
runtime_arguments.rz_buffers.output_buffers();
TF_ASSIGN_OR_RETURN(se::Stream* const stream, config_.GetStream());
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
options.profile_result = &profile_result;
profile_result.set_warmup_run_executed(true);
constexpr int kMaxIter = 10;
int num_iters = 0;
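// Profile repeatedly (up to kMaxIter runs), stopping early once the elapsed
// time is within kThreshold of the running minimum.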
for (; num_iters < kMaxIter && launch_status.ok(); ++num_iters) {
launch_status = RunGpuConv(config, operand_buffers, result_buffers,
scratch_memory, stream, options);
if (!profile_result.is_valid()) {
break;
}
float old_min_time = min_time;
min_time = std::min(min_time, profile_result.elapsed_time_in_ms());
max_time = std::max(max_time, profile_result.elapsed_time_in_ms());
constexpr float kThreshold = 0.05f;
if (std::abs(profile_result.elapsed_time_in_ms() - old_min_time) /
old_min_time <
kThreshold) {
break;
}
}
if (!launch_status.ok()) {
VLOG(5) << "Launch failed: " << launch_status;
return make_failure(
AutotuneResult::DISQUALIFIED,
absl::StrCat("Profiling failure on cuDNN engine ", alg.ToString(), ": ",
launch_status.ToString()));
}
if (!profile_result.is_valid()) {
VLOG(5) << "Launch succeeded but profile result is invalid.";
return make_failure(
AutotuneResult::UNKNOWN,
absl::StrCat("Launch succeeded but profile result is invalid, "
"with cuDNN engine ",
alg.ToString(), ": ", launch_sta | #include "xla/service/gpu/conv_algorithm_picker.h"
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/platform_util.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GpuConvAlgorithmPickerTest : public HloTestBase {
public:
GpuConvAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
};
TEST_F(GpuConvAlgorithmPickerTest, SetAlgorithm) {
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[3,56,56,16]{2,1,0,3} parameter(0)
%arg1 = f32[3,3,3,64]{2,1,0,3} parameter(1)
ROOT %conv = f32[54,54,16,64]{1,0,3,2} convolution(%arg0, %arg1), window={size=3x3}, dim_labels=f01b_i01o->01bf
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(kHlo));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
const se::GpuComputeCapability& cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(GpuConvRewriter(cc), m.get()));
changed = false;
DebugOptions opts = DefaultDebugOptionsIgnoringFlags();
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_scratch_bytes = result.scratch_bytes();
int64_t new_scratch_bytes = old_scratch_bytes + 1;
result.set_scratch_bytes(new_scratch_bytes);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed, RunHloPass(GpuConvRewriter(cc), m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GpuConvAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
TF_ASSERT_OK(RunHloPass(TupleSimplifier(), m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&conv))));
EXPECT_THAT(
conv->shape(),
GmockMatch(m::Shape().WithSubshape(
{1}, m::Shape().WithElementType(U8).WithDims({new_scratch_bytes}))));
TF_ASSERT_OK_AND_ASSIGN(auto dnn_version, GetDnnVersionInfo(stream_exec));
if (dnn_version.major_version() >= 9 && dnn_version.major_version() < 10 &&
std::holds_alternative<stream_executor::CudaComputeCapability>(cc) &&
std::get<stream_executor::CudaComputeCapability>(cc).major == 7 &&
std::get<stream_executor::CudaComputeCapability>(cc).minor == 0) {
EXPECT_TRUE(conv->backend_config<GpuBackendConfig>()
->has_cudnn_conv_backend_config() &&
conv->backend_config<GpuBackendConfig>()
->cudnn_conv_backend_config()
.algorithm()
.algo_id() != 14);
}
}
}
} |
173 | #ifndef QUICHE_QUIC_CORE_BATCH_WRITER_QUIC_GSO_BATCH_WRITER_H_
#define QUICHE_QUIC_CORE_BATCH_WRITER_QUIC_GSO_BATCH_WRITER_H_
#include <cstddef>
#include "quiche/quic/core/batch_writer/quic_batch_writer_base.h"
#include "quiche/quic/core/quic_linux_socket_utils.h"
namespace quic {
class QUICHE_EXPORT QuicGsoBatchWriter : public QuicUdpBatchWriter {
public:
explicit QuicGsoBatchWriter(int fd);
QuicGsoBatchWriter(int fd, clockid_t clockid_for_release_time);
bool SupportsReleaseTime() const final { return supports_release_time_; }
bool SupportsEcn() const override {
return GetQuicRestartFlag(quic_support_ect1);
}
CanBatchResult CanBatch(const char* buffer, size_t buf_len,
const QuicIpAddress& self_address,
const QuicSocketAddress& peer_address,
const PerPacketOptions* options,
const QuicPacketWriterParams& params,
uint64_t release_time) const override;
FlushImplResult FlushImpl() override;
protected:
struct QUICHE_EXPORT ReleaseTimeForceEnabler {};
QuicGsoBatchWriter(std::unique_ptr<QuicBatchWriterBuffer> batch_buffer,
int fd, clockid_t clockid_for_release_time,
ReleaseTimeForceEnabler enabler);
ReleaseTime GetReleaseTime(
const QuicPacketWriterParams& params) const override;
virtual uint64_t NowInNanosForReleaseTime() const;
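// Max number of packets batched into one GSO write. Linux caps GSO
// segments (UDP_MAX_SEGMENTS is 64); these limits are more conservative.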
static size_t MaxSegments(size_t gso_size) {
return gso_size <= 2 ? 16 : 45;
}
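// Control buffer must be large enough for every cmsg BuildCmsg can emit:
// source IP, UDP_SEGMENT size, SO_TXTIME release time, and TOS/TCLASS.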
static const int kCmsgSpace = kCmsgSpaceForIp + kCmsgSpaceForSegmentSize +
kCmsgSpaceForTxTime + kCmsgSpaceForTOS;
static void BuildCmsg(QuicMsgHdr* hdr, const QuicIpAddress& self_address,
uint16_t gso_size, uint64_t release_time,
QuicEcnCodepoint ecn_codepoint);
template <size_t CmsgSpace, typename CmsgBuilderT>
FlushImplResult InternalFlushImpl(CmsgBuilderT cmsg_builder) {
QUICHE_DCHECK(!IsWriteBlocked());
QUICHE_DCHECK(!buffered_writes().empty());
FlushImplResult result = {WriteResult(WRITE_STATUS_OK, 0),
0, 0};
WriteResult& write_result = result.write_result;
size_t total_bytes = batch_buffer().SizeInUse();
const BufferedWrite& first = buffered_writes().front();
char cbuf[CmsgSpace];
iovec iov{const_cast<char*>(first.buffer), total_bytes};
QuicMsgHdr hdr(&iov, 1, cbuf, sizeof(cbuf));
hdr.SetPeerAddress(first.peer_address);
uint16_t gso_size = buffered_writes().size() > 1 ? first.buf_len : 0;
cmsg_builder(&hdr, first.self_address, gso_size, first.release_time,
first.params.ecn_codepoint);
write_result = QuicLinuxSocketUtils::WritePacket(fd(), hdr);
QUIC_DVLOG(1) << "Write GSO packet result: " << write_result
<< ", fd: " << fd()
<< ", self_address: " << first.self_address.ToString()
<< ", peer_address: " << first.peer_address.ToString()
<< ", num_segments: " << buffered_writes().size()
<< ", total_bytes: " << total_bytes
<< ", gso_size: " << gso_size
<< ", release_time: " << first.release_time;
if (write_result.status != WRITE_STATUS_OK) {
return result;
}
result.num_packets_sent = buffered_writes().size();
write_result.bytes_written = total_bytes;
result.bytes_written = total_bytes;
batch_buffer().PopBufferedWrite(buffered_writes().size());
QUIC_BUG_IF(quic_bug_12544_1, !buffered_writes().empty())
<< "All packets should have been written on a successful return";
return result;
}
private:
static std::unique_ptr<QuicBatchWriterBuffer> CreateBatchWriterBuffer();
const clockid_t clockid_for_release_time_;
const bool supports_release_time_;
};
}
#endif
#include "quiche/quic/core/batch_writer/quic_gso_batch_writer.h"
#include <time.h>
#include <ctime>
#include <memory>
#include <utility>
#include "quiche/quic/core/quic_linux_socket_utils.h"
#include "quiche/quic/platform/api/quic_server_stats.h"
namespace quic {
std::unique_ptr<QuicBatchWriterBuffer>
QuicGsoBatchWriter::CreateBatchWriterBuffer() {
return std::make_unique<QuicBatchWriterBuffer>();
}
QuicGsoBatchWriter::QuicGsoBatchWriter(int fd)
: QuicGsoBatchWriter(fd, CLOCK_MONOTONIC) {}
QuicGsoBatchWriter::QuicGsoBatchWriter(int fd,
clockid_t clockid_for_release_time)
: QuicUdpBatchWriter(CreateBatchWriterBuffer(), fd),
clockid_for_release_time_(clockid_for_release_time),
supports_release_time_(
GetQuicRestartFlag(quic_support_release_time_for_gso) &&
QuicLinuxSocketUtils::EnableReleaseTime(fd,
clockid_for_release_time)) {
if (supports_release_time_) {
QUIC_RESTART_FLAG_COUNT(quic_support_release_time_for_gso);
}
}
QuicGsoBatchWriter::QuicGsoBatchWriter(
std::unique_ptr<QuicBatchWriterBuffer> batch_buffer, int fd,
clockid_t clockid_for_release_time, ReleaseTimeForceEnabler )
: QuicUdpBatchWriter(std::move(batch_buffer), fd),
clockid_for_release_time_(clockid_for_release_time),
supports_release_time_(true) {
QUIC_DLOG(INFO) << "Release time forcefully enabled.";
}
QuicGsoBatchWriter::CanBatchResult QuicGsoBatchWriter::CanBatch(
const char* , size_t buf_len, const QuicIpAddress& self_address,
const QuicSocketAddress& peer_address, const PerPacketOptions* ,
const QuicPacketWriterParams& params, uint64_t release_time) const {
if (buffered_writes().empty()) {
return CanBatchResult(true, false);
}
const BufferedWrite& first = buffered_writes().front();
const BufferedWrite& last = buffered_writes().back();
const bool can_burst = !SupportsReleaseTime() ||
params.release_time_delay.IsZero() ||
params.allow_burst;
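// A GSO batch must share addresses, ECN codepoint, and a uniform segment
// size; only the final (possibly shorter) segment may differ in length.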
size_t max_segments = MaxSegments(first.buf_len);
bool can_batch =
buffered_writes().size() < max_segments &&
last.self_address == self_address &&
last.peer_address == peer_address &&
batch_buffer().SizeInUse() + buf_len <= kMaxGsoPacketSize &&
first.buf_len == last.buf_len &&
first.buf_len >= buf_len &&
first.params.ecn_codepoint == params.ecn_codepoint &&
(can_burst || first.release_time == release_time);
bool must_flush = (!can_batch) ||
(last.buf_len != buf_len) ||
(buffered_writes().size() + 1 == max_segments);
return CanBatchResult(can_batch, must_flush);
}
QuicGsoBatchWriter::ReleaseTime QuicGsoBatchWriter::GetReleaseTime(
const QuicPacketWriterParams& params) const {
QUICHE_DCHECK(SupportsReleaseTime());
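// When bursting is allowed, piggyback on the last buffered write's release
// time (if it is still in the future) and report how far that drifts from
// the ideal release time.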
const uint64_t now = NowInNanosForReleaseTime();
const uint64_t ideal_release_time =
now + params.release_time_delay.ToMicroseconds() * 1000;
if ((params.release_time_delay.IsZero() || params.allow_burst) &&
!buffered_writes().empty() &&
(buffered_writes().back().release_time >= now)) {
const uint64_t actual_release_time = buffered_writes().back().release_time;
const int64_t offset_ns = actual_release_time - ideal_release_time;
ReleaseTime result{actual_release_time,
QuicTime::Delta::FromMicroseconds(offset_ns / 1000)};
QUIC_DVLOG(1) << "ideal_release_time:" << ideal_release_time
<< ", actual_release_time:" << actual_release_time
<< ", offset:" << result.release_time_offset;
return result;
}
return {ideal_release_time, QuicTime::Delta::Zero()};
}
uint64_t QuicGsoBatchWriter::NowInNanosForReleaseTime() const {
struct timespec ts;
if (clock_gettime(clockid_for_release_time_, &ts) != 0) {
return 0;
}
return ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
void QuicGsoBatchWriter::BuildCmsg(QuicMsgHdr* hdr,
const QuicIpAddress& self_address,
uint16_t gso_size, uint64_t release_time,
QuicEcnCodepoint ecn_codepoint) {
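// Emit per-batch control messages: source address, GSO segment size,
// SO_TXTIME release time, and the ECN codepoint via IP_TOS/IPV6_TCLASS.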
hdr->SetIpInNextCmsg(self_address);
if (gso_size > 0) {
*hdr->GetNextCmsgData<uint16_t>(SOL_UDP, UDP_SEGMENT) = gso_size;
}
if (release_time != 0) {
*hdr->GetNextCmsgData<uint64_t>(SOL_SOCKET, SO_TXTIME) = release_time;
}
if (ecn_codepoint != ECN_NOT_ECT && GetQuicRestartFlag(quic_support_ect1)) {
QUIC_RESTART_FLAG_COUNT_N(quic_support_ect1, 8, 9);
if (self_address.IsIPv4()) {
*hdr->GetNextCmsgData<int>(IPPROTO_IP, IP_TOS) =
static_cast<int>(ecn_codepoint);
} else {
*hdr->GetNextCmsgData<int>(IPPROTO_IPV6, IPV6_TCLASS) =
static_cast<int>(ecn_codepoint);
}
}
}
QuicGsoBatchWriter::FlushImplResult QuicGsoBatchWriter::FlushImpl() {
return InternalFlushImpl<kCmsgSpace>(BuildCmsg);
}
} | #include "quiche/quic/core/batch_writer/quic_gso_batch_writer.h"
#include <sys/socket.h>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_mock_syscall_wrapper.h"
using testing::_;
using testing::Invoke;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
size_t PacketLength(const msghdr* msg) {
size_t length = 0;
for (size_t i = 0; i < msg->msg_iovlen; ++i) {
length += msg->msg_iov[i].iov_len;
}
return length;
}
uint64_t MillisToNanos(uint64_t milliseconds) { return milliseconds * 1000000; }
class QUICHE_EXPORT TestQuicGsoBatchWriter : public QuicGsoBatchWriter {
public:
using QuicGsoBatchWriter::batch_buffer;
using QuicGsoBatchWriter::buffered_writes;
using QuicGsoBatchWriter::CanBatch;
using QuicGsoBatchWriter::CanBatchResult;
using QuicGsoBatchWriter::GetReleaseTime;
using QuicGsoBatchWriter::MaxSegments;
using QuicGsoBatchWriter::QuicGsoBatchWriter;
using QuicGsoBatchWriter::ReleaseTime;
static std::unique_ptr<TestQuicGsoBatchWriter>
NewInstanceWithReleaseTimeSupport() {
return std::unique_ptr<TestQuicGsoBatchWriter>(new TestQuicGsoBatchWriter(
std::make_unique<QuicBatchWriterBuffer>(),
-1, CLOCK_MONOTONIC, ReleaseTimeForceEnabler()));
}
uint64_t NowInNanosForReleaseTime() const override {
return MillisToNanos(forced_release_time_ms_);
}
void ForceReleaseTimeMs(uint64_t forced_release_time_ms) {
forced_release_time_ms_ = forced_release_time_ms;
}
private:
uint64_t forced_release_time_ms_ = 1;
};
struct QUICHE_EXPORT TestBufferedWrite : public BufferedWrite {
using BufferedWrite::BufferedWrite;
TestBufferedWrite(const TestBufferedWrite& other)
: BufferedWrite(other.buffer, other.buf_len, other.self_address,
other.peer_address,
other.options ? other.options->Clone()
: std::unique_ptr<PerPacketOptions>(),
QuicPacketWriterParams(), other.release_time) {}
};
static char unused_packet_buffer[kMaxOutgoingPacketSize];
struct QUICHE_EXPORT BatchCriteriaTestData {
BatchCriteriaTestData(size_t buf_len, const QuicIpAddress& self_address,
const QuicSocketAddress& peer_address,
uint64_t release_time, bool can_batch, bool must_flush)
: buffered_write(unused_packet_buffer, buf_len, self_address,
peer_address, std::unique_ptr<PerPacketOptions>(),
QuicPacketWriterParams(), release_time),
can_batch(can_batch),
must_flush(must_flush) {}
TestBufferedWrite buffered_write;
bool can_batch;
bool must_flush;
};
std::vector<BatchCriteriaTestData> BatchCriteriaTestData_SizeDecrease() {
const QuicIpAddress self_addr;
const QuicSocketAddress peer_addr;
std::vector<BatchCriteriaTestData> test_data_table = {
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 0, true, false},
{39, self_addr, peer_addr, 0, true, true},
{39, self_addr, peer_addr, 0, false, true},
{1350, self_addr, peer_addr, 0, false, true},
};
return test_data_table;
}
std::vector<BatchCriteriaTestData> BatchCriteriaTestData_SizeIncrease() {
const QuicIpAddress self_addr;
const QuicSocketAddress peer_addr;
std::vector<BatchCriteriaTestData> test_data_table = {
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 0, true, false},
{1351, self_addr, peer_addr, 0, false, true},
};
return test_data_table;
}
std::vector<BatchCriteriaTestData> BatchCriteriaTestData_AddressChange() {
const QuicIpAddress self_addr1 = QuicIpAddress::Loopback4();
const QuicIpAddress self_addr2 = QuicIpAddress::Loopback6();
const QuicSocketAddress peer_addr1(self_addr1, 666);
const QuicSocketAddress peer_addr2(self_addr1, 777);
const QuicSocketAddress peer_addr3(self_addr2, 666);
const QuicSocketAddress peer_addr4(self_addr2, 777);
std::vector<BatchCriteriaTestData> test_data_table = {
{1350, self_addr1, peer_addr1, 0, true, false},
{1350, self_addr1, peer_addr1, 0, true, false},
{1350, self_addr1, peer_addr1, 0, true, false},
{1350, self_addr2, peer_addr1, 0, false, true},
{1350, self_addr1, peer_addr2, 0, false, true},
{1350, self_addr1, peer_addr3, 0, false, true},
{1350, self_addr1, peer_addr4, 0, false, true},
{1350, self_addr1, peer_addr4, 0, false, true},
};
return test_data_table;
}
std::vector<BatchCriteriaTestData> BatchCriteriaTestData_ReleaseTime1() {
const QuicIpAddress self_addr;
const QuicSocketAddress peer_addr;
std::vector<BatchCriteriaTestData> test_data_table = {
{1350, self_addr, peer_addr, 5, true, false},
{1350, self_addr, peer_addr, 5, true, false},
{1350, self_addr, peer_addr, 5, true, false},
{1350, self_addr, peer_addr, 9, false, true},
};
return test_data_table;
}
std::vector<BatchCriteriaTestData> BatchCriteriaTestData_ReleaseTime2() {
const QuicIpAddress self_addr;
const QuicSocketAddress peer_addr;
std::vector<BatchCriteriaTestData> test_data_table = {
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 0, true, false},
{1350, self_addr, peer_addr, 9, false, true},
};
return test_data_table;
}
std::vector<BatchCriteriaTestData> BatchCriteriaTestData_MaxSegments(
size_t gso_size) {
const QuicIpAddress self_addr;
const QuicSocketAddress peer_addr;
std::vector<BatchCriteriaTestData> test_data_table;
size_t max_segments = TestQuicGsoBatchWriter::MaxSegments(gso_size);
for (size_t i = 0; i < max_segments; ++i) {
bool is_last_in_batch = (i + 1 == max_segments);
test_data_table.push_back({gso_size, self_addr, peer_addr,
0, true, is_last_in_batch});
}
test_data_table.push_back(
{gso_size, self_addr, peer_addr, 0, false, true});
return test_data_table;
}
class QuicGsoBatchWriterTest : public QuicTest {
protected:
WriteResult WritePacket(QuicGsoBatchWriter* writer, size_t packet_size) {
return writer->WritePacket(&packet_buffer_[0], packet_size, self_address_,
peer_address_, nullptr,
QuicPacketWriterParams());
}
WriteResult WritePacketWithParams(QuicGsoBatchWriter* writer,
QuicPacketWriterParams& params) {
return writer->WritePacket(&packet_buffer_[0], 1350, self_address_,
peer_address_, nullptr, params);
}
QuicIpAddress self_address_ = QuicIpAddress::Any4();
QuicSocketAddress peer_address_{QuicIpAddress::Any4(), 443};
char packet_buffer_[1500];
StrictMock<MockQuicSyscallWrapper> mock_syscalls_;
ScopedGlobalSyscallWrapperOverride syscall_override_{&mock_syscalls_};
};
TEST_F(QuicGsoBatchWriterTest, BatchCriteria) {
std::unique_ptr<TestQuicGsoBatchWriter> writer;
std::vector<std::vector<BatchCriteriaTestData>> test_data_tables;
test_data_tables.emplace_back(BatchCriteriaTestData_SizeDecrease());
test_data_tables.emplace_back(BatchCriteriaTestData_SizeIncrease());
test_data_tables.emplace_back(BatchCriteriaTestData_AddressChange());
test_data_tables.emplace_back(BatchCriteriaTestData_ReleaseTime1());
test_data_tables.emplace_back(BatchCriteriaTestData_ReleaseTime2());
test_data_tables.emplace_back(BatchCriteriaTestData_MaxSegments(1));
test_data_tables.emplace_back(BatchCriteriaTestData_MaxSegments(2));
test_data_tables.emplace_back(BatchCriteriaTestData_MaxSegments(1350));
for (size_t i = 0; i < test_data_tables.size(); ++i) {
writer = TestQuicGsoBatchWriter::NewInstanceWithReleaseTimeSupport();
const auto& test_data_table = test_data_tables[i];
for (size_t j = 0; j < test_data_table.size(); ++j) {
const BatchCriteriaTestData& test_data = test_data_table[j];
SCOPED_TRACE(testing::Message() << "i=" << i << ", j=" << j);
QuicPacketWriterParams params;
params.release_time_delay = QuicTime::Delta::FromMicroseconds(
test_data.buffered_write.release_time);
TestQuicGsoBatchWriter::CanBatchResult result = writer->CanBatch(
test_data.buffered_write.buffer, test_data.buffered_write.buf_len,
test_data.buffered_write.self_address,
test_data.buffered_write.peer_address, nullptr, params,
test_data.buffered_write.release_time);
ASSERT_EQ(test_data.can_batch, result.can_batch);
ASSERT_EQ(test_data.must_flush, result.must_flush);
if (result.can_batch) {
ASSERT_TRUE(writer->batch_buffer()
.PushBufferedWrite(
test_data.buffered_write.buffer,
test_data.buffered_write.buf_len,
test_data.buffered_write.self_address,
test_data.buffered_write.peer_address, nullptr,
params, test_data.buffered_write.release_time)
.succeeded);
}
}
}
}
TEST_F(QuicGsoBatchWriterTest, WriteSuccess) {
TestQuicGsoBatchWriter writer(-1);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 1000));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(1100u, PacketLength(msg));
return 1100;
}));
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 1100), WritePacket(&writer, 100));
ASSERT_EQ(0u, writer.batch_buffer().SizeInUse());
ASSERT_EQ(0u, writer.buffered_writes().size());
}
TEST_F(QuicGsoBatchWriterTest, WriteBlockDataNotBuffered) {
TestQuicGsoBatchWriter writer(-1);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(200u, PacketLength(msg));
errno = EWOULDBLOCK;
return -1;
}));
ASSERT_EQ(WriteResult(WRITE_STATUS_BLOCKED, EWOULDBLOCK),
WritePacket(&writer, 150));
ASSERT_EQ(200u, writer.batch_buffer().SizeInUse());
ASSERT_EQ(2u, writer.buffered_writes().size());
}
TEST_F(QuicGsoBatchWriterTest, WriteBlockDataBuffered) {
TestQuicGsoBatchWriter writer(-1);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(250u, PacketLength(msg));
errno = EWOULDBLOCK;
return -1;
}));
ASSERT_EQ(WriteResult(WRITE_STATUS_BLOCKED_DATA_BUFFERED, EWOULDBLOCK),
WritePacket(&writer, 50));
EXPECT_TRUE(writer.IsWriteBlocked());
ASSERT_EQ(250u, writer.batch_buffer().SizeInUse());
ASSERT_EQ(3u, writer.buffered_writes().size());
}
TEST_F(QuicGsoBatchWriterTest, WriteErrorWithoutDataBuffered) {
TestQuicGsoBatchWriter writer(-1);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(200u, PacketLength(msg));
errno = EPERM;
return -1;
}));
WriteResult error_result = WritePacket(&writer, 150);
ASSERT_EQ(WriteResult(WRITE_STATUS_ERROR, EPERM), error_result);
ASSERT_EQ(3u, error_result.dropped_packets);
ASSERT_EQ(0u, writer.batch_buffer().SizeInUse());
ASSERT_EQ(0u, writer.buffered_writes().size());
}
TEST_F(QuicGsoBatchWriterTest, WriteErrorAfterDataBuffered) {
TestQuicGsoBatchWriter writer(-1);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(250u, PacketLength(msg));
errno = EPERM;
return -1;
}));
WriteResult error_result = WritePacket(&writer, 50);
ASSERT_EQ(WriteResult(WRITE_STATUS_ERROR, EPERM), error_result);
ASSERT_EQ(3u, error_result.dropped_packets);
ASSERT_EQ(0u, writer.batch_buffer().SizeInUse());
ASSERT_EQ(0u, writer.buffered_writes().size());
}
TEST_F(QuicGsoBatchWriterTest, FlushError) {
TestQuicGsoBatchWriter writer(-1);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 0), WritePacket(&writer, 100));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(200u, PacketLength(msg));
errno = EINVAL;
return -1;
}));
WriteResult error_result = writer.Flush();
ASSERT_EQ(WriteResult(WRITE_STATUS_ERROR, EINVAL), error_result);
ASSERT_EQ(2u, error_result.dropped_packets);
ASSERT_EQ(0u, writer.batch_buffer().SizeInUse());
ASSERT_EQ(0u, writer.buffered_writes().size());
}
TEST_F(QuicGsoBatchWriterTest, ReleaseTime) {
const WriteResult write_buffered(WRITE_STATUS_OK, 0);
auto writer = TestQuicGsoBatchWriter::NewInstanceWithReleaseTimeSupport();
QuicPacketWriterParams params;
EXPECT_TRUE(params.release_time_delay.IsZero());
EXPECT_FALSE(params.allow_burst);
EXPECT_EQ(MillisToNanos(1),
writer->GetReleaseTime(params).actual_release_time);
WriteResult result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
EXPECT_EQ(MillisToNanos(1), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::Zero());
params.release_time_delay = QuicTime::Delta::FromMilliseconds(3);
params.allow_burst = true;
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
EXPECT_EQ(MillisToNanos(1), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::FromMilliseconds(-3));
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(2700u, PacketLength(msg));
errno = 0;
return 0;
}));
params.release_time_delay = QuicTime::Delta::FromMilliseconds(5);
params.allow_burst = false;
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 2700), result);
EXPECT_EQ(MillisToNanos(6), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::Zero());
params.allow_burst = true;
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
EXPECT_EQ(MillisToNanos(6), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::Zero());
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
EXPECT_EQ(3000u, PacketLength(msg));
errno = 0;
return 0;
}));
params.allow_burst = true;
EXPECT_EQ(MillisToNanos(6),
writer->GetReleaseTime(params).actual_release_time);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 3000),
writer->WritePacket(&packet_buffer_[0], 300, self_address_,
peer_address_, nullptr, params));
EXPECT_TRUE(writer->buffered_writes().empty());
writer->ForceReleaseTimeMs(2);
params.release_time_delay = QuicTime::Delta::FromMilliseconds(4);
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
EXPECT_EQ(MillisToNanos(6), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::Zero());
}
TEST_F(QuicGsoBatchWriterTest, EcnCodepoint) {
const WriteResult write_buffered(WRITE_STATUS_OK, 0);
auto writer = TestQuicGsoBatchWriter::NewInstanceWithReleaseTimeSupport();
QuicPacketWriterParams params;
EXPECT_TRUE(params.release_time_delay.IsZero());
EXPECT_FALSE(params.allow_burst);
params.ecn_codepoint = ECN_ECT0;
WriteResult result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
EXPECT_EQ(MillisToNanos(1), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::Zero());
params.allow_burst = true;
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
params.ecn_codepoint = ECN_ECT1;
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
const int kEct0 = 0x02;
EXPECT_EQ(2700u, PacketLength(msg));
msghdr mutable_msg;
memcpy(&mutable_msg, msg, sizeof(*msg));
for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&mutable_msg); cmsg != NULL;
cmsg = CMSG_NXTHDR(&mutable_msg, cmsg)) {
if (cmsg->cmsg_level == IPPROTO_IP && cmsg->cmsg_type == IP_TOS) {
EXPECT_EQ(*reinterpret_cast<int*>(CMSG_DATA(cmsg)), kEct0);
break;
}
}
errno = 0;
return 0;
}));
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 2700), result);
}
TEST_F(QuicGsoBatchWriterTest, EcnCodepointIPv6) {
const WriteResult write_buffered(WRITE_STATUS_OK, 0);
self_address_ = QuicIpAddress::Any6();
peer_address_ = QuicSocketAddress(QuicIpAddress::Any6(), 443);
auto writer = TestQuicGsoBatchWriter::NewInstanceWithReleaseTimeSupport();
QuicPacketWriterParams params;
EXPECT_TRUE(params.release_time_delay.IsZero());
EXPECT_FALSE(params.allow_burst);
params.ecn_codepoint = ECN_ECT0;
WriteResult result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
EXPECT_EQ(MillisToNanos(1), writer->buffered_writes().back().release_time);
EXPECT_EQ(result.send_time_offset, QuicTime::Delta::Zero());
params.allow_burst = true;
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(write_buffered, result);
params.ecn_codepoint = ECN_ECT1;
EXPECT_CALL(mock_syscalls_, Sendmsg(_, _, _))
.WillOnce(Invoke([](int , const msghdr* msg, int ) {
const int kEct0 = 0x02;
EXPECT_EQ(2700u, PacketLength(msg));
msghdr mutable_msg;
memcpy(&mutable_msg, msg, sizeof(*msg));
for (struct cmsghdr* cmsg = CMSG_FIRSTHDR(&mutable_msg); cmsg != NULL;
cmsg = CMSG_NXTHDR(&mutable_msg, cmsg)) {
if (cmsg->cmsg_level == IPPROTO_IPV6 &&
cmsg->cmsg_type == IPV6_TCLASS) {
EXPECT_EQ(*reinterpret_cast<int*>(CMSG_DATA(cmsg)), kEct0);
break;
}
}
errno = 0;
return 0;
}));
result = WritePacketWithParams(writer.get(), params);
ASSERT_EQ(WriteResult(WRITE_STATUS_OK, 2700), result);
}
}
}
} |
174 | #ifndef XLA_SERVICE_GPU_CUDNN_SUPPORT_UTILS_H_
#define XLA_SERVICE_GPU_CUDNN_SUPPORT_UTILS_H_
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size);
struct CudnnReorderTransposeConfig {
Shape transpose_shape;
Shape result_shape;
std::vector<int64_t> permutation;
};
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers);
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape);
inline constexpr absl::string_view kWorkspaceAllocationCustomCallTarget =
"__nop";
bool IsWorkspaceAllocationRoot(const HloInstruction& root);
}
}
#endif
#include "xla/service/gpu/cudnn_support_utils.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> CudnnSupportsOptimizedIntegerConvolution(
const se::CudaComputeCapability& compute_capability,
HloCustomCallInstruction& conv, int vector_size) {
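// cuDNN's vectorized int8x4/int8x32 kernels only apply to 2D forward (or
// forward-activation) convolutions over integral inputs on new enough GPUs;
// the checks below reject everything else.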
TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(&conv));
const Shape& input_shape = conv.operand(0)->shape();
const Shape& kernel_shape = conv.operand(1)->shape();
const Shape& result_shape = conv.shape().tuple_shapes(0);
const auto& dnums = conv.convolution_dimension_numbers();
if (vector_size != 4 && vector_size != 32) {
VLOG(3) << "Unsupported vector size for integer convolution: "
<< vector_size;
return false;
}
if ((vector_size == 32 && !compute_capability.IsAtLeast(7, 5)) ||
!compute_capability.IsAtLeast(6, 1)) {
VLOG(3) << "Compute capability " << compute_capability.ToString()
<< " is not sufficent for int8x" << vector_size
<< " vectorization.";
return false;
}
if (kind != CudnnConvKind::kForward &&
kind != CudnnConvKind::kForwardActivation) {
VLOG(3) << "Convolution kind is not forward or foward-activation: "
<< conv.ToString();
return false;
}
if (!primitive_util::IsIntegralType(input_shape.element_type()) ||
!primitive_util::IsIntegralType(kernel_shape.element_type())) {
VLOG(3) << "Convolution does not accept integer inputs/weights: "
<< conv.ToString();
return false;
}
if (dnums.input_spatial_dimensions().size() != 2 ||
dnums.kernel_spatial_dimensions().size() != 2 ||
dnums.output_spatial_dimensions().size() != 2) {
VLOG(3) << "Convolution is not 2D: " << conv.ToString();
return false;
}
if (vector_size == 32 &&
!primitive_util::IsIntegralType(result_shape.element_type())) {
VLOG(3) << "int8x32 convolutions only support integer output: "
<< conv.ToString();
return false;
}
if (vector_size == 32) {
int64_t W = input_shape.dimensions(dnums.input_spatial_dimensions()[0]);
int64_t H = input_shape.dimensions(dnums.input_spatial_dimensions()[1]);
int64_t R = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[0]);
int64_t S = kernel_shape.dimensions(dnums.kernel_spatial_dimensions()[1]);
const int64_t dilationW = conv.window().dimensions()[0].base_dilation();
const int64_t dilationH = conv.window().dimensions()[1].base_dilation();
if ((W <= (R - 1) * dilationW) || (H <= (S - 1) * dilationH)) {
VLOG(3) << "Conv spatial filter/input dimensions are too small for "
"vecotrized int8x32 convolution: "
<< conv.ToString();
return false;
}
}
if (window_util::HasDilation(conv.window())) {
VLOG(3) << "Vectorized integer convolutions do not support dilation: "
<< conv.ToString();
return false;
}
return true;
}
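// Worked example for the int8x32 spatial check above, using shapes from the
// unit tests below: a 10x10 input with a 2x2 filter and base dilation 1
// passes, since 10 > (2-1)*1; a 2x2 input with a 3x3 filter fails, since
// 2 <= (3-1)*1.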
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForFilterReordering(
const Shape& shape, const ConvolutionDimensionNumbers& dimension_numbers) {
if (shape.rank() != 4 && shape.rank() != 5) {
return Internal("Filter shape has unexpected rank.");
}
const int64_t dO = dimension_numbers.kernel_output_feature_dimension();
const int64_t dI = dimension_numbers.kernel_input_feature_dimension();
const int64_t dH = dimension_numbers.kernel_spatial_dimensions().at(0);
const int64_t dW = dimension_numbers.kernel_spatial_dimensions().at(1);
bool revectorize = shape.rank() == 5;
const int64_t dZ = revectorize ? 10 - dO - dI - dH - dW : -1;
const int64_t vsize = revectorize ? shape.dimensions(dZ) : 1;
if (shape.dimensions(dO) % 32 != 0 ||
shape.dimensions(dI) % (32 / vsize) != 0 ||
(revectorize && vsize != 4 && vsize != 32)) {
return Internal("Filter shape is not vectorizable.");
}
std::vector<int64_t> output = {
shape.dimensions(dO), shape.dimensions(dI) / (32 / vsize),
shape.dimensions(dH), shape.dimensions(dW), 32};
Shape output_shape = ShapeUtil::MakeShape(shape.element_type(), output);
auto calc_index = [&](int dim) {
bool split_v = vsize == 32;
return (revectorize
? (dI < dim ? 2 - split_v : 0) + (dZ < dim ? 1 + split_v : 0)
: (dI < dim ? 3 : 0)) +
(dO < dim ? 3 : 0) + (dH < dim) + (dW < dim);
};
int idx_O = calc_index(dO);
int idx_I = calc_index(dI);
int idx_H = calc_index(dH);
int idx_W = calc_index(dW);
int idx_Y = vsize == 32 ? calc_index(dZ) : idx_I + 1;
int idx_Z = vsize == 4 ? calc_index(dZ) : vsize == 32 ? idx_Y + 1 : idx_I + 2;
std::vector<int64_t> dims(8);
dims[idx_O] = shape.dimensions(dO) / 8;
dims[idx_O + 1] = 4;
dims[idx_O + 2] = 2;
dims[idx_I] = shape.dimensions(dI) / (32 / vsize);
dims[idx_Y] = 8;
dims[idx_Z] = 4;
dims[idx_H] = shape.dimensions(dH);
dims[idx_W] = shape.dimensions(dW);
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {idx_I, idx_H, idx_W, idx_O,
idx_O + 2, idx_Y, idx_O + 1, idx_Z};
return CudnnReorderTransposeConfig{split_shape, output_shape, permutation};
}
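// Worked example, matching ReorderFilterRank4Test for the "o01i" layout: a
// u8[96,5,3,224] filter yields result_shape [96,7,5,3,32], split (transpose)
// shape [12,4,2,5,3,7,8,4] and permutation {5,3,4,0,2,6,1,7}; permuting the
// split shape produces [7,5,3,12,2,8,4,4].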
absl::StatusOr<CudnnReorderTransposeConfig>
CudnnInferTransposeForBiasReordering(const Shape& shape) {
if (shape.rank() != 1) {
return Internal("Bias shape has unexpected rank.");
}
if (shape.dimensions(0) % 32 != 0) {
return Internal("Bias shape is not vectorizable.");
}
std::vector<int64_t> dims = {shape.dimensions(0) / 32, 4, 2, 4};
Shape split_shape = ShapeUtil::MakeShape(shape.element_type(), dims);
std::vector<int64_t> permutation = {0, 2, 1, 3};
return CudnnReorderTransposeConfig{split_shape, shape, permutation};
}
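// Worked example: a 96-element bias splits into [3,4,2,4]; applying
// permutation {0,2,1,3} yields [3,2,4,4], as checked by ReorderBiasTest.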
bool IsWorkspaceAllocationRoot(const HloInstruction& root) {
return root.IsRoot() && root.opcode() == HloOpcode::kTuple &&
root.operand_count() == 2 &&
root.operand(1)->IsCustomCall(kWorkspaceAllocationCustomCallTarget) &&
root.operand(1)->operand_count() == 0;
}
}
} | #include "xla/service/gpu/cudnn_support_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
class CudnnSupportUtilsTest : public HloTestBase {
public:
absl::StatusOr<HloCustomCallInstruction*> GetCustomCall(
xla::VerifiedHloModule* module, absl::string_view target) {
HloCustomCallInstruction* call = nullptr;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* inst : comp->instructions()) {
if (inst->IsCustomCall(target)) {
VLOG(1) << inst->ToString();
if (call != nullptr) {
return tsl::errors::FailedPrecondition(
"Found more than one custom call.");
}
call = Cast<HloCustomCallInstruction>(inst);
}
}
}
if (call == nullptr) {
return tsl::errors::FailedPrecondition(
"Did not find any matching custom call.");
}
return call;
}
};
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckVectorSize) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 7),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 1),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckComputeCapability) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[8,10,10,128] parameter(0)
filter = s8[2,2,128,128] parameter(1)
ROOT result = (s8[8,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 0}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({6, 1}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 4}, *conv, 32),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedIntegerConvolutionCheckKind) {
auto moduleFwd = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleFwd.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleBwdFilter = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f16[10,20,30,41] parameter(0)
output = f16[10,20,30,40] parameter(1)
result = (f16[2,2,41,40], u8[0]) custom-call(input, output),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardFilter"
ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdFilter.get(), "__cudnn$convBackwardFilter"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleBwdInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
output = f16[10,20,30,40] parameter(0)
filter = f16[2,2,41,40] parameter(1)
result = (f16[10,20,30,41], u8[0]) custom-call(output, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convBackwardInput"
ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleBwdInput.get(), "__cudnn$convBackwardInput"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckTypes) {
auto moduleS8InOut = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InOut.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
auto moduleS8InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleS8InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleF32InF32Out = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = f32[32,10,10,64] parameter(0)
filter = f32[2,2,64,128] parameter(1)
ROOT result = (f32[32,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(
conv, GetCustomCall(moduleF32InF32Out.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDims) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,10,64] parameter(0)
filter = s8[2,2,2,64,128] parameter(1)
ROOT result = (s8[32,10,10,10,128], u8[0]) custom-call(input, filter),
window={size=2x2}, dim_labels=b012f_012io->b012f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckDilation) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,10,10,64] parameter(0)
filter = s8[2,2,64,128] parameter(1)
ROOT result = (s8[32,20,20,128], u8[0]) custom-call(input, filter),
window={size=2x2 rhs_dilate=2x2}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(module.get(), "__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(false));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
}
TEST_F(CudnnSupportUtilsTest,
CudnnSupportsOptimizedVectorizedIntegerConvolutionCheckAlgo1Dims) {
auto moduleFilterCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,2,2,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,2,2,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
HloCustomCallInstruction* conv;
TF_ASSERT_OK_AND_ASSIGN(conv, GetCustomCall(moduleFilterCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(false));
auto moduleFilterAlmostCoversInput = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
input = s8[32,3,3,64] parameter(0)
filter = s8[3,3,64,128] parameter(1)
ROOT result = (s8[32,3,3,128], u8[0]) custom-call(input, filter),
window={size=3x3}, dim_labels=b01f_01io->b01f,
custom_call_target="__cudnn$convForward"
})")
.value();
TF_ASSERT_OK_AND_ASSIGN(conv,
GetCustomCall(moduleFilterAlmostCoversInput.get(),
"__cudnn$convForward"));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 4),
IsOkAndHolds(true));
EXPECT_THAT(CudnnSupportsOptimizedIntegerConvolution({7, 5}, *conv, 32),
IsOkAndHolds(true));
}
class ReorderFilterRank4Test : public ::testing::TestWithParam<std::string> {};
TEST_P(ReorderFilterRank4Test, InferTransposeRank4) {
auto input_dims = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[4] = {0, 0, 0, 0};
shape_dims[dI] = 224;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
EXPECT_EQ(inferred_config.permutation[7], inferred_config.permutation[5] + 1);
}
std::vector<std::string> GeneratePermutations(std::string input_dims) {
std::sort(input_dims.begin(), input_dims.end());
std::vector<std::string> permutations;
do {
permutations.push_back(input_dims);
} while (std::next_permutation(input_dims.begin(), input_dims.end()));
return permutations;
}
INSTANTIATE_TEST_SUITE_P(ReorderTestSuite, ReorderFilterRank4Test,
::testing::ValuesIn(GeneratePermutations("01io")));
class ReorderFilterRank5Test
: public ::testing::TestWithParam<std::tuple<std::string, int>> {};
TEST_P(ReorderFilterRank5Test, InferTransposeRank5) {
auto [input_dims, vsize] = GetParam();
size_t dI = input_dims.find('i');
size_t dO = input_dims.find('o');
size_t dH = input_dims.find('0');
size_t dW = input_dims.find('1');
ConvolutionDimensionNumbers dnums;
dnums.set_kernel_input_feature_dimension(dI);
dnums.set_kernel_output_feature_dimension(dO);
dnums.add_kernel_spatial_dimensions(dH);
dnums.add_kernel_spatial_dimensions(dW);
int64_t shape_dims[5] = {vsize, vsize, vsize, vsize, vsize};
shape_dims[dI] = 224 / vsize;
shape_dims[dO] = 96;
shape_dims[dH] = 5;
shape_dims[dW] = 3;
Shape shape = ShapeUtil::MakeShape(U8, absl::MakeSpan(shape_dims));
auto input = HloInstruction::CreateParameter(0, shape, "input");
auto filter = HloInstruction::CreateParameter(1, shape, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForFilterReordering(shape, dnums));
EXPECT_THAT(inferred_config.result_shape.dimensions(),
::testing::ElementsAre(96, 7, 5, 3, 32));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(),
::testing::ElementsAre(7, 5, 3, 12, 2, 8, 4, 4));
EXPECT_EQ(inferred_config.permutation[6], inferred_config.permutation[4] - 1);
}
INSTANTIATE_TEST_SUITE_P(
ReorderTestSuite, ReorderFilterRank5Test,
::testing::Combine(::testing::ValuesIn(GeneratePermutations("01?io")),
::testing::Values(4, 32)));
class ReorderBiasTest : public ::testing::Test {};
TEST_F(ReorderBiasTest, InferTranspose) {
Shape shape = ShapeUtil::MakeShape(U8, {96});
auto bias = HloInstruction::CreateParameter(2, shape, "bias");
Shape unused = ShapeUtil::MakeNil();
auto input = HloInstruction::CreateParameter(0, unused, "input");
auto filter = HloInstruction::CreateParameter(1, unused, "filter");
TF_ASSERT_OK_AND_ASSIGN(CudnnReorderTransposeConfig inferred_config,
CudnnInferTransposeForBiasReordering(shape));
Shape reshaped = ShapeUtil::PermuteDimensions(
inferred_config.permutation, inferred_config.transpose_shape);
EXPECT_THAT(reshaped.dimensions(), ::testing::ElementsAre(3, 2, 4, 4));
EXPECT_EQ(inferred_config.permutation[2], 1);
EXPECT_EQ(inferred_config.permutation[3], 3);
}
}
}
} |
175 | #ifndef TENSORFLOW_LITE_TESTING_MESSAGE_H_
#define TENSORFLOW_LITE_TESTING_MESSAGE_H_
#include <memory>
#include <string>
#include <vector>
namespace tflite {
namespace testing {
class Message {
public:
static bool Read(std::istream* input, Message* message);
Message() {}
virtual ~Message() {}
virtual void SetField(const std::string& name, const std::string& value) {}
virtual Message* AddChild(const std::string& name) { return nullptr; }
virtual void Finish() {}
protected:
Message* Store(Message* n) {
children_.emplace_back(n);
return n;
}
const std::vector<std::unique_ptr<Message>>& Children() const {
return children_;
}
private:
std::vector<std::unique_ptr<Message>> children_;
};
}
}
#endif
#include "tensorflow/lite/testing/message.h"
#include <stack>
#include <string>
#include "tensorflow/lite/testing/tokenize.h"
namespace tflite {
namespace testing {
class MessageStack : public TokenProcessor {
public:
explicit MessageStack(Message* first_node) {
nodes_.push(first_node);
valid_ = true;
}
void ConsumeToken(std::string* token) override {
if (!valid_) return;
Message* current_node = nodes_.top();
if (*token == "{") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
nodes_.push(current_node ? current_node->AddChild(previous_token_)
: nullptr);
previous_token_.clear();
} else if (*token == "}") {
if (nodes_.size() == 1 || !previous_token_.empty()) {
valid_ = false;
return;
}
if (current_node) {
current_node->Finish();
}
nodes_.pop();
} else if (*token == ":") {
if (previous_token_.empty()) {
valid_ = false;
return;
}
} else {
if (previous_token_.empty()) {
previous_token_.swap(*token);
} else {
if (current_node) {
current_node->SetField(previous_token_, *token);
}
previous_token_.clear();
}
}
}
bool valid() const { return valid_; }
private:
std::stack<Message*> nodes_;
std::string previous_token_;
bool valid_;
};
bool Message::Read(std::istream* input, Message* message) {
MessageStack stack(message);
Tokenize(input, &stack);
return stack.valid();
}
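// The accepted grammar, as exercised by the unit tests: a message body is a
// sequence of `key : value` fields and `name { ... }` child messages, e.g.
// "x { a : 1 b : 2 } d : 4". Unnamed blocks, unbalanced braces and dangling
// keys or values mark the stream invalid.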
}
} | #include "tensorflow/lite/testing/message.h"
#include <map>
#include <string>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
class TestMessage : public Message {
public:
TestMessage() {}
explicit TestMessage(const std::string& text_to_parse) {
std::stringstream ss(text_to_parse);
finished_ = Message::Read(&ss, this);
}
void SetField(const std::string& name, const std::string& value) override {
fields_[name] = value;
}
Message* AddChild(const std::string& name) override {
TestMessage* m = new TestMessage;
m->name_ = name;
return Store(m);
}
void Finish() override { finished_ = true; }
int NumChildren() const { return Children().size(); }
const TestMessage* GetChild(int i) const {
return dynamic_cast<TestMessage*>(Children()[i].get());
}
int NumFields() const { return fields_.size(); }
const std::string& GetField(const std::string& key) const {
return fields_.at(key);
}
const std::string& name() const { return name_; }
bool finished() const { return finished_; }
protected:
std::string name_;
std::map<std::string, std::string> fields_;
bool finished_ = false;
};
TEST(MessageTest, Simple) {
TestMessage message("x{a:1 b:2} y{} z{c:3} d:4");
ASSERT_TRUE(message.finished());
ASSERT_EQ(message.NumFields(), 1);
EXPECT_EQ(message.GetField("d"), "4");
ASSERT_EQ(message.NumChildren(), 3);
auto* x = message.GetChild(0);
EXPECT_EQ(x->name(), "x");
ASSERT_EQ(x->NumFields(), 2);
EXPECT_EQ(x->GetField("a"), "1");
EXPECT_EQ(x->GetField("b"), "2");
auto* y = message.GetChild(1);
EXPECT_EQ(y->name(), "y");
ASSERT_EQ(y->NumFields(), 0);
auto* z = message.GetChild(2);
EXPECT_EQ(z->name(), "z");
ASSERT_EQ(z->NumFields(), 1);
EXPECT_EQ(z->GetField("c"), "3");
}
TEST(MessageTest, Unnamed) {
TestMessage message("x{c:3} {} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, TooManyBraces) {
TestMessage message("x{c:3} } y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 1);
}
TEST(MessageTest, LeftoverToken) {
TestMessage message("x{c:3} z{test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingKey) {
TestMessage message("x{c:3} z{:test} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
TEST(MessageTest, MissingValue) {
TestMessage message("x{c:3} z{test:} y{d:4}");
ASSERT_FALSE(message.finished());
EXPECT_EQ(message.NumChildren(), 2);
}
}
}
} |
176 | #ifndef ABSL_DEBUGGING_STACKTRACE_H_
#define ABSL_DEBUGGING_STACKTRACE_H_
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
extern int GetStackFrames(void** result, int* sizes, int max_depth,
int skip_count);
extern int GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames);
extern int GetStackTrace(void** result, int max_depth, int skip_count);
extern int GetStackTraceWithContext(void** result, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames);
extern void SetStackUnwinder(int (*unwinder)(void** pcs, int* sizes,
int max_depth, int skip_count,
const void* uc,
int* min_dropped_frames));
extern int DefaultStackUnwinder(void** pcs, int* sizes, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames);
namespace debugging_internal {
extern bool StackTraceWorksForTest();
}
ABSL_NAMESPACE_END
}
#endif
#include "absl/debugging/stacktrace.h"
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/stacktrace_config.h"
#if defined(ABSL_STACKTRACE_INL_HEADER)
#include ABSL_STACKTRACE_INL_HEADER
#else
# error Cannot calculate stack trace: will need to write for your environment
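// Never reached when the #error above fires; presumably listed so that
// dependency tooling still sees every platform backend.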
# include "absl/debugging/internal/stacktrace_aarch64-inl.inc"
# include "absl/debugging/internal/stacktrace_arm-inl.inc"
# include "absl/debugging/internal/stacktrace_emscripten-inl.inc"
# include "absl/debugging/internal/stacktrace_generic-inl.inc"
# include "absl/debugging/internal/stacktrace_powerpc-inl.inc"
# include "absl/debugging/internal/stacktrace_riscv-inl.inc"
# include "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
# include "absl/debugging/internal/stacktrace_win32-inl.inc"
# include "absl/debugging/internal/stacktrace_x86-inl.inc"
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
std::atomic<Unwinder> custom;
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes,
int max_depth, int skip_count,
const void* uc,
int* min_dropped_frames) {
Unwinder f = &UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>;
Unwinder g = custom.load(std::memory_order_acquire);
if (g != nullptr) f = g;
int size = (*f)(result, sizes, max_depth, skip_count + 1, uc,
min_dropped_frames);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
return size;
}
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackFrames(
void** result, int* sizes, int max_depth, int skip_count) {
return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
nullptr);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames) {
return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
min_dropped_frames);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(
void** result, int max_depth, int skip_count) {
return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
nullptr);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
GetStackTraceWithContext(void** result, int max_depth, int skip_count,
const void* uc, int* min_dropped_frames) {
return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
min_dropped_frames);
}
void SetStackUnwinder(Unwinder w) {
custom.store(w, std::memory_order_release);
}
int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
const void* uc, int* min_dropped_frames) {
skip++;
Unwinder f = nullptr;
if (sizes == nullptr) {
if (uc == nullptr) {
f = &UnwindImpl<false, false>;
} else {
f = &UnwindImpl<false, true>;
}
} else {
if (uc == nullptr) {
f = &UnwindImpl<true, false>;
} else {
f = &UnwindImpl<true, true>;
}
}
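// The volatile write after the call below disables tail-call optimization
// of (*f)(...), keeping DefaultStackUnwinder as a distinct stack frame.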
volatile int x = 0;
int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
x = 1; (void) x;
return n;
}
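// A minimal usage sketch (the function name is hypothetical): install a
// custom unwinder that delegates to the default one, then restore the
// built-in behavior by storing nullptr:
//
//   int MyUnwinder(void** pcs, int* sizes, int max_depth, int skip_count,
//                  const void* uc, int* min_dropped_frames) {
//     return absl::DefaultStackUnwinder(pcs, sizes, max_depth, skip_count,
//                                       uc, min_dropped_frames);
//   }
//   absl::SetStackUnwinder(&MyUnwinder);
//   ...
//   absl::SetStackUnwinder(nullptr);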
ABSL_NAMESPACE_END
} | #include "absl/debugging/stacktrace.h"
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
namespace {
#if defined(__linux__) && (defined(__x86_64__) || defined(__aarch64__))
ABSL_ATTRIBUTE_NOINLINE void Unwind(void* p) {
ABSL_ATTRIBUTE_UNUSED static void* volatile sink = p;
constexpr int kSize = 16;
void* stack[kSize];
int frames[kSize];
absl::GetStackTrace(stack, kSize, 0);
absl::GetStackFrames(stack, frames, kSize, 0);
}
ABSL_ATTRIBUTE_NOINLINE void HugeFrame() {
char buffer[1 << 20];
Unwind(buffer);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
TEST(StackTrace, HugeFrame) {
HugeFrame();
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
#endif
} |
177 | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_UTILS_SIZE_UTILS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_UTILS_SIZE_UTILS_H_
#include <cstdint>
namespace mlir {
namespace TFL {
int32_t ConvertToTfliteSize(int64_t size);
}
}
#endif
#include "tensorflow/compiler/mlir/lite/utils/size_utils.h"
#include <cstdint>
#include "mlir/IR/BuiltinTypes.h"
namespace mlir {
namespace TFL {
int32_t ConvertToTfliteSize(int64_t size) {
return mlir::ShapedType::isDynamic(size) ? -1 : static_cast<int32_t>(size);
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/size_utils.h"
#include "mlir/IR/BuiltinTypes.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
namespace {
TEST(SizeUtilTest, TestConvertsSize) {
ASSERT_EQ(ConvertToTfliteSize(1), 1);
ASSERT_EQ(ConvertToTfliteSize(-1), -1);
ASSERT_EQ(ConvertToTfliteSize(mlir::ShapedType::kDynamic), -1);
}
}
}
} |
178 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_WEIGHTS_CONVERTER_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_WEIGHTS_CONVERTER_H_
#include <string>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
class ConverterToConvWeights : public GPUOperation {
public:
ConverterToConvWeights(const OperationDef& definition,
const WeightsDescription& weights_desc,
Layout input_layout);
absl::Status BindArguments(ArgumentsBinder* args) override;
int3 GetGridSize() const override;
ConverterToConvWeights(ConverterToConvWeights&& operation) = default;
ConverterToConvWeights& operator=(ConverterToConvWeights&& operation) =
default;
ConverterToConvWeights(const ConverterToConvWeights&) = delete;
ConverterToConvWeights& operator=(const ConverterToConvWeights&) = delete;
private:
std::string GetConverterToConvWeightsCode();
OHWI GetWeightsSize() const;
WeightsDescription weights_desc_;
Layout input_layout_;
};
ConverterToConvWeights CreateConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.h"
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
ConverterToConvWeights::ConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout)
: GPUOperation(definition),
weights_desc_(weights_desc),
input_layout_(input_layout) {
code_ = GetConverterToConvWeightsCode();
}
std::string ConverterToConvWeights::GetConverterToConvWeightsCode() {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
args_.AddFloat("mask_x");
args_.AddFloat("mask_y");
args_.AddFloat("mask_z");
args_.AddFloat("mask_w");
args_.AddInt("out_ch");
args_.AddInt("out_ch_x4_groups");
args_.AddInt("in_ch");
args_.AddInt("in_ch_x4_groups");
args_.AddInt("kernel_width");
args_.AddInt("kernel_height");
args_.AddInt("kernel_spatial_size");
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
std::vector<int32_t> remap(weights_desc_.spatial_remap.size());
for (int i = 0; i < remap.size(); ++i) {
remap[i] = weights_desc_.spatial_remap[i];
}
BufferDescriptor desc;
desc.element_type = DataType::INT32;
desc.element_size = 1;
desc.memory_type = MemoryType::GLOBAL;
desc.size = remap.size() * sizeof(int32_t);
desc.data.resize(desc.size);
std::memcpy(desc.data.data(), remap.data(), desc.size);
args_.AddObject("spatial_remap",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
std::string c;
c += "MAIN_FUNCTION($0) {\n";
c += " int O = GLOBAL_ID_0;\n";
c += " int I = GLOBAL_ID_1;\n";
c += " int spatial_linear = GLOBAL_ID_2;\n";
c += " if (O >= args.out_ch_x4_groups) return;\n";
c += " if (I >= args.in_ch_x4_groups) return;\n";
c += " if (spatial_linear >= args.kernel_spatial_size) return;\n";
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
c += " int linear_remap = args.spatial_remap.Read(spatial_linear);\n";
c += " int W = linear_remap % args.kernel_width;\n";
c += " int H = linear_remap / args.kernel_width;\n";
} else {
c += " int W = spatial_linear % args.kernel_width;\n";
c += " int H = spatial_linear / args.kernel_width;\n";
}
c += " FLT4 v0 = INIT_FLT4(0.0f);\n";
c += " FLT4 v1 = INIT_FLT4(0.0f);\n";
c += " FLT4 v2 = INIT_FLT4(0.0f);\n";
c += " FLT4 v3 = INIT_FLT4(0.0f);\n";
if (input_layout_ == Layout::OHWI) {
c += " if (O * 4 < args.out_ch) {\n";
c += " v0 = args.src_tensor.Read(W, H, I, O * 4);\n";
c += " }\n";
c += " if (O * 4 + 1 < args.out_ch) {\n";
c += " v1 = args.src_tensor.Read(W, H, I, O * 4 + 1);\n";
c += " }\n";
c += " if (O * 4 + 2 < args.out_ch) {\n";
c += " v2 = args.src_tensor.Read(W, H, I, O * 4 + 2);\n";
c += " }\n";
c += " if (O * 4 + 3 < args.out_ch) {\n";
c += " v3 = args.src_tensor.Read(W, H, I, O * 4 + 3);\n";
c += " }\n";
c += " if (I == args.src_tensor.Slices() - 1) {\n";
c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c += " v0 *= mask;\n";
c += " v1 *= mask;\n";
c += " v2 *= mask;\n";
c += " v3 *= mask;\n";
c += " }\n";
} else if (input_layout_ == Layout::HWIO) {
c += " if (I * 4 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v0 = args.src_tensor.Read(I * 4, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 1 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v1 = args.src_tensor.Read(I * 4 + 1, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 2 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v2 = args.src_tensor.Read(I * 4 + 2, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 3 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v3 = args.src_tensor.Read(I * 4 + 3, W, O, H);\n";
c += " }\n";
c += " if (O == args.src_tensor.Slices() - 1) {\n";
c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c += " v0 *= mask;\n";
c += " v1 *= mask;\n";
c += " v2 *= mask;\n";
c += " v3 *= mask;\n";
c += " }\n";
}
const bool need_transpose =
(input_layout_ == Layout::HWIO && weights_desc_.IsO4I4()) ||
(input_layout_ == Layout::OHWI && weights_desc_.IsI4O4());
if (need_transpose) {
c += " FLT4 r0 = INIT_FLT4v4(v0.x, v1.x, v2.x, v3.x);\n";
c += " FLT4 r1 = INIT_FLT4v4(v0.y, v1.y, v2.y, v3.y);\n";
c += " FLT4 r2 = INIT_FLT4v4(v0.z, v1.z, v2.z, v3.z);\n";
c += " FLT4 r3 = INIT_FLT4v4(v0.w, v1.w, v2.w, v3.w);\n";
} else {
c += " FLT4 r0 = v0;\n";
c += " FLT4 r1 = v1;\n";
c += " FLT4 r2 = v2;\n";
c += " FLT4 r3 = v3;\n";
}
if (weights_desc_.layout ==
WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4 ||
weights_desc_.layout ==
WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4) {
AddDstTensor("dst_tensor0", definition_.dst_tensors[0]);
AddDstTensor("dst_tensor1", definition_.dst_tensors[1]);
AddDstTensor("dst_tensor2", definition_.dst_tensors[2]);
AddDstTensor("dst_tensor3", definition_.dst_tensors[3]);
c += " int yc = spatial_linear * args.in_ch_x4_groups + I;\n";
c += " args.dst_tensor0.Write2D(r0, O, yc);\n";
c += " args.dst_tensor1.Write2D(r1, O, yc);\n";
c += " args.dst_tensor2.Write2D(r2, O, yc);\n";
c += " args.dst_tensor3.Write2D(r3, O, yc);\n";
c += "}\n";
} else {
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
c += " int OUTPUT_GROUP_SIZE = " +
std::to_string(weights_desc_.GetOutputGroupSize()) + ";\n";
c += " int d_index = (O * 4) / (OUTPUT_GROUP_SIZE * 4);\n";
c += " int k_index = ((O * 4) % (OUTPUT_GROUP_SIZE * 4)) / 4;\n";
std::string index;
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
index =
"(d_index * args.in_ch_x4_groups + I) * args.kernel_spatial_size + "
"spatial_linear";
} else if (weights_desc_.layout == WeightsLayout::kOSpatialIOGroupI4O4 ||
weights_desc_.layout == WeightsLayout::kOSpatialIOGroupO4I4) {
index =
"(d_index * args.kernel_spatial_size + spatial_linear) * "
"args.in_ch_x4_groups + I";
}
c += " int dst_offset = (" + index + ") * OUTPUT_GROUP_SIZE + k_index;\n";
c += " args.dst_tensor.WriteLinear(r0, dst_offset * 4 + 0);\n";
c += " args.dst_tensor.WriteLinear(r1, dst_offset * 4 + 1);\n";
c += " args.dst_tensor.WriteLinear(r2, dst_offset * 4 + 2);\n";
c += " args.dst_tensor.WriteLinear(r3, dst_offset * 4 + 3);\n";
c += "}\n";
}
return c;
}
OHWI ConverterToConvWeights::GetWeightsSize() const {
int output_channels = 0;
int input_channels = 0;
int kernel_width = 0;
int kernel_height = 0;
if (input_layout_ == Layout::HWIO) {
output_channels = src_[0]->Channels();
input_channels = src_[0]->Width();
kernel_width = src_[0]->Height();
kernel_height = src_[0]->Batch();
} else if (input_layout_ == Layout::OHWI) {
output_channels = src_[0]->Batch();
input_channels = src_[0]->Channels();
kernel_width = src_[0]->Width();
kernel_height = src_[0]->Height();
}
return OHWI(output_channels, kernel_height, kernel_width, input_channels);
}
absl::Status ConverterToConvWeights::BindArguments(ArgumentsBinder* args) {
const auto& weights_shape = GetWeightsSize();
const int output_channels_x4_groups = DivideRoundUp(
AlignByN(weights_shape.o, 4 * weights_desc_.GetOutputGroupSize()), 4);
RETURN_IF_ERROR(args->SetInt("out_ch", weights_shape.o));
RETURN_IF_ERROR(args->SetInt("out_ch_x4_groups", output_channels_x4_groups));
RETURN_IF_ERROR(args->SetInt("in_ch", weights_shape.i));
RETURN_IF_ERROR(
args->SetInt("in_ch_x4_groups", DivideRoundUp(weights_shape.i, 4)));
RETURN_IF_ERROR(args->SetInt("kernel_width", weights_shape.w));
RETURN_IF_ERROR(args->SetInt("kernel_height", weights_shape.h));
RETURN_IF_ERROR(
args->SetInt("kernel_spatial_size", weights_shape.w * weights_shape.h));
float4 mask = GetMaskForLastPlane(src_[0]->Channels());
RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x));
RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y));
RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z));
return args->SetFloat("mask_w", mask.w);
}
int3 ConverterToConvWeights::GetGridSize() const {
const auto& weights_shape = GetWeightsSize();
const int out_group_size = weights_desc_.GetOutputGroupSize();
const int grid_x =
DivideRoundUp(AlignByN(weights_shape.o, 4 * out_group_size), 4);
const int grid_y = DivideRoundUp(weights_shape.i, 4);
const int grid_z = weights_shape.w * weights_shape.h;
return int3(grid_x, grid_y, grid_z);
}
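// Worked example, a sketch with assumed OHWI weights o=17, h=3, w=3, i=20
// and output group size 2: grid_x = DivideRoundUp(AlignByN(17, 8), 4) = 6,
// grid_y = DivideRoundUp(20, 4) = 5, grid_z = 3 * 3 = 9.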
ConverterToConvWeights CreateConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout) {
return ConverterToConvWeights(definition, weights_desc, input_layout);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4) {
const auto status = ConverterToConvWeights1x1OutX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4Unaligned) {
const auto status = ConverterToConvWeights1x1OutX4UnalignedTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX2) {
const auto status = ConverterToConvWeights1x1OutX2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeightsOutX2) {
const auto status = ConverterToConvWeightsOutX2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvTransposedWeights4x4) {
const auto status = ConverterToConvTransposedWeights4x4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights4xTextures) {
const auto status = ConverterToConvWeights4xTexturesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} |
179 | #ifndef I18N_ADDRESSINPUT_VALIDATING_STORAGE_H_
#define I18N_ADDRESSINPUT_VALIDATING_STORAGE_H_
#include <libaddressinput/storage.h>
#include <memory>
#include <string>
namespace i18n {
namespace addressinput {
class ValidatingStorage : public Storage {
public:
ValidatingStorage(const ValidatingStorage&) = delete;
ValidatingStorage& operator=(const ValidatingStorage&) = delete;
explicit ValidatingStorage(Storage* storage);
~ValidatingStorage() override;
void Put(const std::string& key, std::string* data) override;
void Get(const std::string& key, const Callback& data_ready) const override;
private:
std::unique_ptr<Storage> wrapped_storage_;
};
}
}
#endif
#include "validating_storage.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/storage.h>
#include <cassert>
#include <cstddef>
#include <ctime>
#include <memory>
#include <string>
#include "validating_util.h"
namespace i18n {
namespace addressinput {
namespace {
class Helper {
public:
Helper(const Helper&) = delete;
Helper& operator=(const Helper&) = delete;
Helper(const std::string& key,
const ValidatingStorage::Callback& data_ready,
const Storage& wrapped_storage)
: data_ready_(data_ready),
wrapped_data_ready_(BuildCallback(this, &Helper::OnWrappedDataReady)) {
wrapped_storage.Get(key, *wrapped_data_ready_);
}
private:
~Helper() = default;
void OnWrappedDataReady(bool success,
const std::string& key,
std::string* data) {
if (success) {
assert(data != nullptr);
bool is_stale =
!ValidatingUtil::UnwrapTimestamp(data, std::time(nullptr));
bool is_corrupted = !ValidatingUtil::UnwrapChecksum(data);
success = !is_corrupted && !is_stale;
if (is_corrupted) {
delete data;
data = nullptr;
}
} else {
delete data;
data = nullptr;
}
data_ready_(success, key, data);
delete this;
}
const Storage::Callback& data_ready_;
const std::unique_ptr<const Storage::Callback> wrapped_data_ready_;
};
}
ValidatingStorage::ValidatingStorage(Storage* storage)
: wrapped_storage_(storage) {
assert(wrapped_storage_ != nullptr);
}
ValidatingStorage::~ValidatingStorage() = default;
void ValidatingStorage::Put(const std::string& key, std::string* data) {
assert(data != nullptr);
ValidatingUtil::Wrap(std::time(nullptr), data);
wrapped_storage_->Put(key, data);
}
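// The wrapped on-disk layout, as shown by the unit test constants:
// "timestamp=<seconds>\n" "checksum=<hex digest>\n" followed by the
// original data.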
void ValidatingStorage::Get(const std::string& key,
const Callback& data_ready) const {
new Helper(key, data_ready, *wrapped_storage_);
}
}
} | #include "validating_storage.h"
#include <libaddressinput/callback.h>
#include <libaddressinput/storage.h>
#include <cstddef>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "fake_storage.h"
#define CHECKSUM "dd63dafcbd4d5b28badfcaf86fb6fcdb"
#define DATA "{'foo': 'bar'}"
#define OLD_TIMESTAMP "0"
namespace {
using i18n::addressinput::BuildCallback;
using i18n::addressinput::FakeStorage;
using i18n::addressinput::Storage;
using i18n::addressinput::ValidatingStorage;
const char kKey[] = "key";
const char kValidatedData[] = DATA;
const char kStaleWrappedData[] = "timestamp=" OLD_TIMESTAMP "\n"
"checksum=" CHECKSUM "\n"
DATA;
const char kEmptyData[] = "";
class ValidatingStorageTest : public testing::Test {
public:
ValidatingStorageTest(const ValidatingStorageTest&) = delete;
ValidatingStorageTest& operator=(const ValidatingStorageTest&) = delete;
protected:
ValidatingStorageTest()
: wrapped_storage_(new FakeStorage),
storage_(wrapped_storage_),
success_(false),
key_(),
data_(),
data_ready_(BuildCallback(this, &ValidatingStorageTest::OnDataReady)) {}
Storage* const wrapped_storage_;
ValidatingStorage storage_;
bool success_;
std::string key_;
std::string data_;
const std::unique_ptr<const ValidatingStorage::Callback> data_ready_;
private:
void OnDataReady(bool success, const std::string& key, std::string* data) {
ASSERT_FALSE(success && data == nullptr);
success_ = success;
key_ = key;
if (data != nullptr) {
data_ = *data;
delete data;
}
}
};
TEST_F(ValidatingStorageTest, GoodData) {
storage_.Put(kKey, new std::string(kValidatedData));
storage_.Get(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kValidatedData, data_);
}
TEST_F(ValidatingStorageTest, EmptyData) {
storage_.Put(kKey, new std::string(kEmptyData));
storage_.Get(kKey, *data_ready_);
EXPECT_TRUE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kEmptyData, data_);
}
TEST_F(ValidatingStorageTest, MissingKey) {
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
TEST_F(ValidatingStorageTest, GarbageData) {
storage_.Put(kKey, new std::string(kValidatedData));
wrapped_storage_->Put(kKey, new std::string("garbage"));
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_TRUE(data_.empty());
}
TEST_F(ValidatingStorageTest, StaleData) {
storage_.Put(kKey, new std::string(kValidatedData));
wrapped_storage_->Put(kKey, new std::string(kStaleWrappedData));
storage_.Get(kKey, *data_ready_);
EXPECT_FALSE(success_);
EXPECT_EQ(kKey, key_);
EXPECT_EQ(kValidatedData, data_);
}
} |
180 | #ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_IO_OPS_H_
#define TENSORFLOW_C_EXPERIMENTAL_OPS_IO_OPS_H_
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
namespace tensorflow {
namespace ops {
Status RestoreV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle*> tensors,
absl::Span<DataType> dtypes, const char* name = nullptr,
const char* raw_device_name = nullptr);
Status SaveV2(AbstractContext* ctx, AbstractTensorHandle* const prefix,
AbstractTensorHandle* const tensor_names,
AbstractTensorHandle* const shape_and_slices,
absl::Span<AbstractTensorHandle* const> tensors,
const char* name = nullptr,
const char* raw_device_name = nullptr);
}
}
#endif
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/util/saved_tensor_slice_util.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
Status ScalarInputsAndOutputs(InferenceContext* c) {
ShapeHandle unused;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 0, &unused));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status TwoElementVectorAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status TwoElementOutput(InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
}
}
REGISTER_OP("SaveV2")
.Input("prefix: string")
.Input("tensor_names: string")
.Input("shape_and_slices: string")
.Input("tensors: dtypes")
.Attr("dtypes: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
for (int i = 1; i <= 2; ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &s));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, 0), c->num_inputs() - 3, &unused_dim));
}
return absl::OkStatus();
});
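// Example: with a dtypes list of length 2 the op has 5 inputs, so both
// tensor_names and shape_and_slices must be vectors of length 5 - 3 = 2.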
REGISTER_OP("RestoreV2")
.Input("prefix: string")
.Input("tensor_names: string")
.Input("shape_and_slices: string")
.Output("tensors: dtypes")
.Attr("dtypes: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle shape0, shape1, shape2;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &shape0));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &shape1));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &shape2));
TF_RETURN_IF_ERROR(c->Merge(shape1, shape2, &shape0));
const Tensor* shape_and_slices_tensor = c->input_tensor(2);
if (shape_and_slices_tensor) {
if (shape_and_slices_tensor->dtype() != DT_STRING) {
return errors::InvalidArgument(
"Expected an input tensor of type string.");
}
const auto& shape_and_slices_flat =
shape_and_slices_tensor->flat<tstring>();
if (shape_and_slices_flat.size() != c->num_outputs()) {
return errors::InvalidArgument(
"The number of shape_and_slice doesn't match tensor outputs.");
}
for (int i = 0; i < shape_and_slices_flat.size(); ++i) {
const string& shape_and_slice = shape_and_slices_flat(i);
if (shape_and_slice.empty()) {
c->set_output(i, c->UnknownShape());
continue;
}
TensorShape parsed_full_shape;
TensorSlice parsed_slice;
TensorShape parsed_slice_shape;
TF_RETURN_IF_ERROR(checkpoint::ParseShapeAndSlice(
shape_and_slice, &parsed_full_shape, &parsed_slice,
&parsed_slice_shape));
ShapeHandle shape_handle;
TF_RETURN_IF_ERROR(
c->MakeShapeFromTensorShape(parsed_slice_shape, &shape_handle));
c->set_output(i, shape_handle);
}
return absl::OkStatus();
} else {
return UnknownShape(c);
}
});
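// Example, a sketch of the shape_and_slice format consumed above: "4 5
// 0,2:-" names a [4,5] tensor and restores the slice [0:2, :], so the
// inferred output shape is [2,5]; an empty string leaves the output shape
// unknown.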
REGISTER_OP("MergeV2Checkpoints")
.Input("checkpoint_prefixes: string")
.Input("destination_prefix: string")
.Attr("delete_old_dirs: bool = true")
.Attr("allow_missing_files: bool = false")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("Save")
.Input("filename: string")
.Input("tensor_names: string")
.Input("data: T")
.Attr("T: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &s));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, 0), c->num_inputs() - 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("SaveSlices")
.Input("filename: string")
.Input("tensor_names: string")
.Input("shapes_and_slices: string")
.Input("data: T")
.Attr("T: list(type)")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
ShapeHandle s;
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
for (int i = 1; i <= 2; ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &s));
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(s, 0), c->num_inputs() - 3, &unused_dim));
}
return absl::OkStatus();
});
REGISTER_OP("Restore")
.Input("file_pattern: string")
.Input("tensor_name: string")
.Output("tensor: dt")
.Attr("dt: type")
.Attr("preferred_shard: int = -1")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
REGISTER_OP("RestoreSlice")
.Input("file_pattern: string")
.Input("tensor_name: string")
.Input("shape_and_slice: string")
.Output("tensor: dt")
.Attr("dt: type")
.Attr("preferred_shard: int = -1")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
const Tensor* shape_and_slices_tensor = c->input_tensor(2);
if (shape_and_slices_tensor) {
const auto& shape_and_slice =
shape_and_slices_tensor->flat<tstring>()(0);
if (shape_and_slice.empty()) {
c->set_output(0, c->UnknownShape());
} else {
TensorShape parsed_full_shape;
TensorSlice parsed_slice;
TensorShape parsed_slice_shape;
TF_RETURN_IF_ERROR(checkpoint::ParseShapeAndSlice(
shape_and_slice, &parsed_full_shape, &parsed_slice,
&parsed_slice_shape));
ShapeHandle shape_handle;
TF_RETURN_IF_ERROR(
c->MakeShapeFromTensorShape(parsed_slice_shape, &shape_handle));
c->set_output(0, shape_handle);
}
} else {
c->set_output(0, c->UnknownShape());
}
return absl::OkStatus();
});
REGISTER_OP("ShardedFilename")
.Input("basename: string")
.Input("shard: int32")
.Input("num_shards: int32")
.Output("filename: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ShardedFilespec")
.Input("basename: string")
.Input("num_shards: int32")
.Output("filename: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("WholeFileReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("WholeFileReaderV2")
.Output("reader_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TextLineReader")
.Output("reader_handle: Ref(string)")
.Attr("skip_header_lines: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use TextLineReaderV2");
REGISTER_OP("TextLineReaderV2")
.Output("reader_handle: resource")
.Attr("skip_header_lines: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("FixedLengthRecordReader")
.Output("reader_handle: Ref(string)")
.Attr("header_bytes: int = 0")
.Attr("record_bytes: int")
.Attr("footer_bytes: int = 0")
.Attr("hop_bytes: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use FixedLengthRecordReaderV2");
REGISTER_OP("FixedLengthRecordReaderV2")
.Output("reader_handle: resource")
.Attr("header_bytes: int = 0")
.Attr("record_bytes: int")
.Attr("footer_bytes: int = 0")
.Attr("hop_bytes: int = 0")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("encoding: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TFRecordReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("compression_type: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use TFRecordReaderV2");
REGISTER_OP("TFRecordReaderV2")
.Output("reader_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.Attr("compression_type: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("LMDBReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput);
REGISTER_OP("IdentityReader")
.Output("reader_handle: Ref(string)")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(TwoElementOutput)
.Deprecated(26, "Use IdentityReaderV2");
REGISTER_OP("IdentityReaderV2")
.Output("reader_handle: resource")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("ReaderRead")
.Input("reader_handle: Ref(string)")
.Input("queue_handle: Ref(string)")
.Output("key: string")
.Output("value: string")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderReadV2")
.Input("reader_handle: resource")
.Input("queue_handle: resource")
.Output("key: string")
.Output("value: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderReadUpTo")
.Input("reader_handle: Ref(string)")
.Input("queue_handle: Ref(string)")
.Input("num_records: int64")
.Output("keys: string")
.Output("values: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
ShapeHandle out = c->Vector(InferenceContext::kUnknownDim);
c->set_output(0, out);
c->set_output(1, out);
return absl::OkStatus();
});
REGISTER_OP("ReaderReadUpToV2")
.Input("reader_handle: resource")
.Input("queue_handle: resource")
.Input("num_records: int64")
.Output("keys: string")
.Output("values: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
ShapeHandle out = c->Vector(InferenceContext::kUnknownDim);
c->set_output(0, out);
c->set_output(1, out);
return absl::OkStatus();
});
REGISTER_OP("ReaderNumRecordsProduced")
.Input("reader_handle: Ref(string)")
.Output("records_produced: int64")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderNumRecordsProducedV2")
.Input("reader_handle: resource")
.Output("records_produced: int64")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderNumWorkUnitsCompleted")
.Input("reader_handle: Ref(string)")
.Output("units_completed: int64")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderNumWorkUnitsCompletedV2")
.Input("reader_handle: resource")
.Output("units_completed: int64")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderSerializeState")
.Input("reader_handle: Ref(string)")
.Output("state: string")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderSerializeStateV2")
.Input("reader_handle: resource")
.Output("state: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReaderRestoreState")
.Input("reader_handle: Ref(string)")
.Input("state: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused));
DimensionHandle unused_handle;
TF_RETURN_IF_ERROR(
c->WithValue(c->Dim(c->input(0), 0), 2, &unused_handle));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ReaderRestoreStateV2")
.Input("reader_handle: resource")
.Input("state: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("ReaderReset")
.Input("reader_handle: Ref(string)")
.SetShapeFn(TwoElementVectorAndScalarOutputs);
REGISTER_OP("ReaderResetV2")
.Input("reader_handle: resource")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("ReadFile")
.Input("filename: string")
.Output("contents: string")
.SetShapeFn(ScalarInputsAndOutputs);
REGISTER_OP("WriteFile")
.Input("filename: string")
.Input("contents: string")
.SetIsStateful()
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &unused));
TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
return absl::OkStatus();
});
REGISTER_OP("MatchingFiles")
.Input("pattern: string")
.Output("filenames: string")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(0), 1, &unused));
c->set_output(0, c->Vector(InferenceContext::kUnknownDim));
return absl::OkStatus();
});
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
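// INFER_OK(op, ins, outs) asserts that shape inference maps the
// semicolon-separated input shapes `ins` to the expected outputs `outs`,
// where "?" denotes an unknown shape and "[]" a scalar; INFER_ERROR asserts
// that inference fails with the given message.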
TEST(IoOpsTest, Save_ShapeFn) {
ShapeInferenceTestOp op("Save");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({{"c", 0, DT_FLOAT}, {"d", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?", "");
INFER_OK(op, "[];[2];?;?", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?");
}
TEST(IoOpsTest, SaveSlices_ShapeFn) {
ShapeInferenceTestOp op("SaveSlices");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"a", 0, DT_STRING})
.Input({"b", 0, DT_STRING})
.Input({"c", 0, DT_STRING})
.Input({{"d", 0, DT_FLOAT}, {"e", 0, DT_INT64}})
.Attr("T", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?;?;?", "");
INFER_OK(op, "[];[2];[2];?;?", "");
INFER_OK(op, "[];[2];[2];[100,200,300];[4,5]", "");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2,3];?;?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];?;?;?");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[2];[2,3];?;?");
INFER_ERROR("Dimension must be 2 but is 3", op, "[];[2];[3];?;?");
}
TEST(IoOpsTest, Restore_ShapeFn) {
ShapeInferenceTestOp op("Restore");
INFER_OK(op, "?;?", "?");
INFER_OK(op, "[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, RestoreV2_ShapeFn) {
ShapeInferenceTestOp op("RestoreV2");
TF_ASSERT_OK(NodeDefBuilder("test", op.name)
.Input({"prefix", 0, DT_STRING})
.Input({"tensor_names", 0, DT_STRING})
.Input({"shapes_and_slices", 0, DT_STRING})
.Attr("dtypes", {DT_FLOAT, DT_INT64})
.Finalize(&op.node_def));
INFER_OK(op, "?;?;?", "?;?");
INFER_OK(op, "[];[10];[10]", "?;?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?,?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[];[?];[?,?]");
INFER_ERROR("in both shapes must be equal", op, "[];[10];[20]");
}
TEST(IoOpsTest, RestoreSlice_ShapeFn) {
ShapeInferenceTestOp op("RestoreSlice");
INFER_OK(op, "?;?;?", "?");
INFER_OK(op, "[];[];[]", "?");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilename_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilename");
INFER_OK(op, "?;?;?", "[]");
INFER_OK(op, "[];[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[];[?]");
}
TEST(IoOpsTest, ShardedFilespec_ShapeFn) {
ShapeInferenceTestOp op("ShardedFilespec");
INFER_OK(op, "?;?", "[]");
INFER_OK(op, "[];[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[?]");
}
TEST(IoOpsTest, SingleScalarInputAndOutput_ShapeFns) {
for (const char* op_name : {"ReadFile"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[]", "[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?]");
}
}
TEST(IoOpsTest, TwoElementVectorInputsAndScalarOutput_ShapeFns) {
for (const char* op_name :
{"ReaderNumRecordsProduced", "ReaderNumWorkUnitsCompleted",
"ReaderSerializeState"}) {
ShapeInferenceTestOp op(op_name);
INFER_OK(op, "?", "[]");
INFER_OK(op, "[2]", "[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
INFER_ERROR("Dimension must be 2 but is 3", op, "[3]");
}
}
TEST(IoOpsTest, ReaderRead_ShapeFn) {
ShapeInferenceTestOp op("ReaderRead");
INFER_OK(op, "?;?", "[];[]");
INFER_OK(op, "[2];[?]", "[];[]");
INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[?,?];[2]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[]");
}
TEST(IoOpsTest, ReaderReadUpTo_ShapeFn) {
ShapeInferenceTestOp op("ReaderReadUpTo");
INFER_OK(op, "[2];[2];[]", "[?];[?]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[2];[]");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2];[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[2];[2];[?]");
}
TEST(IoOpsTest, ReaderReset_ShapeFn) {
ShapeInferenceTestOp op("ReaderReset");
INFER_OK(op, "[2]", "");
INFER_OK(op, "[?]", "");
INFER_OK(op, "?", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[]");
}
TEST(IoOpsTest, ReaderRestoreState_ShapeFn) {
ShapeInferenceTestOp op("ReaderRestoreState");
INFER_OK(op, "?;?", "");
INFER_OK(op, "[2];[]", "");
INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[]");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];[?]");
}
TEST(IoOpsTest, MatchingFiles_ShapeFn) {
ShapeInferenceTestOp op("MatchingFiles");
INFER_OK(op, "?", "[?]");
INFER_OK(op, "[]", "[?]");
INFER_OK(op, "[42]", "[?]");
INFER_ERROR("Shape must be at most rank 1 but is rank 2", op, "[?,?]");
}
} |
181 | #ifndef XLA_SERVICE_OPERAND_UPCASTER_H_
#define XLA_SERVICE_OPERAND_UPCASTER_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
#include "xla/util.h"
namespace xla {
class OperandUpcaster : public OpExpanderPass {
public:
explicit OperandUpcaster(HloPredicate extra_filter = nullptr)
: OpExpanderPass(std::move(extra_filter)) {}
absl::string_view name() const override { return "operand_upcaster"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/operand_upcaster.h"
#include <optional>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
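// Infers the natural (accumulation) shape of a dot or convolution from its
// operand shapes; returns nullopt for any other opcode.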
absl::StatusOr<std::optional<Shape>> MaybeInferShape(
const HloInstruction* instruction) {
switch (instruction->opcode()) {
case HloOpcode::kDot:
return ShapeInference::InferDotOpShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->dot_dimension_numbers(),
std::nullopt,
Cast<HloDotInstruction>(instruction)->sparsity());
case HloOpcode::kConvolution:
return ShapeInference::InferConvolveShape(
instruction->operand(0)->shape(), instruction->operand(1)->shape(),
instruction->feature_group_count(), instruction->batch_group_count(),
instruction->window(), instruction->convolution_dimension_numbers(),
std::nullopt);
default:
return std::optional<Shape>(std::nullopt);
}
}
}
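// An instruction matches when its operands would be accumulated in a wider
// type than they currently have: either both operands use packed-nibble
// precision, or the inferred element type differs from the operand/result
// types and can be safely upcast to the result type.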
bool OperandUpcaster::InstructionMatchesPattern(HloInstruction* instruction) {
auto status_or_inferred_shape = MaybeInferShape(instruction);
if (!status_or_inferred_shape.ok() ||
!status_or_inferred_shape->has_value()) {
return false;
}
if (absl::c_count(instruction->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE) == 2) {
return true;
}
PrimitiveType inferred_type = (*status_or_inferred_shape)->element_type();
if (instruction->shape().element_type() == inferred_type &&
instruction->operand(0)->shape().element_type() == inferred_type &&
instruction->operand(1)->shape().element_type() == inferred_type) {
return false;
}
return ShapeUtil::ElementCanUpcast(**status_or_inferred_shape,
instruction->shape());
}
absl::StatusOr<HloInstruction*> OperandUpcaster::ExpandInstruction(
HloInstruction* instruction) {
const bool packed_nibble =
absl::c_count(instruction->precision_config().operand_precision(),
PrecisionConfig::PACKED_NIBBLE) == 2;
auto type = instruction->shape().element_type();
if (packed_nibble) {
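    // Each packed-nibble operand holds two 4-bit values per byte: shifting
    // left by 4 then (arithmetically or logically) right by 4 extracts the
    // low nibble sign-/zero-extended, while a single right shift by 4
    // extracts the high nibble. The two partial products are added below.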
HloInstruction *lhs_n0 = instruction->mutable_operand(0), *lhs_n1 = lhs_n0,
*rhs_n0 = instruction->mutable_operand(1), *rhs_n1 = rhs_n0;
TF_ASSIGN_OR_RETURN(lhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, lhs_n0,
MakeScalarLike(lhs_n0, 4)));
HloOpcode lhs_shift = ShapeUtil::ElementIsSigned(lhs_n0->shape())
? HloOpcode::kShiftRightArithmetic
: HloOpcode::kShiftRightLogical;
TF_ASSIGN_OR_RETURN(
lhs_n0, MakeBinaryHlo(lhs_shift, lhs_n0, MakeScalarLike(lhs_n0, 4)));
lhs_n0 = MakeConvertToHlo(lhs_n0, type);
TF_ASSIGN_OR_RETURN(
lhs_n1, MakeBinaryHlo(lhs_shift, lhs_n1, MakeScalarLike(lhs_n1, 4)));
lhs_n1 = MakeConvertToHlo(lhs_n1, type);
TF_ASSIGN_OR_RETURN(rhs_n0, MakeBinaryHlo(HloOpcode::kShiftLeft, rhs_n0,
MakeScalarLike(rhs_n0, 4)));
HloOpcode rhs_shift = ShapeUtil::ElementIsSigned(rhs_n0->shape())
? HloOpcode::kShiftRightArithmetic
: HloOpcode::kShiftRightLogical;
TF_ASSIGN_OR_RETURN(
rhs_n0, MakeBinaryHlo(rhs_shift, rhs_n0, MakeScalarLike(rhs_n0, 4)));
rhs_n0 = MakeConvertToHlo(rhs_n0, type);
TF_ASSIGN_OR_RETURN(
rhs_n1, MakeBinaryHlo(rhs_shift, rhs_n1, MakeScalarLike(rhs_n1, 4)));
rhs_n1 = MakeConvertToHlo(rhs_n1, type);
HloInstruction* linear_n0 =
instruction->parent()->AddInstruction(instruction->CloneWithNewOperands(
instruction->shape(), {lhs_n0, rhs_n0}));
linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
0, PrecisionConfig::DEFAULT);
linear_n0->mutable_precision_config()->mutable_operand_precision()->Set(
1, PrecisionConfig::DEFAULT);
HloInstruction* linear_n1 =
instruction->parent()->AddInstruction(linear_n0->CloneWithNewOperands(
instruction->shape(), {lhs_n1, rhs_n1}));
return MakeBinaryHlo(HloOpcode::kAdd, linear_n0, linear_n1);
}
for (int i = 0; i < HloDotInstruction::kOperands; ++i) {
auto* operand = instruction->mutable_operand(i);
if (operand->shape().element_type() == type) {
continue;
}
auto upcast_shape = operand->shape();
upcast_shape.set_element_type(type);
auto* convert_inst = instruction->AddInstruction(
HloInstruction::CreateConvert(upcast_shape, operand));
TF_RETURN_IF_ERROR(
instruction->ReplaceOperandWithDifferentShape(i, convert_inst));
}
return nullptr;
}
} | #include "xla/service/operand_upcaster.h"
#include <memory>
#include <tuple>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class OperandUpcasterTest
: public HloTestBase,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>> {};
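// Upcasting is expected only when the operand type differs from the result
// type and the result type is the higher-precision of the two.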
bool ShouldUpcast(PrimitiveType operand_type, PrimitiveType result_type) {
return operand_type != result_type &&
primitive_util::HigherPrecisionType(operand_type, result_type) ==
result_type;
}
TEST_P(OperandUpcasterTest, ConvertInserted) {
PrimitiveType lhs_type, rhs_type, result_type;
std::tie(lhs_type, rhs_type, result_type) = GetParam();
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
EXPECT_EQ(upcasted, ShouldUpcast(lhs_type, result_type) ||
ShouldUpcast(rhs_type, result_type));
auto original_lhs = op::Parameter(0);
auto original_rhs = op::Parameter(1);
auto upcasted_lhs =
ShouldUpcast(lhs_type, result_type)
? AllOf(op::Convert(original_lhs),
op::Shape(absl::Substitute(
"$0[2,3]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type))))
: original_lhs;
auto upcasted_rhs =
ShouldUpcast(rhs_type, result_type)
? AllOf(op::Convert(original_rhs),
op::Shape(absl::Substitute(
"$0[3,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type))))
: original_rhs;
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(upcasted_lhs, upcasted_rhs),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
}
INSTANTIATE_TEST_SUITE_P(S16U16, OperandUpcasterTest,
::testing::Values(std::make_tuple(S8, S8, S16),
std::make_tuple(U8, U8, U16)));
INSTANTIATE_TEST_SUITE_P(S32, OperandUpcasterTest,
::testing::Combine(::testing::Values(S8, U8, S16),
::testing::Values(S8, U8, S16),
::testing::Values(S32)));
INSTANTIATE_TEST_SUITE_P(U32, OperandUpcasterTest,
::testing::Combine(::testing::Values(U8, U16),
::testing::Values(U8, U16),
::testing::Values(U32)));
INSTANTIATE_TEST_SUITE_P(BF16, OperandUpcasterTest,
::testing::Combine(::testing::Values(BF16, S8, U8),
::testing::Values(BF16, S8, U8),
::testing::Values(BF16)));
INSTANTIATE_TEST_SUITE_P(F32, OperandUpcasterTest,
::testing::Combine(::testing::Values(BF16, F16),
::testing::Values(BF16, F16),
::testing::Values(F32)));
INSTANTIATE_TEST_SUITE_P(NoUpcast, OperandUpcasterTest,
::testing::Values(std::make_tuple(F32, F32, BF16),
std::make_tuple(S32, S32, U32)));
TEST_F(OperandUpcasterTest, SparseDot) {
absl::string_view kHlo = R"(
HloModule module
ENTRY main {
p0 = bf16[2,16]{1,0} parameter(0)
p1 = bf16[32,2]{1,0} parameter(1)
meta = u16[2,2]{1,0} parameter(2)
ROOT dot = f32[2,2]{1,0} dot(p0, p1, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHlo));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted, OperandUpcaster().Run(module.get()));
EXPECT_TRUE(upcasted);
auto upcasted_lhs =
AllOf(op::Convert(op::Parameter(0)), op::Shape("f32[2,16]{1,0}"));
auto upcasted_rhs =
AllOf(op::Convert(op::Parameter(1)), op::Shape("f32[32,2]{1,0}"));
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(::testing::MakeMatcher(new ::xla::testing::HloMatcher(
HloOpcode::kDot,
{upcasted_lhs, upcasted_rhs, op::Parameter(2)})),
op::Shape("f32[2,2]{1,0}")));
}
}
} |
182 | #ifndef XLA_MLIR_UTILS_ERROR_UTIL_H_
#define XLA_MLIR_UTILS_ERROR_UTIL_H_
#include <string>
#include "absl/status/status.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
namespace mlir {
class BaseScopedDiagnosticHandler : public SourceMgrDiagnosticHandler {
public:
explicit BaseScopedDiagnosticHandler(MLIRContext* context,
bool propagate = false,
bool filter_stack = false);
~BaseScopedDiagnosticHandler();
bool ok() const;
absl::Status ConsumeStatus();
absl::Status Combine(absl::Status status);
protected:
LogicalResult handler(Diagnostic* diag);
std::string diag_str_;
llvm::raw_string_ostream diag_stream_;
llvm::SourceMgr source_mgr_;
bool propagate_;
};
}
#endif
#include "xla/mlir/utils/error_util.h"
#include <string>
#include <string_view>
#include "tsl/platform/errors.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
namespace mlir {
BaseScopedDiagnosticHandler::BaseScopedDiagnosticHandler(MLIRContext* context,
bool propagate,
bool filter_stack)
: SourceMgrDiagnosticHandler(source_mgr_, context, diag_stream_),
diag_stream_(diag_str_),
propagate_(propagate) {}
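// Note: the SourceMgrDiagnosticHandler base class only binds references to
// source_mgr_ and diag_stream_, so passing these members before they are
// constructed is well-defined.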
BaseScopedDiagnosticHandler::~BaseScopedDiagnosticHandler() {
bool all_errors_produced_were_consumed = ok();
DCHECK(all_errors_produced_were_consumed) << "Error status not consumed:\n"
<< diag_str_;
}
bool BaseScopedDiagnosticHandler::ok() const { return diag_str_.empty(); }
absl::Status BaseScopedDiagnosticHandler::ConsumeStatus() {
if (ok()) return absl::OkStatus();
absl::Status s = absl::UnknownError(diag_str_);
diag_str_.clear();
return s;
}
absl::Status BaseScopedDiagnosticHandler::Combine(absl::Status status) {
if (status.ok()) return ConsumeStatus();
if (ok()) return status;
std::string str_status_message(status.message());
status = absl::Status(status.code(), str_status_message + diag_str_);
diag_str_.clear();
return status;
}
LogicalResult BaseScopedDiagnosticHandler::handler(Diagnostic* diag) {
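  // Render the diagnostic into diag_str_; non-error diagnostics are logged
  // at VLOG(1) and then trimmed back out, so only errors accumulate in the
  // status message.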
size_t current_diag_str_size_ = diag_str_.size();
emitDiagnostic(*diag);
diag_stream_.flush();
if (diag->getSeverity() != DiagnosticSeverity::Error) {
VLOG(1) << diag_str_.substr(current_diag_str_size_);
diag_str_.resize(current_diag_str_size_);
}
return failure(propagate_);
}
} | #include "xla/mlir/utils/error_util.h"
#include <string>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "llvm/ADT/Twine.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
namespace mlir {
namespace {
TEST(ErrorUtilTest, BaseScopedDiagnosticHandler) {
MLIRContext context;
  auto id = StringAttr::get(&context, "file.py");  // hypothetical name: the original string literal was truncated in the source
auto loc = FileLineColLoc::get(&context, id, 0, 0);
{
TF_EXPECT_OK(
BaseScopedDiagnosticHandler(&context).Combine(absl::OkStatus()));
}
{
BaseScopedDiagnosticHandler handler(&context);
emitError(loc) << "Diagnostic message";
ASSERT_TRUE(absl::IsUnknown(handler.ConsumeStatus()));
}
{
absl::Status err = absl::InternalError("Passed in error");
ASSERT_TRUE(
absl::IsInternal(BaseScopedDiagnosticHandler(&context).Combine(err)));
}
{
auto function = [&]() {
emitError(loc) << "Diagnostic message reported";
emitError(loc) << "Second diagnostic message reported";
return absl::InternalError("Passed in error");
};
BaseScopedDiagnosticHandler ssdh(&context);
absl::Status s = ssdh.Combine(function());
ASSERT_TRUE(absl::IsInternal(s));
EXPECT_TRUE(absl::StrContains(s.message(), "Passed in error"));
EXPECT_TRUE(absl::StrContains(s.message(), "Diagnostic message reported"));
EXPECT_TRUE(
absl::StrContains(s.message(), "Second diagnostic message reported"));
}
}
}
} |
183 | #ifndef THIRD_PARTY_CEL_CPP_COMMON_VALUES_UINT_VALUE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_VALUES_UINT_VALUE_H_
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/json.h"
#include "common/type.h"
#include "common/value_kind.h"
namespace cel {
class Value;
class ValueView;
class ValueManager;
class UintValue;
class UintValueView;
class TypeManager;
namespace common_internal {
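// Shared implementation detail for UintValue and UintValueView: both are
// thin wrappers around a single uint64_t.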
struct UintValueBase {
static constexpr ValueKind kKind = ValueKind::kUint;
constexpr explicit UintValueBase(uint64_t value) noexcept : value(value) {}
UintValueBase() = default;
UintValueBase(const UintValueBase&) = default;
UintValueBase(UintValueBase&&) = default;
UintValueBase& operator=(const UintValueBase&) = default;
UintValueBase& operator=(UintValueBase&&) = default;
constexpr ValueKind kind() const { return kKind; }
UintType GetType(TypeManager&) const { return UintType(); }
absl::string_view GetTypeName() const { return UintType::kName; }
std::string DebugString() const;
absl::StatusOr<size_t> GetSerializedSize(AnyToJsonConverter&) const;
absl::Status SerializeTo(AnyToJsonConverter&, absl::Cord& value) const;
absl::StatusOr<absl::Cord> Serialize(AnyToJsonConverter&) const;
absl::StatusOr<std::string> GetTypeUrl(
absl::string_view prefix = kTypeGoogleApisComPrefix) const;
absl::StatusOr<Any> ConvertToAny(
AnyToJsonConverter&,
absl::string_view prefix = kTypeGoogleApisComPrefix) const;
absl::StatusOr<Json> ConvertToJson(AnyToJsonConverter&) const;
absl::Status Equal(ValueManager& value_manager, ValueView other,
Value& result) const;
absl::StatusOr<Value> Equal(ValueManager& value_manager,
ValueView other) const;
bool IsZeroValue() const { return NativeValue() == 0; }
constexpr uint64_t NativeValue() const { return value; }
constexpr operator uint64_t() const noexcept { return value; }
uint64_t value = 0;
};
}
class UintValue final : private common_internal::UintValueBase {
private:
using Base = UintValueBase;
public:
using view_alternative_type = UintValueView;
using Base::kKind;
UintValue() = default;
UintValue(const UintValue&) = default;
UintValue(UintValue&&) = default;
UintValue& operator=(const UintValue&) = default;
UintValue& operator=(UintValue&&) = default;
constexpr explicit UintValue(uint64_t value) noexcept : Base(value) {}
constexpr explicit UintValue(UintValueView other) noexcept;
using Base::kind;
using Base::GetType;
using Base::GetTypeName;
using Base::DebugString;
using Base::GetSerializedSize;
using Base::SerializeTo;
using Base::Serialize;
using Base::GetTypeUrl;
using Base::ConvertToAny;
using Base::ConvertToJson;
using Base::Equal;
using Base::IsZeroValue;
using Base::NativeValue;
using Base::operator uint64_t;
friend void swap(UintValue& lhs, UintValue& rhs) noexcept {
using std::swap;
swap(lhs.value, rhs.value);
}
private:
friend class UintValueView;
};
template <typename H>
H AbslHashValue(H state, UintValue value) {
return H::combine(std::move(state), value.NativeValue());
}
constexpr bool operator==(UintValue lhs, UintValue rhs) {
return lhs.NativeValue() == rhs.NativeValue();
}
constexpr bool operator!=(UintValue lhs, UintValue rhs) {
return !operator==(lhs, rhs);
}
inline std::ostream& operator<<(std::ostream& out, UintValue value) {
return out << value.DebugString();
}
class UintValueView final : private common_internal::UintValueBase {
private:
using Base = UintValueBase;
public:
using alternative_type = UintValue;
using Base::kKind;
UintValueView() = default;
UintValueView(const UintValueView&) = default;
UintValueView(UintValueView&&) = default;
UintValueView& operator=(const UintValueView&) = default;
UintValueView& operator=(UintValueView&&) = default;
constexpr explicit UintValueView(uint64_t value) noexcept : Base(value) {}
constexpr UintValueView(UintValue other) noexcept
: UintValueView(static_cast<uint64_t>(other)) {}
using Base::kind;
using Base::GetType;
using Base::GetTypeName;
using Base::DebugString;
using Base::GetSerializedSize;
using Base::SerializeTo;
using Base::Serialize;
using Base::GetTypeUrl;
using Base::ConvertToAny;
using Base::ConvertToJson;
using Base::Equal;
using Base::IsZeroValue;
using Base::NativeValue;
using Base::operator uint64_t;
friend void swap(UintValueView& lhs, UintValueView& rhs) noexcept {
using std::swap;
swap(lhs.value, rhs.value);
}
private:
friend class IntValue;
};
template <typename H>
H AbslHashValue(H state, UintValueView value) {
return H::combine(std::move(state), value.NativeValue());
}
constexpr bool operator==(UintValueView lhs, UintValueView rhs) {
return lhs.NativeValue() == rhs.NativeValue();
}
constexpr bool operator!=(UintValueView lhs, UintValueView rhs) {
return !operator==(lhs, rhs);
}
inline std::ostream& operator<<(std::ostream& out, UintValueView value) {
return out << value.DebugString();
}
inline constexpr UintValue::UintValue(UintValueView other) noexcept
: UintValue(static_cast<uint64_t>(other)) {}
}
#endif
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/number.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel::common_internal {
namespace {
std::string UintDebugString(uint64_t value) { return absl::StrCat(value, "u"); }  // take uint64_t so values above INT64_MAX print correctly
}
std::string UintValueBase::DebugString() const {
return UintDebugString(NativeValue());
}
absl::StatusOr<size_t> UintValueBase::GetSerializedSize(
AnyToJsonConverter&) const {
return internal::SerializedUInt64ValueSize(NativeValue());
}
absl::Status UintValueBase::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return internal::SerializeUInt64Value(NativeValue(), value);
}
absl::StatusOr<absl::Cord> UintValueBase::Serialize(
AnyToJsonConverter& value_manager) const {
absl::Cord value;
CEL_RETURN_IF_ERROR(SerializeTo(value_manager, value));
return value;
}
absl::StatusOr<std::string> UintValueBase::GetTypeUrl(
absl::string_view prefix) const {
return MakeTypeUrlWithPrefix(prefix, "google.protobuf.UInt64Value");
}
absl::StatusOr<Any> UintValueBase::ConvertToAny(
AnyToJsonConverter& value_manager, absl::string_view prefix) const {
CEL_ASSIGN_OR_RETURN(auto value, Serialize(value_manager));
CEL_ASSIGN_OR_RETURN(auto type_url, GetTypeUrl(prefix));
return MakeAny(std::move(type_url), std::move(value));
}
absl::StatusOr<Json> UintValueBase::ConvertToJson(AnyToJsonConverter&) const {
return JsonUint(NativeValue());
}
absl::Status UintValueBase::Equal(ValueManager&, ValueView other,
Value& result) const {
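  // CEL uint equality is heterogeneous: compare directly against another
  // uint, and against double/int via internal::Number to avoid lossy casts.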
if (auto other_value = As<UintValueView>(other); other_value.has_value()) {
result = BoolValueView{NativeValue() == other_value->NativeValue()};
return absl::OkStatus();
}
if (auto other_value = As<DoubleValueView>(other); other_value.has_value()) {
result =
BoolValueView{internal::Number::FromUint64(NativeValue()) ==
internal::Number::FromDouble(other_value->NativeValue())};
return absl::OkStatus();
}
if (auto other_value = As<IntValueView>(other); other_value.has_value()) {
result =
BoolValueView{internal::Number::FromUint64(NativeValue()) ==
internal::Number::FromInt64(other_value->NativeValue())};
return absl::OkStatus();
}
result = BoolValueView{false};
return absl::OkStatus();
}
absl::StatusOr<Value> UintValueBase::Equal(ValueManager& value_manager,
ValueView other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
} | #include <cstdint>
#include <sstream>
#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
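// Value tests are parameterized over both pooling and reference-counting
// memory management; see INSTANTIATE_TEST_SUITE_P below.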
using testing::An;
using testing::Ne;
using cel::internal::IsOkAndHolds;
using UintValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(UintValueTest, Kind) {
EXPECT_EQ(UintValue(1).kind(), UintValue::kKind);
EXPECT_EQ(Value(UintValue(1)).kind(), UintValue::kKind);
}
TEST_P(UintValueTest, DebugString) {
{
std::ostringstream out;
out << UintValue(1);
EXPECT_EQ(out.str(), "1u");
}
{
std::ostringstream out;
out << Value(UintValue(1));
EXPECT_EQ(out.str(), "1u");
}
}
TEST_P(UintValueTest, GetSerializedSize) {
EXPECT_THAT(UintValue().GetSerializedSize(value_manager()), IsOkAndHolds(0));
}
TEST_P(UintValueTest, ConvertToAny) {
EXPECT_THAT(UintValue().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.UInt64Value"),
absl::Cord())));
}
TEST_P(UintValueTest, ConvertToJson) {
EXPECT_THAT(UintValue(1).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(UintValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(UintValue(1)), NativeTypeId::For<UintValue>());
EXPECT_EQ(NativeTypeId::Of(Value(UintValue(1))),
NativeTypeId::For<UintValue>());
}
TEST_P(UintValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<UintValue>(UintValue(1)));
EXPECT_TRUE(InstanceOf<UintValue>(Value(UintValue(1))));
}
TEST_P(UintValueTest, Cast) {
EXPECT_THAT(Cast<UintValue>(UintValue(1)), An<UintValue>());
EXPECT_THAT(Cast<UintValue>(Value(UintValue(1))), An<UintValue>());
}
TEST_P(UintValueTest, As) {
EXPECT_THAT(As<UintValue>(UintValue(1)), Ne(absl::nullopt));
EXPECT_THAT(As<UintValue>(Value(UintValue(1))), Ne(absl::nullopt));
}
TEST_P(UintValueTest, HashValue) {
EXPECT_EQ(absl::HashOf(UintValue(1)), absl::HashOf(uint64_t{1}));
}
TEST_P(UintValueTest, Equality) {
EXPECT_NE(UintValue(0u), 1u);
EXPECT_NE(1u, UintValue(0u));
EXPECT_NE(UintValue(0u), UintValue(1u));
}
TEST_P(UintValueTest, LessThan) {
EXPECT_LT(UintValue(0), 1);
EXPECT_LT(0, UintValue(1));
EXPECT_LT(UintValue(0), UintValue(1));
}
INSTANTIATE_TEST_SUITE_P(
UintValueTest, UintValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
UintValueTest::ToString);
using UintValueViewTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(UintValueViewTest, Kind) {
EXPECT_EQ(UintValueView(1).kind(), UintValueView::kKind);
EXPECT_EQ(ValueView(UintValueView(1)).kind(), UintValueView::kKind);
}
TEST_P(UintValueViewTest, DebugString) {
{
std::ostringstream out;
out << UintValueView(1);
EXPECT_EQ(out.str(), "1u");
}
{
std::ostringstream out;
out << ValueView(UintValueView(1));
EXPECT_EQ(out.str(), "1u");
}
}
TEST_P(UintValueViewTest, GetSerializedSize) {
EXPECT_THAT(UintValueView().GetSerializedSize(value_manager()),
IsOkAndHolds(0));
}
TEST_P(UintValueViewTest, ConvertToAny) {
EXPECT_THAT(UintValueView().ConvertToAny(value_manager()),
IsOkAndHolds(MakeAny(MakeTypeUrl("google.protobuf.UInt64Value"),
absl::Cord())));
}
TEST_P(UintValueViewTest, ConvertToJson) {
EXPECT_THAT(UintValueView(1).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(UintValueViewTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(UintValueView(1)),
NativeTypeId::For<UintValueView>());
EXPECT_EQ(NativeTypeId::Of(ValueView(UintValueView(1))),
NativeTypeId::For<UintValueView>());
}
TEST_P(UintValueViewTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<UintValueView>(UintValueView(1)));
EXPECT_TRUE(InstanceOf<UintValueView>(ValueView(UintValueView(1))));
}
TEST_P(UintValueViewTest, Cast) {
EXPECT_THAT(Cast<UintValueView>(UintValueView(1)), An<UintValueView>());
EXPECT_THAT(Cast<UintValueView>(ValueView(UintValueView(1))),
An<UintValueView>());
}
TEST_P(UintValueViewTest, As) {
EXPECT_THAT(As<UintValueView>(UintValueView(1)), Ne(absl::nullopt));
EXPECT_THAT(As<UintValueView>(ValueView(UintValueView(1))),
Ne(absl::nullopt));
}
TEST_P(UintValueViewTest, HashValue) {
EXPECT_EQ(absl::HashOf(UintValueView(1)), absl::HashOf(uint64_t{1}));
}
TEST_P(UintValueViewTest, Equality) {
EXPECT_NE(UintValueView(UintValue(0u)), 1u);
EXPECT_NE(1u, UintValueView(0u));
EXPECT_NE(UintValueView(0u), UintValueView(1u));
EXPECT_NE(UintValueView(0u), UintValue(1u));
EXPECT_NE(UintValue(1u), UintValueView(0u));
}
TEST_P(UintValueViewTest, LessThan) {
EXPECT_LT(UintValueView(0), 1);
EXPECT_LT(0, UintValueView(1));
EXPECT_LT(UintValueView(0), UintValueView(1));
EXPECT_LT(UintValueView(0), UintValue(1));
EXPECT_LT(UintValue(0), UintValueView(1));
}
INSTANTIATE_TEST_SUITE_P(
UintValueViewTest, UintValueViewTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
UintValueViewTest::ToString);
}
} |
184 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_EVAL_CONST_TENSOR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_EVAL_CONST_TENSOR_H_
#include <cstdint>
#include <optional>
#include "absl/functional/function_ref.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
class GraphRunner;
class Node;
class OpRegistryInterface;
class ShapeRefiner;
class Tensor;
struct EvaluateConstantTensorRunner {
const OpRegistryInterface* op_registry = nullptr;
int32_t graph_def_version = 0;
GraphRunner* graph_runner = nullptr;
};
absl::StatusOr<std::optional<Tensor>> EvaluateConstantTensor(
const Node& node, int node_output,
const ShapeRefiner& refiner,
absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
std::optional<EvaluateConstantTensorRunner> runner);
}
#endif
#include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace {
using ::tensorflow::shape_inference::InferenceContext;
bool IsRank(const Node& n) { return n.type_string() == "Rank"; }
bool IsSize(const Node& n) { return n.type_string() == "Size"; }
bool IsShape(const Node& n) { return n.type_string() == "Shape"; }
bool IsStridedSlice(const Node& n) { return n.type_string() == "StridedSlice"; }
bool IsPlaceholderWithDefault(const Node& n) {
return n.type_string() == "PlaceholderWithDefault";
}
bool IsUnstack(const Node& n) { return n.type_string() == "Unpack"; }
bool HasIntAttr(const Node& n, absl::string_view name, int64_t expected) {
int64_t actual;
return TryGetNodeAttr(n.def(), name, &actual) && actual == expected;
}
std::optional<int64_t> GetIntConst(const Node& node) {
const TensorProto* proto;
Tensor tensor;
if (node.IsConstant() && TryGetNodeAttr(node.def(), "value", &proto) &&
(proto->dtype() == DT_INT32 || proto->dtype() == DT_INT64) &&
TensorShape(proto->tensor_shape()).num_elements() == 1 &&
tensor.FromProto(*proto)) {
if (proto->dtype() == DT_INT32) {
return *static_cast<const int32_t*>(tensor.data());
} else {
return *static_cast<const int64_t*>(tensor.data());
}
}
return std::nullopt;
}
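// Returns the constant index when `node` statically selects a single element
// of its first input: an Unpack along axis 0, or a shrink-axis StridedSlice
// whose begin index is a constant.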
std::optional<int64_t> GetSliceIndex(const Node& node, const int node_output) {
std::optional<int64_t> ix;
if (IsUnstack(node)) {
if (HasIntAttr(node, "axis", 0)) {
ix = node_output;
}
} else if (IsStridedSlice(node)) {
const Edge* edge;
if (HasIntAttr(node, "begin_mask", 0) && HasIntAttr(node, "end_mask", 0) &&
HasIntAttr(node, "ellipsis_mask", 0) &&
HasIntAttr(node, "new_axis_mask", 0) &&
HasIntAttr(node, "shrink_axis_mask", 1) &&
node.input_edge(1, &edge).ok()) {
ix = GetIntConst(*edge->src());
}
}
return ix;
}
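// Tries to materialize the output of Shape/Rank/Size (or a single-element
// slice of a Shape) directly from statically known shape information,
// without executing the node.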
absl::StatusOr<std::optional<Tensor>> TryInferFromShapes(
const Node& node, const int node_output, const ShapeRefiner& refiner) {
std::optional<Tensor> result;
if (node.num_inputs() == 0 || node_output >= node.num_outputs()) {
return result;
}
const auto dtype = node.output_type(node_output);
if (dtype != DT_INT32 && dtype != DT_INT64) {
return result;
}
absl::InlinedVector<int64_t, 8> data;
std::optional<TensorShape> shape;
const Edge* edge;
if (IsShape(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->FullyDefined(c->input(0))) {
const int64_t rank = c->Rank(c->input(0));
for (int i = 0; i < rank; ++i) {
data.push_back(c->Value(c->Dim(c->input(0), i)));
}
shape.emplace({rank});
}
} else if (IsRank(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->RankKnown(c->input(0))) {
data.push_back(c->Rank(c->input(0)));
shape.emplace();
}
} else if (IsSize(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->FullyDefined(c->input(0))) {
int64_t size = 1;
for (int i = 0, rank = c->Rank(c->input(0)); i < rank; i++) {
size *= c->Value(c->Dim(c->input(0), i));
}
data.push_back(size);
shape.emplace();
}
} else if (node.input_edge(0, &edge).ok() && IsShape(*edge->src())) {
InferenceContext* c = refiner.GetContext(edge->src());
if (c != nullptr && c->RankKnown(c->input(0))) {
const int64_t rank = c->Rank(c->input(0));
std::optional<int64_t> ix = GetSliceIndex(node, node_output);
if (ix.has_value() && -rank <= *ix && *ix < rank &&
c->ValueKnown(c->Dim(c->input(0), *ix))) {
data.push_back(c->Value(c->Dim(c->input(0), *ix)));
shape.emplace();
}
}
}
if (!shape.has_value()) {
return result;
}
if (dtype == DT_INT32) {
for (const int64_t value : data) {
if (TF_PREDICT_FALSE(value >= std::numeric_limits<int32_t>::max())) {
return errors::InvalidArgument("Value is out of int32 range: ", value);
}
}
}
result.emplace(dtype, *shape);
if (dtype == DT_INT32) {
absl::c_copy(data, static_cast<int32_t*>(result->data()));
} else {
absl::c_copy(data, static_cast<int64_t*>(result->data()));
}
return result;
}
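// Conservatively limits evaluation to nodes that are safe to run on the host
// CPU: not stateful, not control flow (Enter/Exit/Merge), not a function
// call, carrying no function-valued attributes, and with a CPU kernel
// available.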
bool IsSupportedForEvaluation(const Node& node) {
if (node.IsConstant() || node.IsArg()) {
return true;
}
if (node.num_inputs() == 0 || IsPlaceholderWithDefault(node)) {
return false;
}
if (node.op_def().is_stateful()) {
return false;
}
if (node.IsEnter() || node.IsExit() || node.IsMerge()) {
return false;
}
if (node.IsFunctionCall()) {
return false;
}
for (const auto& [name, attr] : node.attrs()) {
if (attr.has_func() || !attr.list().func().empty()) {
return false;
}
}
return KernelDefAvailable(DEVICE_CPU, node.def());
}
struct Subgraph {
Subgraph(const OpRegistryInterface* op_registry, int32_t graph_def_version)
: graph(op_registry == nullptr ? OpRegistry::Global() : op_registry) {
VersionDef versions = graph.versions();
versions.set_producer(graph_def_version);
graph.set_versions(versions);
}
GraphRunner::NamedTensorList inputs;
Graph graph;
};
using NodeOutput = std::pair<const Node*, int>;
std::string OutputName(const NodeOutput& output) {
return strings::StrCat(output.first->name(), ":", output.second);
}
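// Builds a standalone copy of the constant subgraph feeding `target_node`.
// Input edges are walked breadth-first: sources with known values become
// feeds, and a null return means the subgraph is not constant-evaluable.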
absl::StatusOr<std::unique_ptr<Subgraph>> ExtractConstantSubgraph(
const Node& target_node, const ShapeRefiner& refiner,
const absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
const OpRegistryInterface* op_registry, const int32_t graph_def_version) {
std::unique_ptr<Subgraph> subgraph;
if (!target_node.IsEnter() && !IsSupportedForEvaluation(target_node)) {
return subgraph;
}
std::vector<const Edge*> edges;
for (const Edge* edge : target_node.in_edges()) {
if (!edge->IsControlEdge()) {
edges.push_back(edge);
}
}
absl::flat_hash_map<const Node*, Node*> new_by_old_node;
absl::InlinedVector<const Node*, 8> arg_nodes;
absl::flat_hash_map<NodeOutput, Tensor> const_inputs;
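  // Note: `edges` grows inside the loop below (new in-edges are appended as
  // evaluable nodes are discovered), so index-based iteration is required.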
for (int edge_ix = 0; edge_ix < edges.size(); ++edge_ix) {
const Edge& edge = *edges[edge_ix];
const Node& node = *edge.src();
const NodeOutput node_output = {&node, edge.src_output()};
if (new_by_old_node.contains(&node) || const_inputs.contains(node_output)) {
continue;
}
if (node.IsArg()) {
arg_nodes.push_back(&node);
continue;
}
auto tensor = lookup(node, node_output.second);
if (!tensor.has_value()) {
TF_ASSIGN_OR_RETURN(
tensor, TryInferFromShapes(node, node_output.second, refiner));
}
if (tensor.has_value()) {
const_inputs.emplace(node_output, *std::move(tensor));
} else if (!IsSupportedForEvaluation(node)) {
return subgraph;
} else {
new_by_old_node.emplace(&node, nullptr);
for (const Edge* edge : node.in_edges()) {
if (!edge->IsControlEdge()) {
edges.push_back(edge);
}
}
}
}
bool all_args_provided = true;
for (const Node* node : arg_nodes) {
auto tensor = lookup(*node, 0);
all_args_provided = all_args_provided && tensor.has_value();
if (all_args_provided) {
const_inputs.emplace(NodeOutput{node, 0}, *std::move(tensor));
}
}
if (!all_args_provided) {
return subgraph;
}
subgraph = std::make_unique<Subgraph>(op_registry, graph_def_version);
auto& inputs = subgraph->inputs;
inputs.reserve(const_inputs.size());
for (auto& [node_output, tensor] : const_inputs) {
if (!new_by_old_node.contains(node_output.first)) {
inputs.emplace_back(OutputName(node_output), std::move(tensor));
}
}
Graph& graph = subgraph->graph;
new_by_old_node[&target_node] = graph.CopyNode(&target_node);
for (const Edge* edge : edges) {
Node*& src = new_by_old_node[edge->src()];
if (src == nullptr) {
src = graph.CopyNode(edge->src());
}
Node* dst = new_by_old_node.at(edge->dst());
graph.AddEdge(src, edge->src_output(), dst, edge->dst_input());
}
return subgraph;
}
}
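// Resolution order: the caller-provided lookup first, then the Const value
// attribute, then static shape inference, and finally (when a runner is
// supplied) execution of the extracted constant subgraph.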
absl::StatusOr<std::optional<Tensor>> EvaluateConstantTensor(
const Node& node, const int node_output, const ShapeRefiner& refiner,
const absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
const std::optional<EvaluateConstantTensorRunner> runner) {
std::optional<Tensor> result;
if (result = lookup(node, node_output); result.has_value()) {
return result;
}
if (node.IsArg()) {
return result;
}
if (node.IsConstant()) {
const TensorProto* proto;
TF_RETURN_IF_ERROR(GetNodeAttr(node.def(), "value", &proto));
result.emplace();
if (TF_PREDICT_FALSE(!result->FromProto(*proto))) {
return errors::InvalidArgument("Unable to evaluate a constant node");
}
return result;
}
TF_ASSIGN_OR_RETURN(result, TryInferFromShapes(node, node_output, refiner));
if (result.has_value()) {
return result;
}
if (!runner.has_value()) {
return result;
}
TF_ASSIGN_OR_RETURN(
const auto subgraph,
ExtractConstantSubgraph(node, refiner, lookup, runner->op_registry,
runner->graph_def_version));
if (subgraph != nullptr) {
GraphRunner* graph_runner = runner->graph_runner;
std::unique_ptr<GraphRunner> tmp_graph_runner;
if (graph_runner == nullptr) {
tmp_graph_runner = std::make_unique<GraphRunner>(Env::Default());
graph_runner = tmp_graph_runner.get();
}
FunctionLibraryRuntime* function_library = nullptr;
std::vector<Tensor> outputs;
auto status =
graph_runner->Run(&subgraph->graph, function_library, subgraph->inputs,
{OutputName({&node, node_output})}, &outputs);
if (status.ok()) {
result = std::move(outputs[0]);
}
}
return result;
}
} | #include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/logging_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
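// The fixture drives EvaluateConstantTensor with an optional runner and a
// lookup function backed by `cache_`, while recording in `requested_` which
// nodes the evaluation queried.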
class EvaluateConstantTensorTest : public ::testing::Test {
public:
EvaluateConstantTensorTest& WithRunner() {
runner_ = EvaluateConstantTensorRunner{
scope_.graph()->op_registry(),
scope_.graph()->versions().producer(),
};
return *this;
}
absl::StatusOr<std::optional<Tensor>> Run(const Output& output) {
TF_RETURN_IF_ERROR(scope_.status());
const auto& graph = *scope_.graph();
ShapeRefiner refiner(graph.versions(), graph.op_registry());
for (const auto* node : graph.nodes()) {
TF_RETURN_IF_ERROR(refiner.AddNode(node));
}
auto lookup = [this](const Node& node, int index) -> std::optional<Tensor> {
requested_.insert(&node);
auto it = cache_.find(std::make_pair(&node, index));
if (it == cache_.end()) {
return std::nullopt;
}
return it->second;
};
auto runner = runner_;
runner_ = std::nullopt;
requested_.clear();
return EvaluateConstantTensor(*output.node(), output.index(), refiner,
lookup, runner);
}
void ExpectTensor(const Output& output, const Tensor& expected) {
TF_ASSERT_OK_AND_ASSIGN(auto actual, Run(output));
ASSERT_TRUE(actual.has_value());
test::ExpectEqual(*actual, expected);
}
void ExpectNull(const Output& output) {
TF_ASSERT_OK_AND_ASSIGN(auto actual, Run(output));
ASSERT_FALSE(actual.has_value());
}
void ExpectError(const Output& output) { EXPECT_FALSE(Run(output).ok()); }
protected:
Scope scope_ = Scope::NewRootScope();
absl::flat_hash_map<std::pair<const Node*, int>, Tensor> cache_;
absl::flat_hash_set<const Node*> requested_;
std::optional<EvaluateConstantTensorRunner> runner_ = std::nullopt;
};
template <typename T>
Output Placeholder(const Scope& scope, const PartialTensorShape& shape) {
return ops::Placeholder(scope, DataTypeToEnum<T>::value,
ops::Placeholder::Shape(shape));
}
Output Slice(const Scope& scope, const Output& input, int index) {
return ops::StridedSlice(
scope, input, ops::Const(scope, {index}), ops::Const(scope, {index + 1}),
ops::Const(scope, {1}), ops::StridedSlice::ShrinkAxisMask(1));
}
TEST_F(EvaluateConstantTensorTest, Constant) {
auto expected = test::AsTensor<float>({1, 2, 3});
auto op = ops::Const(scope_, expected);
ExpectTensor(op, expected);
}
TEST_F(EvaluateConstantTensorTest, Shape) {
auto input = Placeholder<float>(scope_, {2, 3, 5});
auto shape = ops::Shape(scope_, input);
ExpectTensor(shape, test::AsTensor<int32_t>({2, 3, 5}));
}
TEST_F(EvaluateConstantTensorTest, ValueOutOfRange) {
const int64_t dim = std::numeric_limits<int32_t>::max();
auto input = Placeholder<float>(scope_, {dim});
auto shape32 = ops::Shape(scope_, input, ops::Shape::OutType(DT_INT32));
auto shape64 = ops::Shape(scope_, input, ops::Shape::OutType(DT_INT64));
ExpectError(shape32);
ExpectTensor(shape64, test::AsTensor<int64_t>({dim}));
}
TEST_F(EvaluateConstantTensorTest, PartialShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
ExpectNull(shape);
}
TEST_F(EvaluateConstantTensorTest, Rank) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto rank = ops::Rank(scope_, input);
ExpectTensor(rank, test::AsScalar<int32_t>(3));
}
TEST_F(EvaluateConstantTensorTest, Size) {
auto input = Placeholder<float>(scope_, {2, 3, 5});
auto size = ops::Size(scope_, input);
ExpectTensor(size, test::AsScalar<int32_t>(2 * 3 * 5));
}
TEST_F(EvaluateConstantTensorTest, PartialSize) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto size = ops::Size(scope_, input);
ExpectNull(size);
}
TEST_F(EvaluateConstantTensorTest, SliceShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
auto slice0 = Slice(scope_, shape, 0);
auto slice1 = Slice(scope_, shape, 1);
auto slice2 = Slice(scope_, shape, 2);
ExpectTensor(slice0, test::AsScalar<int32_t>(2));
ExpectNull(slice1);
ExpectTensor(slice2, test::AsScalar<int32_t>(5));
}
TEST_F(EvaluateConstantTensorTest, UnpackShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
auto unpack = ops::Unstack(scope_, shape, 3, ops::Unstack::Axis(0));
ExpectTensor(unpack[0], test::AsScalar<int32_t>(2));
ExpectNull(unpack[1]);
ExpectTensor(unpack[2], test::AsScalar<int32_t>(5));
}
TEST_F(EvaluateConstantTensorTest, Lookup) {
auto input = Placeholder<float>(scope_, {2});
ExpectNull(input);
auto expected = test::AsTensor<float>({3, 5});
cache_.emplace(std::make_pair(input.node(), 0), expected);
ExpectTensor(input, expected);
}
TEST_F(EvaluateConstantTensorTest, ConstantFolding) {
auto input1 = Placeholder<float>(scope_, {2, -1, 5});
auto input2 = ops::_Arg(scope_, DT_INT32, 0);
auto shape = ops::Shape(scope_, input1);
auto result = ops::Add(scope_, Slice(scope_, shape, 2), input2);
ExpectNull(result);
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(input2.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(5 + 7));
}
TEST_F(EvaluateConstantTensorTest, DoNotEvalPlaceholderWithDefault) {
auto tensor = test::AsTensor<float>({1, 2, 3});
auto result1 = ops::Identity(scope_, tensor);
auto result2 = ops::PlaceholderWithDefault(scope_, tensor, tensor.shape());
WithRunner().ExpectTensor(result1, tensor);
WithRunner().ExpectNull(result2);
}
TEST_F(EvaluateConstantTensorTest, AllArgsMustBeRequestedForConstSubgraph) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto arg2 = ops::_Arg(scope_, DT_INT32, 2);
auto result = ops::Mul(scope_, arg0, ops::Add(scope_, arg1, arg2));
cache_.emplace(std::make_pair(arg1.node(), 0), test::AsScalar<int32_t>(3));
WithRunner().ExpectNull(result);
EXPECT_TRUE(requested_.contains(arg0.node()));
EXPECT_TRUE(requested_.contains(arg1.node()));
EXPECT_TRUE(requested_.contains(arg2.node()));
cache_.emplace(std::make_pair(arg0.node(), 0), test::AsScalar<int32_t>(5));
cache_.emplace(std::make_pair(arg2.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(5 * (3 + 7)));
}
TEST_F(EvaluateConstantTensorTest, NoArgsMustBeRequestedForNonConstSubgraph) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto arg2 = ops::_Arg(scope_, DT_INT32, 2);
auto feed = Placeholder<int32_t>(scope_, {});
auto result = ops::Mul(scope_, arg0,
ops::Add(scope_, arg1, ops::Add(scope_, arg2, feed)));
WithRunner().ExpectNull(result);
EXPECT_FALSE(requested_.contains(arg0.node()));
EXPECT_FALSE(requested_.contains(arg1.node()));
EXPECT_FALSE(requested_.contains(arg2.node()));
EXPECT_TRUE(requested_.contains(feed.node()));
}
TEST_F(EvaluateConstantTensorTest, MissingKernel) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto print = ops::Print(scope_, arg1, {arg1.output});
auto result = ops::Add(scope_, arg0, print);
ASSERT_FALSE(KernelDefAvailable(DEVICE_CPU, print.node()->def()));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(arg0.node(), 0), test::AsScalar<int32_t>(3));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(arg1.node(), 0), test::AsScalar<int32_t>(5));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(print.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(3 + 7));
}
template <bool kEvaluated>
void BM_ConstantFolding(::testing::benchmark::State& state) {
Scope scope = Scope::NewRootScope();
auto input1 = Placeholder<float>(scope, {2, -1, 5});
auto input2 = ops::_Arg(scope, DT_INT32, 0);
auto input3 = ops::_Arg(scope, DT_INT32, 0);
auto shape = ops::Shape(scope, input1);
auto result =
ops::Mul(scope, ops::Add(scope, Slice(scope, shape, 2), input2), input3);
TF_CHECK_OK(scope.status());
const auto& graph = *scope.graph();
ShapeRefiner refiner(graph.versions(), graph.op_registry());
for (const auto* node : graph.nodes()) {
TF_CHECK_OK(refiner.AddNode(node));
}
auto tensor2 = test::AsScalar<int32_t>(7);
auto tensor3 = test::AsScalar<int32_t>(11);
auto lookup = [&](const Node& node, int index) -> std::optional<Tensor> {
if (kEvaluated && &node == input2.node()) {
return tensor2;
}
if (&node == input3.node()) {
return tensor3;
}
return std::nullopt;
};
GraphRunner graph_runner(Env::Default());
const EvaluateConstantTensorRunner runner = {
graph.op_registry(), graph.versions().producer(), &graph_runner};
for (auto unused : state) {
auto status_or =
EvaluateConstantTensor(*result.node(), 0, refiner, lookup, runner);
TF_CHECK_OK(status_or.status());
CHECK_EQ(status_or->has_value(), kEvaluated);
}
}
BENCHMARK_TEMPLATE(BM_ConstantFolding, false);
BENCHMARK_TEMPLATE(BM_ConstantFolding, true);
}
} |
185 | #ifndef AROLLA_QEXPR_CORE_UTILITY_OPERATORS_H_
#define AROLLA_QEXPR_CORE_UTILITY_OPERATORS_H_
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/qtype.h"
namespace arolla {
OperatorPtr MakeCopyOp(QTypePtr type);
}
#endif
#include "arolla/qexpr/operators/core/utility_operators.h"
#include <memory>
#include <string>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
namespace arolla {
namespace {
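// A unary operator that copies its input slot to the output slot unchanged;
// input and output share the same QType.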
class CopyOperator : public QExprOperator {
public:
explicit CopyOperator(QTypePtr type)
: QExprOperator("core._copy", QExprOperatorSignature::Get({type}, type)) {
}
private:
absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot) const final {
return MakeBoundOperator(
[input_slot = input_slots[0], output_slot = output_slot](
EvaluationContext*, FramePtr frame) {
input_slot.CopyTo(frame, output_slot, frame);
});
}
};
}
OperatorPtr MakeCopyOp(QTypePtr type) {
return OperatorPtr(std::make_unique<CopyOperator>(type));
}
} | #include "arolla/qexpr/operators/core/utility_operators.h"
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
namespace {
using ::testing::Eq;
TEST(UtilityOperatorsTest, Identity) {
auto i32 = GetQType<int>();
auto copy_op = MakeCopyOp(i32);
ASSERT_EQ(copy_op->signature(), QExprOperatorSignature::Get({i32}, i32));
FrameLayout::Builder layout_builder;
auto i0_slot = layout_builder.AddSlot<int>();
auto i1_slot = layout_builder.AddSlot<int>();
ASSERT_OK_AND_ASSIGN(
auto copy_bound_op0,
copy_op->Bind(ToTypedSlots(i0_slot), TypedSlot::FromSlot(i1_slot)));
auto memory_layout = std::move(layout_builder).Build();
RootEvaluationContext root_ctx(&memory_layout);
EvaluationContext ctx(root_ctx);
root_ctx.Set(i0_slot, 7);
copy_bound_op0->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
EXPECT_THAT(root_ctx.Get(i1_slot), Eq(7));
}
TEST(UtilityOperatorsTest, MakeTuple) {
auto i32 = GetQType<int>();
auto f64 = GetQType<double>();
auto tuple_qtype = MakeTupleQType({i32, f64});
ASSERT_OK_AND_ASSIGN(auto copy_op,
OperatorRegistry::GetInstance()->LookupOperator(
"core.make_tuple", {i32, f64}, tuple_qtype));
ASSERT_EQ(copy_op->signature(),
QExprOperatorSignature::Get({i32, f64}, tuple_qtype));
FrameLayout::Builder layout_builder;
auto tuple0_slot = AddSlot(tuple_qtype, &layout_builder);
ASSERT_EQ(tuple0_slot.SubSlotCount(), 2);
ASSERT_OK_AND_ASSIGN(auto i0_slot, tuple0_slot.SubSlot(0).ToSlot<int>());
ASSERT_OK_AND_ASSIGN(auto d0_slot, tuple0_slot.SubSlot(1).ToSlot<double>());
auto tuple1_slot = AddSlot(tuple_qtype, &layout_builder);
ASSERT_EQ(tuple1_slot.SubSlotCount(), 2);
ASSERT_OK_AND_ASSIGN(auto i1_slot, tuple1_slot.SubSlot(0).ToSlot<int>());
ASSERT_OK_AND_ASSIGN(auto d1_slot, tuple1_slot.SubSlot(1).ToSlot<double>());
ASSERT_OK_AND_ASSIGN(
auto copy_bound_op,
copy_op->Bind(ToTypedSlots(i0_slot, d0_slot), {tuple1_slot}));
auto memory_layout = std::move(layout_builder).Build();
RootEvaluationContext root_ctx(&memory_layout);
EvaluationContext ctx(root_ctx);
root_ctx.Set(i0_slot, 7);
root_ctx.Set(d0_slot, 4.5);
copy_bound_op->Run(&ctx, root_ctx.frame());
EXPECT_OK(ctx.status());
EXPECT_THAT(root_ctx.Get(i1_slot), Eq(7));
EXPECT_THAT(root_ctx.Get(d1_slot), Eq(4.5));
}
}
} |
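The pair above registers `core._copy` as an identity operator over a single slot. A minimal usage sketch, condensed from the `Identity` test and assuming the same arolla headers and test scaffolding:

```cpp
// Copy one int slot to another through the core._copy operator.
FrameLayout::Builder builder;
auto src = builder.AddSlot<int>();
auto dst = builder.AddSlot<int>();
auto copy_op = MakeCopyOp(GetQType<int>());
auto bound =
    copy_op->Bind(ToTypedSlots(src), TypedSlot::FromSlot(dst)).value();
auto layout = std::move(builder).Build();
RootEvaluationContext root_ctx(&layout);
EvaluationContext ctx(root_ctx);
root_ctx.Set(src, 42);
bound->Run(&ctx, root_ctx.frame());
// If ctx.status() is OK, root_ctx.Get(dst) == 42.
```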
186 | #ifndef QUICHE_OBLIVIOUS_HTTP_OBLIVIOUS_HTTP_GATEWAY_H_
#define QUICHE_OBLIVIOUS_HTTP_OBLIVIOUS_HTTP_GATEWAY_H_
#include <memory>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "openssl/base.h"
#include "openssl/hpke.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/quiche_random.h"
#include "quiche/oblivious_http/buffers/oblivious_http_request.h"
#include "quiche/oblivious_http/buffers/oblivious_http_response.h"
#include "quiche/oblivious_http/common/oblivious_http_header_key_config.h"
namespace quiche {
class QUICHE_EXPORT ObliviousHttpGateway {
public:
static absl::StatusOr<ObliviousHttpGateway> Create(
absl::string_view hpke_private_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config,
QuicheRandom* quiche_random = nullptr);
ObliviousHttpGateway(ObliviousHttpGateway&& other) = default;
ObliviousHttpGateway& operator=(ObliviousHttpGateway&& other) = default;
~ObliviousHttpGateway() = default;
absl::StatusOr<ObliviousHttpRequest> DecryptObliviousHttpRequest(
absl::string_view encrypted_data,
absl::string_view request_label =
ObliviousHttpHeaderKeyConfig::kOhttpRequestLabel) const;
absl::StatusOr<ObliviousHttpResponse> CreateObliviousHttpResponse(
std::string plaintext_data,
ObliviousHttpRequest::Context& oblivious_http_request_context,
absl::string_view response_label =
ObliviousHttpHeaderKeyConfig::kOhttpResponseLabel) const;
private:
explicit ObliviousHttpGateway(
bssl::UniquePtr<EVP_HPKE_KEY> recipient_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config,
QuicheRandom* quiche_random);
bssl::UniquePtr<EVP_HPKE_KEY> server_hpke_key_;
ObliviousHttpHeaderKeyConfig ohttp_key_config_;
QuicheRandom* quiche_random_;
};
}
#endif
#include "quiche/oblivious_http/oblivious_http_gateway.h"
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "quiche/common/quiche_crypto_logging.h"
#include "quiche/common/quiche_random.h"
namespace quiche {
ObliviousHttpGateway::ObliviousHttpGateway(
bssl::UniquePtr<EVP_HPKE_KEY> recipient_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config,
QuicheRandom* quiche_random)
: server_hpke_key_(std::move(recipient_key)),
ohttp_key_config_(ohttp_key_config),
quiche_random_(quiche_random) {}
absl::StatusOr<ObliviousHttpGateway> ObliviousHttpGateway::Create(
absl::string_view hpke_private_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config,
QuicheRandom* quiche_random) {
if (hpke_private_key.empty()) {
return absl::InvalidArgumentError("Invalid/Empty HPKE private key.");
}
bssl::UniquePtr<EVP_HPKE_KEY> recipient_key(EVP_HPKE_KEY_new());
if (recipient_key == nullptr) {
return SslErrorAsStatus(
"Failed to initialize ObliviousHttpGateway/Server's Key.");
}
if (!EVP_HPKE_KEY_init(
recipient_key.get(), ohttp_key_config.GetHpkeKem(),
reinterpret_cast<const uint8_t*>(hpke_private_key.data()),
hpke_private_key.size())) {
return SslErrorAsStatus("Failed to import HPKE private key.");
}
if (quiche_random == nullptr) quiche_random = QuicheRandom::GetInstance();
return ObliviousHttpGateway(std::move(recipient_key), ohttp_key_config,
quiche_random);
}
absl::StatusOr<ObliviousHttpRequest>
ObliviousHttpGateway::DecryptObliviousHttpRequest(
absl::string_view encrypted_data, absl::string_view request_label) const {
return ObliviousHttpRequest::CreateServerObliviousRequest(
encrypted_data, *(server_hpke_key_), ohttp_key_config_, request_label);
}
absl::StatusOr<ObliviousHttpResponse>
ObliviousHttpGateway::CreateObliviousHttpResponse(
std::string plaintext_data,
ObliviousHttpRequest::Context& oblivious_http_request_context,
absl::string_view response_label) const {
return ObliviousHttpResponse::CreateServerObliviousResponse(
std::move(plaintext_data), oblivious_http_request_context, response_label,
quiche_random_);
}
} | #include "quiche/oblivious_http/oblivious_http_gateway.h"
#include <stdint.h>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_thread.h"
#include "quiche/common/quiche_random.h"
#include "quiche/oblivious_http/buffers/oblivious_http_request.h"
namespace quiche {
namespace {
std::string GetHpkePrivateKey() {
absl::string_view hpke_key_hex =
"b77431ecfa8f4cfc30d6e467aafa06944dffe28cb9dd1409e33a3045f5adc8a1";
std::string hpke_key_bytes;
EXPECT_TRUE(absl::HexStringToBytes(hpke_key_hex, &hpke_key_bytes));
return hpke_key_bytes;
}
std::string GetHpkePublicKey() {
absl::string_view public_key =
"6d21cfe09fbea5122f9ebc2eb2a69fcc4f06408cd54aac934f012e76fcdcef62";
std::string public_key_bytes;
EXPECT_TRUE(absl::HexStringToBytes(public_key, &public_key_bytes));
return public_key_bytes;
}
const ObliviousHttpHeaderKeyConfig GetOhttpKeyConfig(uint8_t key_id,
uint16_t kem_id,
uint16_t kdf_id,
uint16_t aead_id) {
auto ohttp_key_config =
ObliviousHttpHeaderKeyConfig::Create(key_id, kem_id, kdf_id, aead_id);
EXPECT_TRUE(ohttp_key_config.ok());
return std::move(ohttp_key_config.value());
}
TEST(ObliviousHttpGateway, TestProvisioningKeyAndDecapsulate) {
constexpr absl::string_view kX25519SecretKey =
"3c168975674b2fa8e465970b79c8dcf09f1c741626480bd4c6162fc5b6a98e1a";
std::string x25519_secret_key_bytes;
ASSERT_TRUE(
absl::HexStringToBytes(kX25519SecretKey, &x25519_secret_key_bytes));
  auto instance = ObliviousHttpGateway::Create(
      x25519_secret_key_bytes,
      GetOhttpKeyConfig(
          1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256, EVP_HPKE_HKDF_SHA256,
          EVP_HPKE_AES_128_GCM));
  ASSERT_TRUE(instance.ok());
constexpr absl::string_view kEncapsulatedRequest =
"010020000100014b28f881333e7c164ffc499ad9796f877f4e1051ee6d31bad19dec96c2"
"08b4726374e469135906992e1268c594d2a10c695d858c40a026e7965e7d86b83dd440b2"
"c0185204b4d63525";
std::string encapsulated_request_bytes;
ASSERT_TRUE(absl::HexStringToBytes(kEncapsulatedRequest,
&encapsulated_request_bytes));
auto decrypted_req =
instance->DecryptObliviousHttpRequest(encapsulated_request_bytes);
ASSERT_TRUE(decrypted_req.ok());
ASSERT_FALSE(decrypted_req->GetPlaintextData().empty());
}
TEST(ObliviousHttpGateway, TestDecryptingMultipleRequestsWithSingleInstance) {
  auto instance = ObliviousHttpGateway::Create(
      GetHpkePrivateKey(),
      GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
                        EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM));
  ASSERT_TRUE(instance.ok());
absl::string_view encrypted_req_1 =
"010020000100025f20b60306b61ad9ecad389acd752ca75c4e2969469809fe3d84aae137"
"f73e4ccfe9ba71f12831fdce6c8202fbd38a84c5d8a73ac4c8ea6c10592594845f";
std::string encrypted_req_1_bytes;
ASSERT_TRUE(absl::HexStringToBytes(encrypted_req_1, &encrypted_req_1_bytes));
auto decapsulated_req_1 =
instance->DecryptObliviousHttpRequest(encrypted_req_1_bytes);
ASSERT_TRUE(decapsulated_req_1.ok());
ASSERT_FALSE(decapsulated_req_1->GetPlaintextData().empty());
absl::string_view encrypted_req_2 =
"01002000010002285ebc2fcad72cc91b378050cac29a62feea9cd97829335ee9fc87e672"
"4fa13ff2efdff620423d54225d3099088e7b32a5165f805a5d922918865a0a447a";
std::string encrypted_req_2_bytes;
ASSERT_TRUE(absl::HexStringToBytes(encrypted_req_2, &encrypted_req_2_bytes));
auto decapsulated_req_2 =
instance->DecryptObliviousHttpRequest(encrypted_req_2_bytes);
ASSERT_TRUE(decapsulated_req_2.ok());
ASSERT_FALSE(decapsulated_req_2->GetPlaintextData().empty());
}
TEST(ObliviousHttpGateway, TestInvalidHPKEKey) {
EXPECT_EQ(ObliviousHttpGateway::Create(
"Invalid HPKE key",
GetOhttpKeyConfig(70, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM))
.status()
.code(),
absl::StatusCode::kInternal);
EXPECT_EQ(ObliviousHttpGateway::Create(
"",
GetOhttpKeyConfig(70, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM))
.status()
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpGateway, TestObliviousResponseHandling) {
auto ohttp_key_config =
GetOhttpKeyConfig(3, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
auto instance =
ObliviousHttpGateway::Create(GetHpkePrivateKey(), ohttp_key_config);
ASSERT_TRUE(instance.ok());
  auto encapsulate_request_on_client =
      ObliviousHttpRequest::CreateClientObliviousRequest(
          "test", GetHpkePublicKey(), ohttp_key_config);
  ASSERT_TRUE(encapsulate_request_on_client.ok());
  auto decapsulated_req_on_server = instance->DecryptObliviousHttpRequest(
      encapsulate_request_on_client->EncapsulateAndSerialize());
ASSERT_TRUE(decapsulated_req_on_server.ok());
auto server_request_context =
std::move(decapsulated_req_on_server.value()).ReleaseContext();
auto encapsulate_resp_on_gateway = instance->CreateObliviousHttpResponse(
"some response", server_request_context);
ASSERT_TRUE(encapsulate_resp_on_gateway.ok());
ASSERT_FALSE(encapsulate_resp_on_gateway->EncapsulateAndSerialize().empty());
}
TEST(ObliviousHttpGateway,
TestHandlingMultipleResponsesForMultipleRequestsWithSingleInstance) {
  auto instance = ObliviousHttpGateway::Create(
      GetHpkePrivateKey(),
      GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
                        EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM),
      QuicheRandom::GetInstance());
  ASSERT_TRUE(instance.ok());
std::string encrypted_request_1_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("010020000100025f20b60306b61ad9ecad389acd752ca75c4"
"e2969469809fe3d84aae137"
"f73e4ccfe9ba71f12831fdce6c8202fbd38a84c5d8a73ac4c"
"8ea6c10592594845f",
&encrypted_request_1_bytes));
auto decrypted_request_1 =
instance->DecryptObliviousHttpRequest(encrypted_request_1_bytes);
ASSERT_TRUE(decrypted_request_1.ok());
std::string encrypted_request_2_bytes;
ASSERT_TRUE(
absl::HexStringToBytes("01002000010002285ebc2fcad72cc91b378050cac29a62fee"
"a9cd97829335ee9fc87e672"
"4fa13ff2efdff620423d54225d3099088e7b32a5165f805a5"
"d922918865a0a447a",
&encrypted_request_2_bytes));
auto decrypted_request_2 =
instance->DecryptObliviousHttpRequest(encrypted_request_2_bytes);
ASSERT_TRUE(decrypted_request_2.ok());
auto oblivious_request_context_1 =
std::move(decrypted_request_1.value()).ReleaseContext();
auto encrypted_response_1 = instance->CreateObliviousHttpResponse(
"test response 1", oblivious_request_context_1);
ASSERT_TRUE(encrypted_response_1.ok());
ASSERT_FALSE(encrypted_response_1->EncapsulateAndSerialize().empty());
auto oblivious_request_context_2 =
std::move(decrypted_request_2.value()).ReleaseContext();
auto encrypted_response_2 = instance->CreateObliviousHttpResponse(
"test response 2", oblivious_request_context_2);
ASSERT_TRUE(encrypted_response_2.ok());
ASSERT_FALSE(encrypted_response_2->EncapsulateAndSerialize().empty());
}
TEST(ObliviousHttpGateway, TestWithMultipleThreads) {
class TestQuicheThread : public QuicheThread {
public:
TestQuicheThread(const ObliviousHttpGateway& gateway_receiver,
std::string request_payload, std::string response_payload)
: QuicheThread("gateway_thread"),
gateway_receiver_(gateway_receiver),
request_payload_(request_payload),
response_payload_(response_payload) {}
protected:
void Run() override {
auto decrypted_request =
gateway_receiver_.DecryptObliviousHttpRequest(request_payload_);
ASSERT_TRUE(decrypted_request.ok());
ASSERT_FALSE(decrypted_request->GetPlaintextData().empty());
auto gateway_request_context =
std::move(decrypted_request.value()).ReleaseContext();
auto encrypted_response = gateway_receiver_.CreateObliviousHttpResponse(
response_payload_, gateway_request_context);
ASSERT_TRUE(encrypted_response.ok());
ASSERT_FALSE(encrypted_response->EncapsulateAndSerialize().empty());
}
private:
const ObliviousHttpGateway& gateway_receiver_;
std::string request_payload_, response_payload_;
};
  auto gateway_receiver = ObliviousHttpGateway::Create(
      GetHpkePrivateKey(),
      GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
                        EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM),
      QuicheRandom::GetInstance());
  ASSERT_TRUE(gateway_receiver.ok());
std::string request_payload_1;
ASSERT_TRUE(
absl::HexStringToBytes("010020000100025f20b60306b61ad9ecad389acd752ca75c4"
"e2969469809fe3d84aae137"
"f73e4ccfe9ba71f12831fdce6c8202fbd38a84c5d8a73ac4c"
"8ea6c10592594845f",
&request_payload_1));
TestQuicheThread t1(*gateway_receiver, request_payload_1, "test response 1");
std::string request_payload_2;
ASSERT_TRUE(
absl::HexStringToBytes("01002000010002285ebc2fcad72cc91b378050cac29a62fee"
"a9cd97829335ee9fc87e672"
"4fa13ff2efdff620423d54225d3099088e7b32a5165f805a5"
"d922918865a0a447a",
&request_payload_2));
TestQuicheThread t2(*gateway_receiver, request_payload_2, "test response 2");
t1.Start();
t2.Start();
t1.Join();
t2.Join();
}
}
} |
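Putting the gateway and the request/response buffers together, a full client/gateway round trip looks roughly like the sketch below; it is condensed from `TestObliviousResponseHandling`, reuses the test helpers defined above, and elides status checks for brevity:

```cpp
// Client encapsulates a request; gateway decrypts it and encrypts a response.
auto config = GetOhttpKeyConfig(3, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
                                EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
auto gateway = ObliviousHttpGateway::Create(GetHpkePrivateKey(), config);
auto client_req = ObliviousHttpRequest::CreateClientObliviousRequest(
    "GET /", GetHpkePublicKey(), config);
auto server_req = gateway->DecryptObliviousHttpRequest(
    client_req->EncapsulateAndSerialize());
auto context = std::move(server_req.value()).ReleaseContext();
auto response = gateway->CreateObliviousHttpResponse("200 OK", context);
// response->EncapsulateAndSerialize() is the ciphertext sent back to the
// client.
```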
187 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_LOWER_FUNCTION_CALL_OP_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_LOWER_FUNCTION_CALL_OP_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class FunctionLibraryDefinition;
class Graph;
class Node;
Status RewriteFunctionCallNode(Node* n, Graph* g,
const FunctionLibraryDefinition& flib_def,
bool keep_caller_fetchable);
}
#endif
#include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_function_call_inline_policy.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
namespace tensorflow {
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
Status RewriteFunctionCallNode(Node* n, Graph* g,
const FunctionLibraryDefinition& flib_def,
bool keep_caller_fetchable) {
VLOG(2) << "Lower function call node: " << SummarizeNode(*n);
InlineFunctionBodyOptions inline_options;
inline_options.keep_caller_node = keep_caller_fetchable
? KeepCallerNode::kFetchable
: KeepCallerNode::kTargetable;
FunctionCallInlinePolicy policy = GetFunctionCallInlinePolicy(n);
if (policy == FunctionCallInlinePolicy::kMultiDevicePlacer) {
inline_options.output_control_src = OutputControlSrc::kControlOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::MultiDevice();
} else if (policy == FunctionCallInlinePolicy::kSingleDevicePlacer) {
inline_options.output_control_src = OutputControlSrc::kDataOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::SingleDevice();
} else {
return errors::InvalidArgument("Unsupported function inlining policy");
}
core::RefCountPtr<FunctionRecord> fdef;
if (n->IsPartitionedCall()) {
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "f", &func));
fdef = flib_def.FindRecord(func.name());
} else if (n->type_string() == FunctionLibraryDefinition::kGradientOp) {
VLOG(2) << "Skip SymbolicGradient lowering";
return absl::OkStatus();
} else {
fdef = flib_def.FindRecord(n->type_string());
}
if (fdef == nullptr) {
return errors::Internal("Can't find a function: node=", SummarizeNode(*n));
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(std::move(fdef), n->attrs(), &flib_def, &fbody));
if (flags::Global().enable_function_pruning_before_inlining.value()) {
VLOG(2) << "Pruning enabled before inlining";
PruneFunctionBody(
fbody->record->fdef(), fbody->graph,
absl::Span<Node*>(fbody->arg_nodes.data(), fbody->arg_nodes.size()));
} else {
VLOG(2) << "Pruning disabled before inlining";
}
Status can_inline_function_call =
ValidateInlining(n, fbody.get(), inline_options);
if (can_inline_function_call.ok()) {
TF_RETURN_IF_ERROR(
InlineFunctionBody(flib_def, g, n, fbody.get(), inline_options));
} else {
VLOG(2) << "Failed to inline function call node: "
<< can_inline_function_call.message();
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncAttr(const string& name) {
AttrValue attr;
attr.mutable_func()->set_name(name);
return attr;
}
AttrValue FuncAttr(const string& name, const DataType type) {
AttrValue attr;
attr.mutable_func()->set_name(name);
(*attr.mutable_func()->mutable_attr())["T"].set_type(type);
return attr;
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerFunctionCallTest, InlineFunctionCall) {
using FDH = FunctionDefHelper;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) =
FDH::Create("AddAndMul", {"i: int32"}, {"o: int32"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* function_call;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("AddAndMul"))
.Finalize(root.graph(), &function_call));
TF_ASSERT_OK(root.DoShapeInference(function_call));
auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0));
root.graph()->AddControlEdge(function_call, b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
int add_count = 0;
int mul_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
if (op->type_string() == "Add") add_count++;
if (op->type_string() == "Mul") mul_count++;
}
ASSERT_EQ(partitioned_call_count, 0);
ASSERT_EQ(add_count, 1);
ASSERT_EQ(mul_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 100);
}
}
TEST(LowerFunctionCallTest, InlineFunctionCallAfterPruning) {
flags::Global().enable_function_pruning_before_inlining.reset(true);
using FDH = FunctionDefHelper;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = FDH::Create(
"AddAndMul", {"i: int32", "j: int32", "k: int32", "r: resource"},
{"o: int32"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}},
{{"div"}, "FloorDiv", {"i", "i"}, {{"T", DT_INT32}}},
{{"gather"},
"ResourceGather",
{"r", "i"},
{{"Tindices", DT_INT32}, {"dtype", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Placeholder(root.WithOpName("X"), DT_INT32);
auto y = ops::Placeholder(root.WithOpName("Y"), DT_INT32);
auto z = ops::Placeholder(root.WithOpName("Z"), DT_INT32);
auto r = ops::Placeholder(root.WithOpName("R"), DT_RESOURCE);
Node* function_call;
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(x.node()), NodeBuilder::NodeOut(y.node()),
NodeBuilder::NodeOut(z.node()), NodeBuilder::NodeOut(r.node())});
TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("AddAndMul"))
.Finalize(root.graph(), &function_call));
TF_ASSERT_OK(root.DoShapeInference(function_call));
auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0));
root.graph()->AddControlEdge(function_call, b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
int add_count = 0;
int mul_count = 0;
int floor_div_count = 0;
int resource_gather_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
if (op->type_string() == "Add") add_count++;
if (op->type_string() == "Mul") mul_count++;
if (op->type_string() == "FloorDiv") floor_div_count++;
if (op->type_string() == "ResourceGather") resource_gather_count++;
}
ASSERT_EQ(partitioned_call_count, 0);
ASSERT_EQ(add_count, 1);
ASSERT_EQ(mul_count, 1);
ASSERT_EQ(floor_div_count, 0);
ASSERT_EQ(resource_gather_count, 0);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(x.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 100);
}
flags::Global().enable_function_pruning_before_inlining.reset(false);
}
TEST(LowerFunctionCallTest, DoNotInlineTpuOrXlaFunctions) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef tpu_func = test::function::XTimesTwo();
tpu_func.mutable_signature()->set_name("TpuXTimesTwo");
(*tpu_func.mutable_attr())["_tpu_replicate"].set_b(true);
FunctionDef xla_func = test::function::XTimesTwo();
xla_func.mutable_signature()->set_name("XlaXTimesTwo");
(*xla_func.mutable_attr())["_xla_compile_id"].set_s("cluster_0");
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* tpu_call;
TF_ASSERT_OK(NodeBuilder("B", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("XTimesTwo", DT_INT32))
.Attr("_tpu_replicate", "cluster")
.Finalize(root.graph(), &tpu_call));
Node* xla_call;
TF_ASSERT_OK(NodeBuilder("C", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("XTimesTwo", DT_INT32))
.Attr("_xla_compile_id", "cluster")
.Finalize(root.graph(), &xla_call));
TF_ASSERT_OK(root.DoShapeInference(tpu_call));
TF_ASSERT_OK(root.DoShapeInference(xla_call));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
}
ASSERT_EQ(partitioned_call_count, 2);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(
session.Run(feeds, {Output(tpu_call), Output(xla_call)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 20);
}
}
}
} |
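The lowering itself is driven by a graph optimization pass. A sketch of the flow the tests exercise, mirroring the `Rewrite` helper above (graph construction elided):

```cpp
// Run LowerFunctionalOpsPass over a graph containing a PartitionedCall node.
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
// ... add a PartitionedCall node to `graph` as in the tests ...
FunctionLibraryDefinition flib_def(graph->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = &graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
TF_CHECK_OK(pass.Run(opt_options));
// Afterwards, calls without _tpu_replicate/_xla_compile_id attributes have
// been inlined into the surrounding graph.
```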
188 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_SERVING_DEVICE_SELECTOR_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_GPU_GPU_SERVING_DEVICE_SELECTOR_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/container/fixed_array.h"
#include "absl/container/node_hash_map.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/framework/resource_base.h"
namespace tensorflow {
namespace gpu {
class GpuServingDeviceSelector;
const char kGpuServingDeviceSelectorResourceName[] =
"gpu_serving_device_selector";
class GpuServingDeviceSelectorResource : public ResourceBase {
public:
explicit GpuServingDeviceSelectorResource(
int num_devices, std::unique_ptr<tsl::ServingDeviceSelector::Policy>
device_selector_policy)
: selector_(std::make_unique<GpuServingDeviceSelector>(
num_devices, std::move(device_selector_policy))) {}
std::string DebugString() const override {
return "GpuServingDeviceSelectorResource";
};
GpuServingDeviceSelector* selector() const { return selector_.get(); }
private:
std::unique_ptr<GpuServingDeviceSelector> selector_;
};
class GpuServingDeviceSelector : public tsl::ServingDeviceSelector {
public:
GpuServingDeviceSelector(
int num_devices,
std::unique_ptr<ServingDeviceSelector::Policy> device_selector_policy);
tsl::DeviceReservation ReserveDevice(
absl::string_view program_fingerprint) override;
void Enqueue(int32_t index_on_host, absl::string_view fingerprint);
void Completed(int32_t index_on_host, bool had_error = false);
private:
friend class ServingDeviceSelectorTestHelper;
static void OverwriteNowNsFunctionForTest(int64_t (*now_ns)());
void FreeDeviceReservation(
const tsl::DeviceReservation& reservation) override;
int64_t TotalEstimatedTimeTillIdleNs() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
absl::Mutex mu_;
absl::FixedArray<DeviceState, 8> device_states_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<ServingDeviceSelector::Policy> device_selector_policy_;
int64_t req_id_counter_ ABSL_GUARDED_BY(mu_);
absl::node_hash_map<std::string, ExecutionInfo> execution_info_
ABSL_GUARDED_BY(mu_);
std::optional<int64_t> min_exec_time_ ABSL_GUARDED_BY(mu_);
};
}
}
#endif
#include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/container/fixed_array.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h"
namespace tensorflow {
namespace gpu {
constexpr int64_t kDefaultEstimateNs = 1;
ABSL_CONST_INIT int64_t (*NowNs)() = +[]() -> int64_t {
return absl::GetCurrentTimeNanos();
};
using DeviceStates = GpuServingDeviceSelector::DeviceStates;
GpuServingDeviceSelector::GpuServingDeviceSelector(
const int num_devices,
std::unique_ptr<ServingDeviceSelector::Policy> device_selector_policy)
: device_states_(num_devices),
device_selector_policy_(std::move(device_selector_policy)),
req_id_counter_(0) {}
tsl::DeviceReservation GpuServingDeviceSelector::ReserveDevice(
absl::string_view program_fingerprint) {
absl::MutexLock lock(&mu_);
DeviceStates device_states;
device_states.states = absl::Span<const DeviceState>(device_states_);
auto [it, emplaced] =
execution_info_.try_emplace(program_fingerprint, ExecutionInfo());
const int device_index =
device_selector_policy_->SelectDevice(program_fingerprint, device_states);
ServingDeviceSelector::EnqueueHelper(
device_states_.at(device_index), device_index, it->second,
program_fingerprint, 0, req_id_counter_++,
1, 0, NowNs());
return tsl::DeviceReservation(device_index, this);
}
void GpuServingDeviceSelector::FreeDeviceReservation(
const tsl::DeviceReservation& reservation) {
Completed(reservation.device_index());
}
void GpuServingDeviceSelector::Enqueue(int32_t index_on_host,
absl::string_view fingerprint) {
if (fingerprint.empty()) {
LOG(ERROR) << "Empty fingerprint.";
return;
}
absl::MutexLock lock(&mu_);
auto [it, emplaced] =
execution_info_.try_emplace(fingerprint, ExecutionInfo());
DeviceState& device_state = device_states_.at(index_on_host);
ServingDeviceSelector::EnqueueHelper(device_state, index_on_host, it->second,
fingerprint,
0, -1,
1,
0, NowNs());
int64_t total_estimated_time_ns = TotalEstimatedTimeTillIdleNs();
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set(
total_estimated_time_ns);
}
void GpuServingDeviceSelector::Completed(int32_t index_on_host,
bool had_error) {
absl::MutexLock lock(&mu_);
DeviceState& device_state = device_states_.at(index_on_host);
ServingDeviceSelector::CompletedHelper(device_state, index_on_host, 0,
min_exec_time_, had_error, NowNs());
int64_t total_estimated_time_ns = TotalEstimatedTimeTillIdleNs();
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Set(
total_estimated_time_ns);
}
int64_t GpuServingDeviceSelector::TotalEstimatedTimeTillIdleNs() {
int64_t total_gpu_load_ns = 0;
for (const auto& device_state : device_states_) {
total_gpu_load_ns += ServingDeviceSelector::EstimateTimeTillIdleNs(
device_state, 0, min_exec_time_.value_or(kDefaultEstimateNs), NowNs());
}
return total_gpu_load_ns;
}
void GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(
int64_t (*now_ns)()) {
NowNs = now_ns;
}
}
} | #include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/serving_device_selector_policies.h"
#include "tensorflow/core/common_runtime/gpu/gpu_scheduling_metrics_storage.h"
namespace tensorflow {
namespace gpu {
class ServingDeviceSelectorTestHelper {
public:
ServingDeviceSelectorTestHelper() {
GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(NowNs);
now_ns_ = 0;
}
~ServingDeviceSelectorTestHelper() {
GpuServingDeviceSelector::OverwriteNowNsFunctionForTest(
absl::GetCurrentTimeNanos);
}
static void ElapseNs(int64_t ns) { now_ns_ += ns; }
static int64_t NowNs() { return now_ns_; }
private:
static int64_t now_ns_;
};
int64_t ServingDeviceSelectorTestHelper::now_ns_ = 0;
namespace {
TEST(GpuServingDeviceSelector, Basic) {
GpuServingDeviceSelector selector(2,
std::make_unique<tsl::RoundRobinPolicy>());
const std::string program_fingerprint = "TensorFlow";
tsl::DeviceReservation reservation =
selector.ReserveDevice(program_fingerprint);
EXPECT_EQ(reservation.device_index(), 0);
reservation = selector.ReserveDevice(program_fingerprint);
EXPECT_EQ(reservation.device_index(), 1);
reservation = selector.ReserveDevice(program_fingerprint);
EXPECT_EQ(reservation.device_index(), 0);
}
TEST(GpuServingDeviceSelector, DefaultPolicyOnlyEnqueueCall) {
ServingDeviceSelectorTestHelper helper;
auto policy = std::make_unique<tsl::RoundRobinPolicy>();
auto serving_device_selector =
std::make_unique<tensorflow::gpu::GpuServingDeviceSelector>(
4, std::move(policy));
serving_device_selector->Enqueue(3, "16ms");
serving_device_selector->Enqueue(2, "8ms");
serving_device_selector->Enqueue(1, "4ms");
serving_device_selector->Enqueue(0, "2ms");
serving_device_selector->Enqueue(3, "16ms");
serving_device_selector->Enqueue(2, "8ms");
serving_device_selector->Enqueue(1, "4ms");
serving_device_selector->Enqueue(0, "2ms");
helper.ElapseNs(2e6);
serving_device_selector->Completed(0);
helper.ElapseNs(2e6);
serving_device_selector->Completed(0);
serving_device_selector->Completed(1);
helper.ElapseNs(4e6);
serving_device_selector->Completed(1);
serving_device_selector->Completed(2);
helper.ElapseNs(8e6);
serving_device_selector->Completed(2);
serving_device_selector->Completed(3);
helper.ElapseNs(16e6);
serving_device_selector->Completed(3);
serving_device_selector->Enqueue(3, "16ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
16e6);
serving_device_selector->Enqueue(2, "8ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
24e6);
serving_device_selector->Enqueue(1, "4ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
28e6);
serving_device_selector->Enqueue(0, "2ms");
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
30e6);
helper.ElapseNs(2e6);
serving_device_selector->Completed(0);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
22e6);
helper.ElapseNs(2e6);
serving_device_selector->Completed(1);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
16e6);
helper.ElapseNs(4e6);
serving_device_selector->Completed(2);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
8e6);
helper.ElapseNs(8e6);
serving_device_selector->Completed(3);
EXPECT_EQ(
GpuSchedulingMetricsStorage::GetGlobalStorage().TotalGpuLoadNs().Get(),
0e6);
}
}
}
} |
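In the common case the selector is driven through RAII reservations. A short sketch based on the `Basic` test; with a round-robin policy the reservations simply cycle through device indices, and a reservation is expected to return its device via `FreeDeviceReservation()` when it is destroyed:

```cpp
// Round-robin device reservation across two GPUs.
GpuServingDeviceSelector selector(
    2, std::make_unique<tsl::RoundRobinPolicy>());
tsl::DeviceReservation r0 = selector.ReserveDevice("my_program");  // index 0
tsl::DeviceReservation r1 = selector.ReserveDevice("my_program");  // index 1
// Run work against r0.device_index() / r1.device_index(); the devices are
// marked completed when the reservations go out of scope.
```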
189 | #ifndef TENSORFLOW_TSL_PLATFORM_NET_H_
#define TENSORFLOW_TSL_PLATFORM_NET_H_
namespace tsl {
namespace internal {
int PickUnusedPortOrDie();
}
}
#endif
#include "tsl/platform/net.h"
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <cerrno>
#include <cstdlib>
#include <cstring>
#include <random>
#include <unordered_set>
#include "tsl/platform/logging.h"
#include "tsl/platform/strcat.h"
#define MAX_EPHEMERAL_PORT 60999
#define MIN_EPHEMERAL_PORT 32768
namespace tsl {
namespace internal {
namespace {
bool IsPortAvailable(int* port, bool is_tcp) {
const int protocol = is_tcp ? IPPROTO_TCP : 0;
const int fd = socket(AF_INET, is_tcp ? SOCK_STREAM : SOCK_DGRAM, protocol);
struct sockaddr_in addr;
socklen_t addr_len = sizeof(addr);
int actual_port;
CHECK_GE(*port, 0);
CHECK_LE(*port, MAX_EPHEMERAL_PORT);
if (fd < 0) {
LOG(ERROR) << "socket() failed: " << strerror(errno);
return false;
}
int one = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
LOG(ERROR) << "setsockopt() failed: " << strerror(errno);
if (close(fd) < 0) {
LOG(ERROR) << "close() failed: " << strerror(errno);
    }
return false;
}
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = INADDR_ANY;
addr.sin_port = htons(static_cast<uint16_t>(*port));
if (bind(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) < 0) {
LOG(WARNING) << "bind(port=" << *port << ") failed: " << strerror(errno);
if (close(fd) < 0) {
LOG(ERROR) << "close() failed: " << strerror(errno);
    }
return false;
}
if (getsockname(fd, reinterpret_cast<struct sockaddr*>(&addr), &addr_len) <
0) {
LOG(WARNING) << "getsockname() failed: " << strerror(errno);
if (close(fd) < 0) {
LOG(ERROR) << "close() failed: " << strerror(errno);
    }
return false;
}
CHECK_LE(addr_len, sizeof(addr));
actual_port = ntohs(addr.sin_port);
CHECK_GT(actual_port, 0);
if (*port == 0) {
*port = actual_port;
} else {
CHECK_EQ(*port, actual_port);
}
if (close(fd) < 0) {
LOG(ERROR) << "close() failed: " << strerror(errno);
  }
return true;
}
const int kNumRandomPortsToPick = 100;
const int kMaximumTrials = 1000;
}
int PickUnusedPortOrDie() {
static std::unordered_set<int> chosen_ports;
bool is_tcp = true;
int trial = 0;
std::default_random_engine rgen(std::random_device{}());
std::uniform_int_distribution<int> rdist(MIN_EPHEMERAL_PORT,
MAX_EPHEMERAL_PORT - 1);
while (true) {
int port;
trial++;
CHECK_LE(trial, kMaximumTrials)
<< "Failed to pick an unused port for testing.";
    if (trial == 1) {
      // First, derive a port from the PID so that concurrent test processes
      // tend to probe different ports.
      port = getpid() % (MAX_EPHEMERAL_PORT - MIN_EPHEMERAL_PORT) +
             MIN_EPHEMERAL_PORT;
    } else if (trial <= kNumRandomPortsToPick) {
      // Next, try uniformly random ports in the ephemeral range.
      port = rdist(rgen);
    } else {
      // Finally, let the kernel pick a free port by binding to port 0.
      port = 0;
    }
if (chosen_ports.find(port) != chosen_ports.end()) {
continue;
}
if (!IsPortAvailable(&port, is_tcp)) {
continue;
}
    CHECK_GT(port, 0);
    // Require the port to be free for both TCP and UDP before handing it out.
    if (!IsPortAvailable(&port, !is_tcp)) {
      is_tcp = !is_tcp;
      continue;
    }
chosen_ports.insert(port);
return port;
}
return 0;
}
}
} | #include "tsl/platform/net.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace internal {
TEST(Net, PickUnusedPortOrDie) {
int port0 = PickUnusedPortOrDie();
int port1 = PickUnusedPortOrDie();
CHECK_GE(port0, 0);
CHECK_LT(port0, 65536);
CHECK_GE(port1, 0);
CHECK_LT(port1, 65536);
CHECK_NE(port0, port1);
}
}
} |
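A typical caller uses the picked port to construct a loopback address for a server under test. A minimal sketch; `StartServer` is a hypothetical stand-in for whatever server the test spins up:

```cpp
// Pick a port that is free for both TCP and UDP, then build an address.
int port = tsl::internal::PickUnusedPortOrDie();
std::string address = tsl::strings::StrCat("localhost:", port);
// auto server = StartServer(address);  // hypothetical server factory
```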
190 | #ifndef AROLLA_UTIL_FINGERPRINT_H_
#define AROLLA_UTIL_FINGERPRINT_H_
#include <cstddef>
#include <cstdint>
#include <iosfwd>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/util/meta.h"
#include "arolla/util/struct_field.h"
#include "arolla/util/types.h"
namespace arolla {
struct Fingerprint {
absl::uint128 value;
std::string AsString() const;
signed_size_t PythonHash() const;
};
Fingerprint RandomFingerprint();
class FingerprintHasher {
public:
explicit FingerprintHasher(absl::string_view salt);
Fingerprint Finish() &&;
template <typename... Args>
FingerprintHasher& Combine(const Args&... args) &;
template <typename... Args>
FingerprintHasher&& Combine(const Args&... args) &&;
template <typename SpanT>
FingerprintHasher& CombineSpan(SpanT&& values) &;
template <typename SpanT>
FingerprintHasher&& CombineSpan(SpanT&& values) &&;
void CombineRawBytes(const void* data, size_t size);
private:
std::pair<uint64_t, uint64_t> state_;
};
namespace fingerprint_impl {
template <typename T, class = void>
struct HasArollaFingerprintMethod : std::false_type {};
template <class T>
struct HasArollaFingerprintMethod<
T, std::void_t<decltype(static_cast<void (T::*)(FingerprintHasher*) const>(
&T::ArollaFingerprint))>> : std::true_type {};
}
template <typename T>
struct FingerprintHasherTraits {
FingerprintHasherTraits() = delete;
};
inline bool operator==(const Fingerprint& lhs, const Fingerprint& rhs) {
return lhs.value == rhs.value;
}
inline bool operator!=(const Fingerprint& lhs, const Fingerprint& rhs) {
return !(lhs == rhs);
}
inline bool operator<(const Fingerprint& lhs, const Fingerprint& rhs) {
return lhs.value < rhs.value;
}
std::ostream& operator<<(std::ostream& ostream, const Fingerprint& fingerprint);
template <typename H>
H AbslHashValue(H state, const Fingerprint& fingerprint) {
return H::combine(std::move(state), fingerprint.value);
}
template <typename... Args>
FingerprintHasher& FingerprintHasher::Combine(const Args&... args) & {
auto combine = [this](const auto& arg) {
using Arg = std::decay_t<decltype(arg)>;
if constexpr (fingerprint_impl::HasArollaFingerprintMethod<Arg>::value) {
arg.ArollaFingerprint(this);
} else if constexpr (std::is_default_constructible_v<
FingerprintHasherTraits<Arg>>) {
FingerprintHasherTraits<Arg>()(this, arg);
} else if constexpr (std::is_arithmetic_v<Arg> || std::is_enum_v<Arg>) {
CombineRawBytes(&arg, sizeof(arg));
} else {
static_assert(sizeof(Arg) == 0,
"Please, define `void "
"T::ArollaFingerprint(FingerprintHasher* hasher) const` "
"or specialise FingerprintHasherTraits for your type.");
}
};
(combine(args), ...);
return *this;
}
template <typename... Args>
FingerprintHasher&& FingerprintHasher::Combine(const Args&... args) && {
Combine(args...);
return std::move(*this);
}
template <typename SpanT>
FingerprintHasher& FingerprintHasher::CombineSpan(SpanT&& values) & {
const auto span = absl::MakeConstSpan(values);
using T = typename decltype(span)::value_type;
Combine(values.size());
if constexpr (std::is_default_constructible_v<FingerprintHasherTraits<T>>) {
constexpr FingerprintHasherTraits<T> traits;
for (const auto& x : values) {
traits(this, x);
}
} else if constexpr (std::is_arithmetic_v<T> || std::is_enum_v<T>) {
CombineRawBytes(values.data(), values.size() * sizeof(values[0]));
} else {
static_assert(sizeof(T) == 0,
"Please specialise FingerprintHasherTraits for your type.");
}
return *this;
}
template <typename SpanT>
FingerprintHasher&& FingerprintHasher::CombineSpan(SpanT&& values) && {
CombineSpan(std::forward<SpanT>(values));
return std::move(*this);
}
template <>
struct FingerprintHasherTraits<Fingerprint> {
void operator()(FingerprintHasher* hasher, const Fingerprint& value) const {
hasher->CombineRawBytes(&value.value, sizeof(value.value));
}
};
template <>
struct FingerprintHasherTraits<std::string> {
void operator()(FingerprintHasher* hasher, const std::string& value) const {
hasher->Combine(value.size()).CombineRawBytes(value.data(), value.size());
}
};
template <>
struct FingerprintHasherTraits<absl::string_view> {
void operator()(FingerprintHasher* hasher, absl::string_view value) const {
hasher->Combine(value.size()).CombineRawBytes(value.data(), value.size());
}
};
template <class Struct>
void CombineStructFields(FingerprintHasher* hasher, const Struct& value) {
static_assert(HasStructFields<Struct>(), "no struct fields found");
meta::foreach_tuple_element(
GetStructFields<Struct>(), [&](const auto& struct_field) {
hasher->Combine(*UnsafeGetStructFieldPtr(struct_field, &value));
});
}
template <typename T>
struct FingerprintHasherTraits<T*> {
static_assert(sizeof(T*) == 0,
"Pointer values are runtime specific and not fingerprintable.");
};
template <typename T>
struct FingerprintHasherTraits<std::unique_ptr<T>> {
static_assert(
sizeof(std::unique_ptr<T>) == 0,
"Unique pointer values are runtime specific and not fingerprintable.");
};
template <typename T>
struct FingerprintHasherTraits<std::shared_ptr<T>> {
static_assert(
sizeof(std::shared_ptr<T>) == 0,
"Shared pointer values are runtime specific and not fingerprintable.");
};
#define AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(CPP_TYPE) \
template <> \
struct FingerprintHasherTraits<CPP_TYPE> { \
void operator()(FingerprintHasher* hasher, const CPP_TYPE& value) const; \
}
}
#endif
#include "arolla/util/fingerprint.h"
#include <cstddef>
#include <cstdint>
#include <ostream>
#include <string>
#include "absl/hash/hash.h"
#include "absl/numeric/int128.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "cityhash/city.h"
#include "arolla/util/types.h"
namespace arolla {
namespace {
uint32_t RuntimeSeed() {
  // absl::Hash is randomly seeded per process (by default), so fingerprints
  // are stable within a process but differ across runs.
  static uint32_t result = absl::Hash<int>{}(501816262);
  return result;
}
}
std::string Fingerprint::AsString() const {
return absl::StrFormat("%032x", value);
}
signed_size_t Fingerprint::PythonHash() const {
return absl::Hash<Fingerprint>()(*this);
}
std::ostream& operator<<(std::ostream& ostream,
const Fingerprint& fingerprint) {
return ostream << absl::StreamFormat("%032x", fingerprint.value);
}
Fingerprint RandomFingerprint() {
absl::BitGen bitgen;
return Fingerprint{absl::MakeUint128(absl::Uniform<uint64_t>(bitgen),
absl::Uniform<uint64_t>(bitgen))};
}
FingerprintHasher::FingerprintHasher(absl::string_view salt)
    : state_{3102879407, 2758948377} {
Combine(RuntimeSeed(), salt);
}
Fingerprint FingerprintHasher::Finish() && {
return Fingerprint{absl::MakeUint128(state_.second, state_.first)};
}
void FingerprintHasher::CombineRawBytes(const void* data, size_t size) {
state_ = cityhash::CityHash128WithSeed(
static_cast<const char*>(data), size, state_);
}
} | #include "arolla/util/fingerprint.h"
#include <limits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "arolla/util/struct_field.h"
namespace arolla {
namespace {
static_assert(
std::is_trivially_constructible_v<Fingerprint>,
"Make sure that fingerprint is trivially constructed, so that adding it to "
"a struct does not slow down the struct's initialization time.");
struct A {};
static_assert(!std::is_default_constructible_v<FingerprintHasherTraits<A>>);
struct AWithFingerPrintMethod {
void ArollaFingerprint(FingerprintHasher* hasher) const {
hasher->Combine(19);
}
};
struct AWithStructFields {
int a;
double b;
constexpr static auto ArollaStructFields() {
using CppType = AWithStructFields;
return std::tuple{
AROLLA_DECLARE_STRUCT_FIELD(a),
AROLLA_DECLARE_STRUCT_FIELD(b),
};
}
void ArollaFingerprint(FingerprintHasher* hasher) const {
CombineStructFields(hasher, *this);
}
};
template <typename... Ts>
Fingerprint MakeDummyFingerprint(const Ts&... values) {
return FingerprintHasher("dummy-salt").Combine(values...).Finish();
}
TEST(FingerprintTest, Empty) {
Fingerprint fgpt{};
EXPECT_EQ(fgpt.AsString(), "00000000000000000000000000000000");
}
TEST(FingerprintTest, RandomFingerprint) {
constexpr int N = 1024;
absl::flat_hash_set<Fingerprint> set;
set.reserve(N);
for (int i = 0; i < N; ++i) {
set.insert(RandomFingerprint());
}
EXPECT_EQ(set.size(), N);
}
TEST(FingerprintTest, AWithFingerPrintMethod) {
EXPECT_EQ(MakeDummyFingerprint(AWithFingerPrintMethod()),
MakeDummyFingerprint(19));
}
TEST(FingerprintTest, AWithStructFields) {
EXPECT_EQ(MakeDummyFingerprint(AWithStructFields{.a = 5, .b = 7.}),
MakeDummyFingerprint(5, 7.));
}
TEST(FingerprintTest, TestPrimitives) {
EXPECT_NE(MakeDummyFingerprint(5), MakeDummyFingerprint(6));
EXPECT_NE(MakeDummyFingerprint<std::string>("5"),
MakeDummyFingerprint<std::string>("6"));
}
TEST(FingerprintTest, FloatingPointZero) {
EXPECT_NE(MakeDummyFingerprint(0.0).PythonHash(),
MakeDummyFingerprint(-0.0).PythonHash());
EXPECT_NE(MakeDummyFingerprint(0.f).PythonHash(),
MakeDummyFingerprint(-0.f).PythonHash());
}
TEST(FingerprintTest, FloatingPointNAN) {
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<float>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<float>::quiet_NaN())
.PythonHash());
EXPECT_NE(MakeDummyFingerprint(std::numeric_limits<double>::quiet_NaN())
.PythonHash(),
MakeDummyFingerprint(-std::numeric_limits<double>::quiet_NaN())
.PythonHash());
}
TEST(FingerprintTest, PythonHash) {
EXPECT_EQ(MakeDummyFingerprint(4).PythonHash(),
MakeDummyFingerprint(4).PythonHash());
EXPECT_NE(MakeDummyFingerprint(5).PythonHash(),
MakeDummyFingerprint(6).PythonHash());
}
TEST(FingerprintTest, Less) {
EXPECT_LT(Fingerprint{27}, Fingerprint{37});
EXPECT_FALSE(Fingerprint{27} < Fingerprint{27});
}
TEST(FingerprintTest, CombineRawBytes) {
{
FingerprintHasher h1("dummy-salt");
FingerprintHasher h2("dummy-salt");
h1.CombineRawBytes("foobar", 6);
h2.CombineRawBytes("foobar", 6);
EXPECT_EQ(std::move(h1).Finish(), std::move(h2).Finish());
}
{
FingerprintHasher h1("dummy-salt");
FingerprintHasher h2("dummy-salt");
h1.CombineRawBytes("foobar", 6);
h2.CombineRawBytes("barfoo", 6);
EXPECT_NE(std::move(h1).Finish(), std::move(h2).Finish());
}
}
class Circle {
public:
Circle(int x, int y, int r) : center_(x, y), radius_(r) {
FingerprintHasher hasher("arolla::TestCircle");
hasher.Combine(center_.first, center_.second, radius_);
fingerprint_ = std::move(hasher).Finish();
}
const Fingerprint& fingerprint() { return fingerprint_; }
private:
std::pair<int, int> center_;
int radius_;
Fingerprint fingerprint_;
};
TEST(FingerprintTest, UserDefined) {
EXPECT_NE(Circle(0, 0, 1).fingerprint(), Circle(0, 0, 2).fingerprint());
EXPECT_NE(Circle(1, 1, 1).fingerprint(), Circle(0, 0, 1).fingerprint());
}
TEST(FingerprintTest, HasArollaFingerprintMethodRegression) {
struct OverloadedType {
int ArollaFingerprint() const { return 0; }
void ArollaFingerprint(FingerprintHasher*) const {}
};
EXPECT_TRUE(
fingerprint_impl::HasArollaFingerprintMethod<OverloadedType>::value);
struct WrongType {
int ArollaFingerprint() const { return 0; }
};
EXPECT_FALSE(fingerprint_impl::HasArollaFingerprintMethod<WrongType>::value);
}
}
} |
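For user-defined types, the header offers two hooks: an `ArollaFingerprint` member method (detected by `HasArollaFingerprintMethod`) or a `FingerprintHasherTraits` specialization, typically declared with the macro above. A minimal sketch of the member-method route, modeled on `AWithFingerPrintMethod` from the tests:

```cpp
// A fingerprintable value type: combine the fields that define identity.
struct Point {
  int x;
  int y;
  void ArollaFingerprint(FingerprintHasher* hasher) const {
    hasher->Combine(x, y);
  }
};

Fingerprint FingerprintOf(const Point& p) {
  return FingerprintHasher("example-salt").Combine(p).Finish();
}
```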
191 | #ifndef TENSORFLOW_JAVA_SRC_GEN_CC_SOURCE_WRITER_H_
#define TENSORFLOW_JAVA_SRC_GEN_CC_SOURCE_WRITER_H_
#include <string>
#include <stack>
#include <list>
#include <set>
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
namespace tensorflow {
namespace java {
class SourceWriter {
public:
SourceWriter();
virtual ~SourceWriter();
SourceWriter& Indent(int tab);
SourceWriter& Prefix(const char* line_prefix);
SourceWriter& Write(const StringPiece& str);
SourceWriter& WriteFromFile(const string& fname, Env* env = Env::Default());
SourceWriter& Append(const StringPiece& str);
SourceWriter& AppendType(const Type& type);
SourceWriter& EndLine();
SourceWriter& BeginBlock(const string& expression = "");
SourceWriter& EndBlock();
SourceWriter& BeginMethod(const Method& method, int modifiers,
const Javadoc* javadoc = nullptr);
SourceWriter& EndMethod();
SourceWriter& BeginType(const Type& type, int modifiers,
const std::list<Type>* extra_dependencies = nullptr,
const Javadoc* javadoc = nullptr);
SourceWriter& BeginInnerType(const Type& type, int modifiers,
const Javadoc* javadoc = nullptr);
SourceWriter& EndType();
SourceWriter& WriteField(const Variable& field, int modifiers,
const Javadoc* javadoc = nullptr);
protected:
virtual void DoAppend(const StringPiece& str) = 0;
private:
class TypeVisitor {
public:
virtual ~TypeVisitor() = default;
void Visit(const Type& type);
protected:
virtual void DoVisit(const Type& type) = 0;
};
class GenericNamespace : public TypeVisitor {
public:
GenericNamespace() = default;
explicit GenericNamespace(const GenericNamespace* parent)
: generic_names_(parent->generic_names_) {}
std::list<const Type*> declared_types() {
return declared_types_;
}
protected:
virtual void DoVisit(const Type& type);
private:
std::list<const Type*> declared_types_;
std::set<string> generic_names_;
};
class TypeImporter : public TypeVisitor {
public:
explicit TypeImporter(const string& current_package)
: current_package_(current_package) {}
virtual ~TypeImporter() = default;
const std::set<string> imports() {
return imports_;
}
protected:
virtual void DoVisit(const Type& type);
private:
string current_package_;
std::set<string> imports_;
};
string left_margin_;
string line_prefix_;
bool newline_ = true;
std::stack<GenericNamespace*> generic_namespaces_;
SourceWriter& WriteModifiers(int modifiers);
SourceWriter& WriteJavadoc(const Javadoc& javadoc);
SourceWriter& WriteAnnotations(const std::list<Annotation>& annotations);
SourceWriter& WriteGenerics(const std::list<const Type*>& generics);
GenericNamespace* PushGenericNamespace(int modifiers);
void PopGenericNamespace();
};
class SourceFileWriter : public SourceWriter {
public:
explicit SourceFileWriter(WritableFile* file) : file_(file) {}
virtual ~SourceFileWriter() = default;
protected:
void DoAppend(const StringPiece& str) override {
TF_CHECK_OK(file_->Append(str));
}
private:
WritableFile* file_;
};
class SourceBufferWriter : public SourceWriter {
public:
SourceBufferWriter() : owns_buffer_(true), buffer_(new string()) {}
explicit SourceBufferWriter(string* buffer)
: owns_buffer_(false), buffer_(buffer) {}
virtual ~SourceBufferWriter() {
if (owns_buffer_) delete buffer_;
}
const string& str() { return *buffer_; }
protected:
void DoAppend(const StringPiece& str) override {
buffer_->append(str.begin(), str.end());
}
private:
bool owns_buffer_;
string* buffer_;
};
}
}
#endif
#include <string>
#include <algorithm>
#include <list>
#include "tensorflow/java/src/gen/cc/source_writer.h"
namespace tensorflow {
namespace java {
SourceWriter::SourceWriter() {
generic_namespaces_.push(new GenericNamespace());
}
SourceWriter::~SourceWriter() {
while (!generic_namespaces_.empty()) {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
}
SourceWriter& SourceWriter::Indent(int tab) {
left_margin_.resize(
std::max(static_cast<int>(left_margin_.size() + tab), 0), ' ');
return *this;
}
SourceWriter& SourceWriter::Prefix(const char* line_prefix) {
line_prefix_ = line_prefix;
return *this;
}
SourceWriter& SourceWriter::Write(const StringPiece& str) {
size_t line_pos = 0;
do {
size_t start_pos = line_pos;
line_pos = str.find('\n', start_pos);
if (line_pos != string::npos) {
++line_pos;
Append(str.substr(start_pos, line_pos - start_pos));
newline_ = true;
} else {
Append(str.substr(start_pos, str.size() - start_pos));
}
} while (line_pos != string::npos && line_pos < str.size());
return *this;
}
SourceWriter& SourceWriter::WriteFromFile(const string& fname, Env* env) {
  string data;
  TF_CHECK_OK(ReadFileToString(env, fname, &data));
  return Write(data);
}
SourceWriter& SourceWriter::Append(const StringPiece& str) {
if (!str.empty()) {
if (newline_) {
DoAppend(left_margin_ + line_prefix_);
newline_ = false;
}
DoAppend(str);
}
return *this;
}
SourceWriter& SourceWriter::AppendType(const Type& type) {
if (type.wildcard()) {
Append("?");
} else {
Append(type.name());
if (!type.parameters().empty()) {
Append("<");
bool first = true;
for (const Type& t : type.parameters()) {
if (!first) {
Append(", ");
}
AppendType(t);
first = false;
}
Append(">");
}
}
return *this;
}
SourceWriter& SourceWriter::EndLine() {
Append("\n");
newline_ = true;
return *this;
}
SourceWriter& SourceWriter::BeginBlock(const string& expression) {
if (!expression.empty()) {
Append(expression + " {");
} else {
Append(newline_ ? "{" : " {");
}
return EndLine().Indent(2);
}
SourceWriter& SourceWriter::EndBlock() {
return Indent(-2).Append("}").EndLine();
}
SourceWriter& SourceWriter::BeginMethod(const Method& method, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
if (!method.constructor()) {
generic_namespace->Visit(method.return_type());
}
for (const Variable& v : method.arguments()) {
generic_namespace->Visit(v.type());
}
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!method.annotations().empty()) {
WriteAnnotations(method.annotations());
}
WriteModifiers(modifiers);
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
Append(" ");
}
if (!method.constructor()) {
AppendType(method.return_type()).Append(" ");
}
Append(method.name()).Append("(");
bool first = true;
for (const Variable& v : method.arguments()) {
if (!first) {
Append(", ");
}
AppendType(v.type()).Append(v.variadic() ? "... " : " ").Append(v.name());
first = false;
}
return Append(")").BeginBlock();
}
SourceWriter& SourceWriter::EndMethod() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::BeginType(const Type& type, int modifiers,
const std::list<Type>* extra_dependencies,
const Javadoc* javadoc) {
if (!type.package().empty()) {
Append("package ").Append(type.package()).Append(";").EndLine();
}
TypeImporter type_importer(type.package());
type_importer.Visit(type);
if (extra_dependencies != nullptr) {
for (const Type& t : *extra_dependencies) {
type_importer.Visit(t);
}
}
if (!type_importer.imports().empty()) {
EndLine();
for (const string& s : type_importer.imports()) {
Append("import ").Append(s).Append(";").EndLine();
}
}
return BeginInnerType(type, modifiers, javadoc);
}
SourceWriter& SourceWriter::BeginInnerType(const Type& type, int modifiers,
const Javadoc* javadoc) {
GenericNamespace* generic_namespace = PushGenericNamespace(modifiers);
generic_namespace->Visit(type);
EndLine();
if (javadoc != nullptr) {
WriteJavadoc(*javadoc);
}
if (!type.annotations().empty()) {
WriteAnnotations(type.annotations());
}
WriteModifiers(modifiers);
CHECK_EQ(Type::Kind::CLASS, type.kind()) << ": Not supported yet";
Append("class ").Append(type.name());
if (!generic_namespace->declared_types().empty()) {
WriteGenerics(generic_namespace->declared_types());
}
if (!type.supertypes().empty()) {
bool first_interface = true;
for (const Type& t : type.supertypes()) {
if (t.kind() == Type::CLASS) {
Append(" extends ");
} else if (first_interface) {
Append(" implements ");
first_interface = false;
} else {
Append(", ");
}
AppendType(t);
}
}
return BeginBlock();
}
SourceWriter& SourceWriter::EndType() {
EndBlock();
PopGenericNamespace();
return *this;
}
SourceWriter& SourceWriter::WriteField(const Variable& field, int modifiers,
const Javadoc* javadoc) {
  if (javadoc != nullptr && !javadoc->brief().empty()) {
    // Javadoc content is elided in this snippet; emit only the line break.
    Append("").EndLine();
  }
WriteModifiers(modifiers);
AppendType(field.type()).Append(" ").Append(field.name()).Append(";");
EndLine();
return *this;
}
SourceWriter& SourceWriter::WriteModifiers(int modifiers) {
if (modifiers & PUBLIC) {
Append("public ");
} else if (modifiers & PROTECTED) {
Append("protected ");
} else if (modifiers & PRIVATE) {
Append("private ");
}
if (modifiers & STATIC) {
Append("static ");
}
if (modifiers & FINAL) {
Append("final ");
}
return *this;
}
SourceWriter& SourceWriter::WriteJavadoc(const Javadoc& javadoc) {
  // Javadoc content is elided in this snippet; emit only the line break.
  // The function must return *this to keep the fluent call chain valid.
  return Append("").EndLine();
}
SourceWriter& SourceWriter::WriteAnnotations(
const std::list<Annotation>& annotations) {
for (const Annotation& a : annotations) {
Append("@" + a.name());
if (!a.attributes().empty()) {
Append("(").Append(a.attributes()).Append(")");
}
EndLine();
}
return *this;
}
SourceWriter& SourceWriter::WriteGenerics(
const std::list<const Type*>& generics) {
Append("<");
bool first = true;
for (const Type* pt : generics) {
if (!first) {
Append(", ");
}
Append(pt->name());
if (!pt->supertypes().empty()) {
Append(" extends ").AppendType(pt->supertypes().front());
}
first = false;
}
return Append(">");
}
SourceWriter::GenericNamespace* SourceWriter::PushGenericNamespace(
int modifiers) {
GenericNamespace* generic_namespace;
if (modifiers & STATIC) {
generic_namespace = new GenericNamespace();
} else {
generic_namespace = new GenericNamespace(generic_namespaces_.top());
}
generic_namespaces_.push(generic_namespace);
return generic_namespace;
}
void SourceWriter::PopGenericNamespace() {
GenericNamespace* generic_namespace = generic_namespaces_.top();
generic_namespaces_.pop();
delete generic_namespace;
}
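// Scope note (illustration, not in the original source): a STATIC member gets
// a fresh GenericNamespace because Java static members cannot reference the
// enclosing class's type parameters and must redeclare their own. Inside
// class Test<T extends Number>, a method returning T therefore renders as
//   public T doNothing()                              (instance method)
//   public static <T extends Number> T doNothing()    (static method)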
void SourceWriter::TypeVisitor::Visit(const Type& type) {
DoVisit(type);
for (const Type& t : type.parameters()) {
Visit(t);
}
for (const Annotation& t : type.annotations()) {
DoVisit(t);
}
for (const Type& t : type.supertypes()) {
Visit(t);
}
}
void SourceWriter::GenericNamespace::DoVisit(const Type& type) {
if (type.kind() == Type::GENERIC && !type.wildcard() &&
generic_names_.find(type.name()) == generic_names_.end()) {
declared_types_.push_back(&type);
generic_names_.insert(type.name());
}
}
void SourceWriter::TypeImporter::DoVisit(const Type& type) {
if (!type.package().empty() && type.package() != current_package_) {
imports_.insert(type.canonical_name());
}
}
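// Import rule (illustration): with current_package_ == "org.tensorflow",
// visiting Class("TypeB", "org.other") records the import "org.other.TypeB",
// while Class("SamePackageType", "org.tensorflow") and Class("NoPackageType")
// are skipped, since same-package and package-less types need no import.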
}
} | #include <list>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/java/src/gen/cc/java_defs.h"
#include "tensorflow/java/src/gen/cc/source_writer.h"
namespace tensorflow {
namespace java {
namespace {
TEST(AppendTest, SingleLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineText) {
SourceBufferWriter writer;
writer.Append("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Append("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(AppendTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Append("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, SingleLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye and I say hello!");
const char* expected = "You say goodbye and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineText) {
SourceBufferWriter writer;
writer.Write("You say goodbye\nand I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndent) {
SourceBufferWriter writer;
writer.Indent(2).Write("You say goodbye\nand I say hello!");
const char* expected = " You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithPrefix) {
SourceBufferWriter writer;
writer.Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = "--You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteTest, MultiLineTextWithIndentAndPrefix) {
SourceBufferWriter writer;
writer.Indent(2).Prefix("--").Write("You say goodbye\nand I say hello!");
const char* expected = " --You say goodbye\n --and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Basic) {
SourceBufferWriter writer;
writer.Append("You say goodbye").EndLine().Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Indent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!");
const char* expected = "You say goodbye\n and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndOutdent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Append("Hello, hello!");
const char* expected = "You say goodbye\n and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, Prefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!");
const char* expected = "You say goodbye\n--and I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, PrefixAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n--and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, IndentAndPrefixAndOutdentAndRemovePrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Prefix("--")
.Append("and I say hello!")
.EndLine()
.Indent(-2)
.Prefix("")
.Append("Hello, hello!");
const char* expected = "You say goodbye\n --and I say hello!\nHello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, NegativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(-10)
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, CumulativeIndent) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Indent(2)
.Append("and I say hello!")
.EndLine()
.Indent(2)
.Append("Hello, hello!");
const char* expected =
"You say goodbye\n and I say hello!\n Hello, hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(MarginTest, EmptyPrefix) {
SourceBufferWriter writer;
writer.Append("You say goodbye")
.EndLine()
.Prefix("")
.Append("and I say hello!");
const char* expected = "You say goodbye\nand I say hello!";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, BlocksAndLines) {
SourceBufferWriter writer;
writer.Append("int i = 0;").EndLine()
.Append("int j = 10;").EndLine()
.Append("if (true)")
.BeginBlock()
.Append("int aLongWayToTen = 0;").EndLine()
.Append("while (++i <= j)")
.BeginBlock()
.Append("++aLongWayToTen;").EndLine()
.EndBlock()
.EndBlock();
const char* expected =
"int i = 0;\n"
"int j = 10;\n"
"if (true) {\n"
" int aLongWayToTen = 0;\n"
" while (++i <= j) {\n"
" ++aLongWayToTen;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, Types) {
SourceBufferWriter writer;
Type generic = Type::Generic("T").add_supertype(Type::Class("Number"));
writer.AppendType(Type::Int())
.Append(", ")
.AppendType(Type::Class("String"))
.Append(", ")
.AppendType(generic)
.Append(", ")
.AppendType(Type::ListOf(generic))
.Append(", ")
.AppendType(Type::ListOf(Type::IterableOf(generic)))
.Append(", ")
.AppendType(Type::ListOf(Type::Wildcard()));
const char* expected =
"int, String, T, List<T>, List<Iterable<T>>, List<?>";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(StreamTest, FileSnippet) {
SourceBufferWriter writer;
const string fname = tensorflow::io::JoinPath(
tensorflow::testing::TensorFlowSrcRoot(),
"java/src/gen/resources/test.java.snippet");
writer.WriteFromFile(fname)
.BeginBlock()
.WriteFromFile(fname)
.EndBlock();
const char* expected =
"
"System.out.println(\"Hello!\");\n"
"{\n"
"
" System.out.println(\"Hello!\");\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleClassWithDependencies) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
std::list<Type> deps;
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeA", "org.test.sub"));
deps.push_back(Type::Class("TypeB", "org.other"));
deps.push_back(Type::Class("SamePackageType", "org.tensorflow"));
deps.push_back(Type::Class("NoPackageType"));
writer.BeginType(clazz, PUBLIC, &deps).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"import org.other.TypeB;\n"
"import org.test.sub.TypeA;\n\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, AnnotatedAndDocumentedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Javadoc clazz_doc = Javadoc::Create("Javadoc test")
.details("This is a\nmultiline description.");
clazz.add_annotation(Annotation::Create("Bean"));
clazz.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC, nullptr, &clazz_doc).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"\n"
"@Bean\n"
"@SuppressWarnings(\"rawtypes\")\n"
"public class Test {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
clazz.add_parameter(Type::Generic("T"));
clazz.add_parameter(Type::Generic("U").add_supertype(Type::Class("Number")));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number> {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassAndSupertypes) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T");
clazz.add_parameter(type_t);
Type type_u = Type::Generic("U").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_u);
clazz.add_supertype(Type::Interface("Parameterizable").add_parameter(type_u));
clazz.add_supertype(Type::Interface("Runnable"));
clazz.add_supertype(Type::Class("SuperTest").add_parameter(type_t));
writer.BeginType(clazz, PUBLIC).EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T, U extends Number>"
" extends SuperTest<T> implements Parameterizable<U>, Runnable {\n}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, ParameterizedClassFields) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Variable field1 = Variable::Create("field1", Type::Class("String"));
Variable field2 = Variable::Create("field2", Type::Class("String"));
Variable field3 = Variable::Create("field3", type_t);
Javadoc field3_doc = Javadoc::Create("This variable is documented");
writer.BeginType(clazz, PUBLIC)
.WriteField(field1, STATIC | PUBLIC | FINAL)
.WriteField(field2, PRIVATE)
.WriteField(field3, PRIVATE, &field3_doc)
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" public static final String field1;\n"
" private String field2;\n"
" \n"
" private T field3;\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, SimpleInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type inner_class = Type::Class("InnerTest");
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public class InnerTest {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteType, StaticParameterizedInnerClass) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Type inner_class = Type::Class("InnerTest");
inner_class.add_parameter(type_t);
writer.BeginType(clazz, PUBLIC)
.BeginInnerType(inner_class, PUBLIC | STATIC)
.EndType()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static class InnerTest<T extends Number> {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, SimpleMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, AnnotatedAndDocumentedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Method method = Method::Create("doNothing", Type::Void());
Javadoc method_doc =
Javadoc::Create("Javadoc test")
.details("This method has a\nmultiline description.");
method.add_annotation(Annotation::Create("Override"));
method.add_annotation(Annotation::Create("SuppressWarnings")
.attributes("\"rawtypes\""));
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" @Override\n"
" @SuppressWarnings(\"rawtypes\")\n"
" public void doNothing() {\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, DocumentedMethodWithArguments) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Variable reverse = Variable::Create("reverse", Type::Boolean());
Method method = Method::Create("boolToInt", Type::Int());
method.add_argument(Variable::Create("b", Type::Boolean()));
method.add_argument(reverse);
Javadoc method_doc =
Javadoc::Create("Converts a boolean to an int")
.details("This method will convert\na boolean to an int")
.add_param_tag(reverse.name(), "if true, value is reversed")
.add_tag("return", "int value for this boolean");
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC, &method_doc)
.Append("if (b && !reverse)")
.BeginBlock()
.Append("return 1;")
.EndLine()
.EndBlock()
.Append("return 0;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test {\n"
" \n"
" \n"
" public int boolToInt(boolean b, boolean reverse) {\n"
" if (b && !reverse) {\n"
" return 1;\n"
" }\n"
" return 0;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, ParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
TEST(WriteMethod, StaticParameterizedMethod) {
SourceBufferWriter writer;
Type clazz = Type::Class("Test", "org.tensorflow");
Type type_t = Type::Generic("T").add_supertype(Type::Class("Number"));
clazz.add_parameter(type_t);
Method method = Method::Create("doNothing", type_t);
writer.BeginType(clazz, PUBLIC)
.BeginMethod(method, PUBLIC | STATIC)
.Append("return null;")
.EndLine()
.EndMethod()
.EndType();
const char* expected =
"package org.tensorflow;\n\n"
"public class Test<T extends Number> {\n"
" \n"
" public static <T extends Number> T doNothing() {\n"
" return null;\n"
" }\n"
"}\n";
ASSERT_STREQ(expected, writer.str().data());
}
}
}
} |
192 | #ifndef TENSORFLOW_LITE_TOCO_TOCO_CMDLINE_FLAGS_H_
#define TENSORFLOW_LITE_TOCO_TOCO_CMDLINE_FLAGS_H_
#include <string>
#include <vector>
#include "tensorflow/lite/toco/args.h"
#include "tensorflow/lite/toco/toco_flags.pb.h"
#include "tensorflow/lite/toco/types.pb.h"
namespace toco {
bool ParseTocoFlagsFromCommandLineFlags(int* argc, char* argv[],
std::string* msg,
ParsedTocoFlags* parsed_toco_flags_ptr);
void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
TocoFlags* toco_flags);
}
#endif
#include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/toco/toco_port.h"
namespace toco {
bool ParseTocoFlagsFromCommandLineFlags(
int* argc, char* argv[], std::string* msg,
ParsedTocoFlags* parsed_toco_flags_ptr) {
using tensorflow::Flag;
ParsedTocoFlags& parsed_flags = *parsed_toco_flags_ptr;
std::vector<tensorflow::Flag> flags = {
Flag("input_file", parsed_flags.input_file.bind(),
parsed_flags.input_file.default_value(),
"Input file (model of any supported format). For Protobuf "
"formats, both text and binary are supported regardless of file "
"extension."),
Flag("savedmodel_directory", parsed_flags.savedmodel_directory.bind(),
parsed_flags.savedmodel_directory.default_value(),
"Deprecated. Full path to the directory containing the SavedModel."),
Flag("output_file", parsed_flags.output_file.bind(),
parsed_flags.output_file.default_value(),
"Output file. "
"For Protobuf formats, the binary format will be used."),
Flag("input_format", parsed_flags.input_format.bind(),
parsed_flags.input_format.default_value(),
"Input file format. One of: TENSORFLOW_GRAPHDEF, TFLITE."),
Flag("output_format", parsed_flags.output_format.bind(),
parsed_flags.output_format.default_value(),
"Output file format. "
"One of TENSORFLOW_GRAPHDEF, TFLITE, GRAPHVIZ_DOT."),
Flag("savedmodel_tagset", parsed_flags.savedmodel_tagset.bind(),
parsed_flags.savedmodel_tagset.default_value(),
"Deprecated. Comma-separated set of tags identifying the "
"MetaGraphDef within the SavedModel to analyze. All tags in the tag "
"set must be specified."),
Flag("default_ranges_min", parsed_flags.default_ranges_min.bind(),
parsed_flags.default_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_ranges_max", parsed_flags.default_ranges_max.bind(),
parsed_flags.default_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of uint8 arrays."),
Flag("default_int16_ranges_min",
parsed_flags.default_int16_ranges_min.bind(),
parsed_flags.default_int16_ranges_min.default_value(),
"If defined, will be used as the default value for the min bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("default_int16_ranges_max",
parsed_flags.default_int16_ranges_max.bind(),
parsed_flags.default_int16_ranges_max.default_value(),
"If defined, will be used as the default value for the max bound "
"of min/max ranges used for quantization of int16 arrays."),
Flag("inference_type", parsed_flags.inference_type.bind(),
parsed_flags.inference_type.default_value(),
"Target data type of arrays in the output file (for input_arrays, "
"this may be overridden by inference_input_type). "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("inference_input_type", parsed_flags.inference_input_type.bind(),
parsed_flags.inference_input_type.default_value(),
"Target data type of input arrays. "
"If not specified, inference_type is used. "
"One of FLOAT, QUANTIZED_UINT8."),
Flag("input_type", parsed_flags.input_type.bind(),
parsed_flags.input_type.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type."),
Flag("input_types", parsed_flags.input_types.bind(),
parsed_flags.input_types.default_value(),
"Deprecated ambiguous flag that set both --input_data_types and "
"--inference_input_type. Was meant to be a "
"comma-separated list, but this was deprecated before "
"multiple-input-types was ever properly supported."),
Flag("drop_fake_quant", parsed_flags.drop_fake_quant.bind(),
parsed_flags.drop_fake_quant.default_value(),
"Ignore and discard FakeQuant nodes. For instance, to "
"generate plain float code without fake-quantization from a "
"quantized graph."),
Flag(
"reorder_across_fake_quant",
parsed_flags.reorder_across_fake_quant.bind(),
parsed_flags.reorder_across_fake_quant.default_value(),
"Normally, FakeQuant nodes must be strict boundaries for graph "
"transformations, in order to ensure that quantized inference has "
"the exact same arithmetic behavior as quantized training --- which "
"is the whole point of quantized training and of FakeQuant nodes in "
"the first place. "
"However, that entails subtle requirements on where exactly "
"FakeQuant nodes must be placed in the graph. Some quantized graphs "
"have FakeQuant nodes at unexpected locations, that prevent graph "
"transformations that are necessary in order to generate inference "
"code for these graphs. Such graphs should be fixed, but as a "
"temporary work-around, setting this reorder_across_fake_quant flag "
"allows TOCO to perform necessary graph transformaitons on them, "
"at the cost of no longer faithfully matching inference and training "
"arithmetic."),
Flag("allow_custom_ops", parsed_flags.allow_custom_ops.bind(),
parsed_flags.allow_custom_ops.default_value(),
"If true, allow TOCO to create TF Lite Custom operators for all the "
"unsupported TensorFlow ops."),
Flag("custom_opdefs", parsed_flags.custom_opdefs.bind(),
parsed_flags.custom_opdefs.default_value(),
"List of strings representing custom ops OpDefs that are included "
"in the GraphDef."),
Flag("allow_dynamic_tensors", parsed_flags.allow_dynamic_tensors.bind(),
parsed_flags.allow_dynamic_tensors.default_value(),
"Boolean flag indicating whether the converter should allow models "
"with dynamic Tensor shape. When set to False, the converter will "
"generate runtime memory offsets for activation Tensors (with 128 "
"bits alignment) and error out on models with undetermined Tensor "
"shape. (Default: True)"),
Flag(
"drop_control_dependency",
parsed_flags.drop_control_dependency.bind(),
parsed_flags.drop_control_dependency.default_value(),
"If true, ignore control dependency requirements in input TensorFlow "
"GraphDef. Otherwise an error will be raised upon control dependency "
"inputs."),
Flag("debug_disable_recurrent_cell_fusion",
parsed_flags.debug_disable_recurrent_cell_fusion.bind(),
parsed_flags.debug_disable_recurrent_cell_fusion.default_value(),
"If true, disable fusion of known identifiable cell subgraphs into "
"cells. This includes, for example, specific forms of LSTM cell."),
Flag("propagate_fake_quant_num_bits",
parsed_flags.propagate_fake_quant_num_bits.bind(),
parsed_flags.propagate_fake_quant_num_bits.default_value(),
"If true, use FakeQuant* operator num_bits attributes to adjust "
"array data_types."),
Flag("allow_nudging_weights_to_use_fast_gemm_kernel",
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel.bind(),
parsed_flags.allow_nudging_weights_to_use_fast_gemm_kernel
.default_value(),
"Some fast uint8 GEMM kernels require uint8 weights to avoid the "
"value 0. This flag allows nudging them to 1 to allow proceeding, "
"with moderate inaccuracy."),
Flag("dedupe_array_min_size_bytes",
parsed_flags.dedupe_array_min_size_bytes.bind(),
parsed_flags.dedupe_array_min_size_bytes.default_value(),
"Minimum size of constant arrays to deduplicate; arrays smaller "
"will not be deduplicated."),
Flag("split_tflite_lstm_inputs",
parsed_flags.split_tflite_lstm_inputs.bind(),
parsed_flags.split_tflite_lstm_inputs.default_value(),
"Split the LSTM inputs from 5 tensors to 18 tensors for TFLite. "
"Ignored if the output format is not TFLite."),
Flag("quantize_to_float16", parsed_flags.quantize_to_float16.bind(),
parsed_flags.quantize_to_float16.default_value(),
"Used in conjunction with post_training_quantize. Specifies that "
"the weights should be quantized to fp16 instead of the default "
"(int8)"),
Flag("quantize_weights", parsed_flags.quantize_weights.bind(),
parsed_flags.quantize_weights.default_value(),
"Deprecated. Please use --post_training_quantize instead."),
Flag("post_training_quantize", parsed_flags.post_training_quantize.bind(),
parsed_flags.post_training_quantize.default_value(),
"Boolean indicating whether to quantize the weights of the "
"converted float model. Model size will be reduced and there will "
"be latency improvements (at the cost of accuracy)."),
Flag("enable_select_tf_ops", parsed_flags.enable_select_tf_ops.bind(),
parsed_flags.enable_select_tf_ops.default_value(), ""),
Flag("force_select_tf_ops", parsed_flags.force_select_tf_ops.bind(),
parsed_flags.force_select_tf_ops.default_value(), ""),
Flag("unfold_batchmatmul", parsed_flags.unfold_batchmatmul.bind(),
parsed_flags.unfold_batchmatmul.default_value(), ""),
Flag("accumulation_type", parsed_flags.accumulation_type.bind(),
parsed_flags.accumulation_type.default_value(),
"Accumulation type to use with quantize_to_float16"),
Flag("allow_bfloat16", parsed_flags.allow_bfloat16.bind(),
parsed_flags.allow_bfloat16.default_value(), "")};
bool asked_for_help =
*argc == 2 && (!strcmp(argv[1], "--help") || !strcmp(argv[1], "-help"));
if (asked_for_help) {
*msg += tensorflow::Flags::Usage(argv[0], flags);
return false;
} else {
return tensorflow::Flags::Parse(argc, argv, flags);
}
}
namespace {
enum class FlagRequirement {
kNone,
kMustBeSpecified,
kMustNotBeSpecified,
kUseDefault,
};
template <typename T>
void EnforceFlagRequirement(const T& flag, const std::string& flag_name,
FlagRequirement requirement) {
if (requirement == FlagRequirement::kMustBeSpecified) {
QCHECK(flag.specified()) << "Missing required flag " << flag_name;
}
if (requirement == FlagRequirement::kMustNotBeSpecified) {
QCHECK(!flag.specified())
<< "Given other flags, this flag should not have been specified: "
<< flag_name;
}
}
template <typename T>
std::optional<T> GetFlagValue(const Arg<T>& flag, FlagRequirement requirement) {
if (flag.specified()) return flag.value();
if (requirement == FlagRequirement::kUseDefault) return flag.default_value();
return std::optional<T>();
}
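// Behavior sketch (illustration, not in the original file): for a flag that
// was never specified on the command line,
//   GetFlagValue(flag, FlagRequirement::kUseDefault) -> flag.default_value()
//   GetFlagValue(flag, FlagRequirement::kNone)       -> std::nullopt
// so kUseDefault flags are always written into the TocoFlags proto, while
// kNone flags are written only when the user set them explicitly.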
}
void ReadTocoFlagsFromCommandLineFlags(const ParsedTocoFlags& parsed_toco_flags,
TocoFlags* toco_flags) {
namespace port = toco::port;
port::CheckInitGoogleIsDone("InitGoogle is not done yet");
#define READ_TOCO_FLAG(name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
toco_flags->set_##name(flag_value.value()); \
} \
} while (false)
#define PARSE_TOCO_FLAG(Type, name, requirement) \
do { \
EnforceFlagRequirement(parsed_toco_flags.name, #name, requirement); \
auto flag_value = GetFlagValue(parsed_toco_flags.name, requirement); \
if (flag_value.has_value()) { \
Type x; \
QCHECK(Type##_Parse(flag_value.value(), &x)) \
<< "Unrecognized " << #Type << " value " \
<< parsed_toco_flags.name.value(); \
toco_flags->set_##name(x); \
} \
} while (false)
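// Expansion sketch (illustration): READ_TOCO_FLAG(drop_fake_quant,
// FlagRequirement::kNone) expands roughly to
//   EnforceFlagRequirement(parsed_toco_flags.drop_fake_quant,
//                          "drop_fake_quant", FlagRequirement::kNone);
//   auto flag_value = GetFlagValue(parsed_toco_flags.drop_fake_quant,
//                                  FlagRequirement::kNone);
//   if (flag_value.has_value()) {
//     toco_flags->set_drop_fake_quant(flag_value.value());
//   }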
PARSE_TOCO_FLAG(FileFormat, input_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(FileFormat, output_format, FlagRequirement::kUseDefault);
PARSE_TOCO_FLAG(IODataType, inference_type, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, inference_input_type, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_min, FlagRequirement::kNone);
READ_TOCO_FLAG(default_int16_ranges_max, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(reorder_across_fake_quant, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_custom_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(drop_control_dependency, FlagRequirement::kNone);
READ_TOCO_FLAG(debug_disable_recurrent_cell_fusion, FlagRequirement::kNone);
READ_TOCO_FLAG(propagate_fake_quant_num_bits, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_nudging_weights_to_use_fast_gemm_kernel,
FlagRequirement::kNone);
READ_TOCO_FLAG(dedupe_array_min_size_bytes, FlagRequirement::kNone);
READ_TOCO_FLAG(split_tflite_lstm_inputs, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_weights, FlagRequirement::kNone);
READ_TOCO_FLAG(quantize_to_float16, FlagRequirement::kNone);
READ_TOCO_FLAG(post_training_quantize, FlagRequirement::kNone);
READ_TOCO_FLAG(enable_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(force_select_tf_ops, FlagRequirement::kNone);
READ_TOCO_FLAG(unfold_batchmatmul, FlagRequirement::kNone);
PARSE_TOCO_FLAG(IODataType, accumulation_type, FlagRequirement::kNone);
READ_TOCO_FLAG(allow_bfloat16, FlagRequirement::kNone);
if (parsed_toco_flags.force_select_tf_ops.value() &&
!parsed_toco_flags.enable_select_tf_ops.value()) {
LOG(WARNING) << "--force_select_tf_ops should always be used with "
"--enable_select_tf_ops.";
}
if (parsed_toco_flags.input_type.specified()) {
LOG(WARNING)
<< "--input_type is deprecated. It was an ambiguous flag that set both "
"--input_data_types and --inference_input_type. If you are trying "
"to complement the input file with information about the type of "
"input arrays, use --input_data_type. If you are trying to control "
"the quantization/dequantization of real-numbers input arrays in "
"the output file, use --inference_input_type.";
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(parsed_toco_flags.input_type.value(),
&input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.input_types.specified()) {
LOG(WARNING)
<< "--input_types is deprecated. It was an ambiguous flag that set "
"both --input_data_types and --inference_input_type. If you are "
"trying to complement the input file with information about the "
"type of input arrays, use --input_data_type. If you are trying to "
"control the quantization/dequantization of real-numbers input "
"arrays in the output file, use --inference_input_type.";
std::vector<std::string> input_types =
absl::StrSplit(parsed_toco_flags.input_types.value(), ',');
QCHECK(!input_types.empty());
for (size_t i = 1; i < input_types.size(); i++) {
QCHECK_EQ(input_types[i], input_types[0]);
}
toco::IODataType input_type;
QCHECK(toco::IODataType_Parse(input_types[0], &input_type));
toco_flags->set_inference_input_type(input_type);
}
if (parsed_toco_flags.quantize_weights.value()) {
LOG(WARNING)
<< "--quantize_weights is deprecated. Falling back to "
"--post_training_quantize. Please switch --post_training_quantize.";
toco_flags->set_post_training_quantize(
parsed_toco_flags.quantize_weights.value());
}
if (parsed_toco_flags.quantize_weights.value()) {
if (toco_flags->inference_type() == IODataType::QUANTIZED_UINT8) {
LOG(WARNING)
<< "--post_training_quantize quantizes a graph of inference_type "
"FLOAT. Overriding inference type QUANTIZED_UINT8 to FLOAT.";
toco_flags->set_inference_type(IODataType::FLOAT);
}
}
#undef READ_TOCO_FLAG
#undef PARSE_TOCO_FLAG
}
} | #include "tensorflow/lite/toco/toco_cmdline_flags.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/testing/util.h"
namespace toco {
namespace {
TEST(TocoCmdlineFlagsTest, DefaultValue) {
int argc = 1;
const char* args[] = {"toco", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), true);
}
TEST(TocoCmdlineFlagsTest, ParseFlags) {
int argc = 2;
const char* args[] = {"toco", "--allow_dynamic_tensors=false", nullptr};
std::string message;
ParsedTocoFlags result_flags;
EXPECT_TRUE(ParseTocoFlagsFromCommandLineFlags(
&argc, const_cast<char**>(args), &message, &result_flags));
EXPECT_EQ(result_flags.allow_dynamic_tensors.value(), false);
}
}
}
int main(int argc, char** argv) {
::tflite::LogToStderr();
::testing::InitGoogleTest(&argc, argv);
::toco::port::InitGoogleWasDoneElsewhere();
return RUN_ALL_TESTS();
} |
193 | #ifndef TENSORFLOW_CORE_DATA_UNBOUNDED_THREAD_POOL_H_
#define TENSORFLOW_CORE_DATA_UNBOUNDED_THREAD_POOL_H_
#include <deque>
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/thread_factory.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/threadpool_interface.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
namespace data {
class UnboundedThreadPool : public thread::ThreadPoolInterface {
public:
UnboundedThreadPool(Env* env, const string& thread_name)
: unbounded_work_queue_(env, thread_name) {}
UnboundedThreadPool(Env* env, const string& thread_name,
const ThreadOptions& thread_options)
: unbounded_work_queue_(env, thread_name, thread_options) {}
~UnboundedThreadPool() override = default;
std::shared_ptr<ThreadFactory> get_thread_factory();
void Schedule(std::function<void()> fn) override;
int NumThreads() const override;
int CurrentThreadId() const override;
private:
class LogicalThreadFactory;
class LogicalThreadWrapper;
void ScheduleOnWorkQueue(std::function<void()> fn,
std::shared_ptr<Notification> done);
UnboundedWorkQueue unbounded_work_queue_;
};
}
}
#endif
#include "tensorflow/core/data/unbounded_thread_pool.h"
#include <functional>
#include <memory>
#include <utility>
#include "absl/memory/memory.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
namespace data {
class UnboundedThreadPool::LogicalThreadWrapper : public Thread {
public:
explicit LogicalThreadWrapper(std::shared_ptr<Notification> done)
: done_(std::move(done)) {}
~LogicalThreadWrapper() override {
done_->WaitForNotification();
}
private:
std::shared_ptr<Notification> done_;
};
class UnboundedThreadPool::LogicalThreadFactory : public ThreadFactory {
public:
explicit LogicalThreadFactory(UnboundedThreadPool* pool) : pool_(pool) {}
std::unique_ptr<Thread> StartThread(const string& name,
std::function<void()> fn) override {
auto done = std::make_shared<Notification>();
pool_->ScheduleOnWorkQueue(std::move(fn), done);
return std::make_unique<LogicalThreadWrapper>(std::move(done));
}
private:
UnboundedThreadPool* const pool_;
};
std::shared_ptr<ThreadFactory> UnboundedThreadPool::get_thread_factory() {
return std::make_shared<LogicalThreadFactory>(this);
}
void UnboundedThreadPool::Schedule(std::function<void()> fn) {
auto tagged_fn = [fn = std::move(fn)]() {
tensorflow::ResourceTagger tag(kTFDataResourceTag, "ThreadPool");
fn();
};
ScheduleOnWorkQueue(std::move(tagged_fn), nullptr);
}
int UnboundedThreadPool::NumThreads() const { return -1; }
int UnboundedThreadPool::CurrentThreadId() const { return -1; }
namespace {
void WorkQueueFunc(const std::function<void()>& fn,
std::shared_ptr<Notification> done) {
fn();
if (done) {
done->Notify();
}
}
}
void UnboundedThreadPool::ScheduleOnWorkQueue(
std::function<void()> fn, std::shared_ptr<Notification> done) {
unbounded_work_queue_.Schedule(
std::bind(&WorkQueueFunc, std::move(fn), std::move(done)));
}
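// Minimal usage sketch (assumption: this helper is not part of the original
// file). A Thread returned by the factory is purely logical: StartThread
// schedules the function on the shared work queue, and destroying the
// returned Thread blocks until the function has finished.
inline void UnboundedThreadPoolUsageSketch() {
  UnboundedThreadPool pool(Env::Default(), "sketch");
  std::shared_ptr<ThreadFactory> factory = pool.get_thread_factory();
  std::unique_ptr<Thread> t =
      factory->StartThread("logical_thread", [] { /* work */ });
  t.reset();  // Joins: waits on the Notification signaled after fn() runs.
}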
}
} | #include "tensorflow/core/data/unbounded_thread_pool.h"
#include <atomic>
#include <memory>
#include <vector>
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(UnboundedThreadPool, ConcurrentThreadCreation) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
const int kNumThreadsToCreate = 10;
std::atomic<int> i(0);
for (int j = 0; j < kNumThreadsToCreate; ++j) {
threads.push_back(thread_factory->StartThread("", [=, &i,
&thread_factory]() {
std::vector<std::unique_ptr<Thread>> nested_threads;
for (int k = 0; k < kNumThreadsToCreate; ++k) {
nested_threads.push_back(
thread_factory->StartThread("", [&i]() { ++i; }));
}
nested_threads.clear();
}));
}
threads.clear();
EXPECT_EQ(i, kNumThreadsToCreate * kNumThreadsToCreate);
}
TEST(UnboundedThreadPool, MultipleBlockingThreads) {
UnboundedThreadPool pool(Env::Default(), "test");
auto thread_factory = pool.get_thread_factory();
std::vector<std::unique_ptr<Thread>> threads;
std::vector<int> round_sizes = {5, 10, 15, 20};
for (const int round_size : round_sizes) {
Notification n;
BlockingCounter bc(round_size);
for (int j = 0; j < round_size; ++j) {
threads.push_back(thread_factory->StartThread("", [&bc, &n]() {
bc.DecrementCount();
n.WaitForNotification();
}));
}
bc.Wait();
n.Notify();
threads.clear();
}
}
}
}
} |
194 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_PRELU_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_PRELU_H_
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
namespace tflite {
namespace gpu {
namespace gl {
std::unique_ptr<NodeShader> NewPReLUNodeShader();
}
}
}
#endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/prelu.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class PReLULinearAlpha : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr);
auto alpha = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.alpha);
if (!alpha) {
return absl::InvalidArgumentError("Alpha is missing");
}
if (alpha->shape.v != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Alpha shape does not match the number of channels.");
}
*generated_code = GeneratedCode{
{},
{{"alpha", MakeReadonlyObject(alpha->data)}},
{},
uint3(static_cast<int>(ctx.output_shapes[0][2]),
static_cast<int>(ctx.output_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)),
uint3(),
"value_0 = max(value_0, 0.0) + $alpha[gid.z]$ * min(value_0, "
"0.0);",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
class PReLUFull : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr);
auto alpha = std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha);
if (!alpha) {
return absl::InvalidArgumentError("Alpha is missing");
}
if (alpha->shape.h != ctx.output_shapes[0][1] ||
alpha->shape.w != ctx.output_shapes[0][2] ||
alpha->shape.c != ctx.output_shapes[0][3]) {
return absl::InvalidArgumentError(
"Alpha shape does not match input shape.");
}
ObjectSize obj_size =
uint3(static_cast<int>(ctx.output_shapes[0][2]),
static_cast<int>(ctx.output_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4));
*generated_code = GeneratedCode{
{},
{{"alpha", MakeReadonlyObject(obj_size, ConvertToPHWC4(*alpha))}},
{},
uint3(static_cast<int>(ctx.output_shapes[0][2]),
static_cast<int>(ctx.output_shapes[0][1]),
DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)),
uint3(),
"value_0 = max(value_0, 0.0) + $alpha[gid.x, gid.y, gid.z]$ "
"* min(value_0, 0.0);",
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
};
class PReLU : public NodeShader {
public:
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr);
auto* alpha = std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha);
return alpha ? full_.GenerateCode(ctx, generated_code)
: linear_.GenerateCode(ctx, generated_code);
}
private:
PReLULinearAlpha linear_;
PReLUFull full_;
};
}
std::unique_ptr<NodeShader> NewPReLUNodeShader() {
return std::make_unique<PReLU>();
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/kernels/prelu.h"
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace gl {
namespace {
TEST(PReluTest, LinearAlpha) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
PReLUAttributes attr;
Tensor<Linear, DataType::FLOAT32> alpha;
alpha.shape.v = 1;
alpha.id = 1;
alpha.data = {2};
attr.alpha = std::move(alpha);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 1);
SingleOpModel model({ToString(OperationType::PRELU), attr}, {input},
{output});
ASSERT_TRUE(model.PopulateTensor(0, {-1.0, -2.0, 1.0, 2.0}));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, -4, 1, 2}));
}
TEST(PReluTest, 2DAlpha) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 1);
OperationType op_type = OperationType::PRELU;
PReLUAttributes attr;
Tensor<HWC, DataType::FLOAT32> alpha;
alpha.shape = HWC(2, 2, 1);
alpha.id = 1;
alpha.data = {1, 2, 2, 2};
attr.alpha = std::move(alpha);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 1);
SingleOpModel model({ToString(op_type), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {0.0, -1.0, 2.0, -3.0}));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0, -2, 2, -6}));
}
TEST(PReluTest, 2DAlphaWidthNotEqualHeight) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 1, 1);
OperationType op_type = OperationType::PRELU;
PReLUAttributes attr;
Tensor<HWC, DataType::FLOAT32> alpha;
alpha.shape = HWC(2, 1, 1);
alpha.id = 1;
alpha.data = {1, 1};
attr.alpha = std::move(alpha);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 1, 1);
SingleOpModel model({ToString(op_type), attr}, {input}, {output});
ASSERT_TRUE(model.PopulateTensor(0, {-1.0, -1.0}));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-1, -1}));
}
TEST(PReluTest, 3DAlpha) {
TensorRef<BHWC> input;
input.type = DataType::FLOAT32;
input.ref = 0;
input.shape = BHWC(1, 2, 2, 2);
OperationType op_type = OperationType::PRELU;
PReLUAttributes attr;
Tensor<HWC, DataType::FLOAT32> alpha;
alpha.shape = HWC(2, 2, 2);
alpha.id = 1;
alpha.data = {1, 1, 2, 2, 2, 2, 2, 2};
attr.alpha = std::move(alpha);
TensorRef<BHWC> output;
output.type = DataType::FLOAT32;
output.ref = 2;
output.shape = BHWC(1, 2, 2, 2);
SingleOpModel model({ToString(op_type), attr}, {input}, {output});
ASSERT_TRUE(
model.PopulateTensor(0, {0.0, 0.0, -1.0, -1.0, 2.0, 2.0, -3.0, -3.0}));
ASSERT_OK(model.Invoke(*NewPReLUNodeShader()));
EXPECT_THAT(model.GetOutput(0),
Pointwise(FloatNear(1e-6), {0, 0, -2, -2, 2, 2, -6, -6}));
}
}
}
}
} |
195 | #ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_HYBRID_SLOW_START_H_
#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_HYBRID_SLOW_START_H_
#include <cstdint>
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_export.h"
namespace quic {
class QUICHE_EXPORT HybridSlowStart {
public:
HybridSlowStart();
HybridSlowStart(const HybridSlowStart&) = delete;
HybridSlowStart& operator=(const HybridSlowStart&) = delete;
void OnPacketAcked(QuicPacketNumber acked_packet_number);
void OnPacketSent(QuicPacketNumber packet_number);
bool ShouldExitSlowStart(QuicTime::Delta rtt, QuicTime::Delta min_rtt,
QuicPacketCount congestion_window);
void Restart();
bool IsEndOfRound(QuicPacketNumber ack) const;
void StartReceiveRound(QuicPacketNumber last_sent);
bool started() const { return started_; }
private:
enum HystartState {
NOT_FOUND,
DELAY,
};
bool started_;
HystartState hystart_found_;
QuicPacketNumber last_sent_packet_number_;
QuicPacketNumber end_packet_number_;
uint32_t rtt_sample_count_;
QuicTime::Delta current_min_rtt_;
};
}
#endif
#include "quiche/quic/core/congestion_control/hybrid_slow_start.h"
#include <algorithm>
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
const int64_t kHybridStartLowWindow = 16;
const uint32_t kHybridStartMinSamples = 8;
const int kHybridStartDelayFactorExp = 3;
const int64_t kHybridStartDelayMinThresholdUs = 4000;
const int64_t kHybridStartDelayMaxThresholdUs = 16000;
HybridSlowStart::HybridSlowStart()
: started_(false),
hystart_found_(NOT_FOUND),
rtt_sample_count_(0),
current_min_rtt_(QuicTime::Delta::Zero()) {}
void HybridSlowStart::OnPacketAcked(QuicPacketNumber acked_packet_number) {
if (IsEndOfRound(acked_packet_number)) {
started_ = false;
}
}
void HybridSlowStart::OnPacketSent(QuicPacketNumber packet_number) {
last_sent_packet_number_ = packet_number;
}
void HybridSlowStart::Restart() {
started_ = false;
hystart_found_ = NOT_FOUND;
}
void HybridSlowStart::StartReceiveRound(QuicPacketNumber last_sent) {
QUIC_DVLOG(1) << "Reset hybrid slow start @" << last_sent;
end_packet_number_ = last_sent;
current_min_rtt_ = QuicTime::Delta::Zero();
rtt_sample_count_ = 0;
started_ = true;
}
bool HybridSlowStart::IsEndOfRound(QuicPacketNumber ack) const {
return !end_packet_number_.IsInitialized() || end_packet_number_ <= ack;
}
bool HybridSlowStart::ShouldExitSlowStart(QuicTime::Delta latest_rtt,
QuicTime::Delta min_rtt,
QuicPacketCount congestion_window) {
if (!started_) {
StartReceiveRound(last_sent_packet_number_);
}
if (hystart_found_ != NOT_FOUND) {
return true;
}
rtt_sample_count_++;
if (rtt_sample_count_ <= kHybridStartMinSamples) {
if (current_min_rtt_.IsZero() || current_min_rtt_ > latest_rtt) {
current_min_rtt_ = latest_rtt;
}
}
if (rtt_sample_count_ == kHybridStartMinSamples) {
int64_t min_rtt_increase_threshold_us =
min_rtt.ToMicroseconds() >> kHybridStartDelayFactorExp;
min_rtt_increase_threshold_us = std::min(min_rtt_increase_threshold_us,
kHybridStartDelayMaxThresholdUs);
QuicTime::Delta min_rtt_increase_threshold =
QuicTime::Delta::FromMicroseconds(std::max(
min_rtt_increase_threshold_us, kHybridStartDelayMinThresholdUs));
if (current_min_rtt_ > min_rtt + min_rtt_increase_threshold) {
hystart_found_ = DELAY;
}
}
return congestion_window >= kHybridStartLowWindow &&
hystart_found_ != NOT_FOUND;
}
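// Worked example (illustration): with min_rtt = 60ms, the increase threshold
// is 60000us >> 3 = 7500us, clamped to [4000us, 16000us], i.e. 7.5ms. Slow
// start is then exited once the smallest of the first 8 RTT samples in a
// round exceeds 67.5ms, provided the congestion window is at least 16
// packets (kHybridStartLowWindow).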
} | #include "quiche/quic/core/congestion_control/hybrid_slow_start.h"
#include <memory>
#include <utility>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
class HybridSlowStartTest : public QuicTest {
protected:
HybridSlowStartTest()
: one_ms_(QuicTime::Delta::FromMilliseconds(1)),
rtt_(QuicTime::Delta::FromMilliseconds(60)) {}
void SetUp() override { slow_start_ = std::make_unique<HybridSlowStart>(); }
const QuicTime::Delta one_ms_;
const QuicTime::Delta rtt_;
std::unique_ptr<HybridSlowStart> slow_start_;
};
TEST_F(HybridSlowStartTest, Simple) {
QuicPacketNumber packet_number(1);
QuicPacketNumber end_packet_number(3);
slow_start_->StartReceiveRound(end_packet_number);
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number));
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
end_packet_number = QuicPacketNumber(20);
slow_start_->StartReceiveRound(end_packet_number);
while (packet_number < end_packet_number) {
EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
}
EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
}
TEST_F(HybridSlowStartTest, Delay) {
const int kHybridStartMinSamples = 8;
QuicPacketNumber end_packet_number(1);
slow_start_->StartReceiveRound(end_packet_number++);
for (int n = 0; n < kHybridStartMinSamples; ++n) {
EXPECT_FALSE(slow_start_->ShouldExitSlowStart(
rtt_ + QuicTime::Delta::FromMilliseconds(n), rtt_, 100));
}
slow_start_->StartReceiveRound(end_packet_number++);
for (int n = 1; n < kHybridStartMinSamples; ++n) {
EXPECT_FALSE(slow_start_->ShouldExitSlowStart(
rtt_ + QuicTime::Delta::FromMilliseconds(n + 10), rtt_, 100));
}
EXPECT_TRUE(slow_start_->ShouldExitSlowStart(
rtt_ + QuicTime::Delta::FromMilliseconds(10), rtt_, 100));
}
}
} |
196 | #ifndef XLA_SERVICE_GPU_DOT_DIMENSION_SORTER_H_
#define XLA_SERVICE_GPU_DOT_DIMENSION_SORTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class DotDimensionSorter : public HloModulePass {
public:
absl::string_view name() const override { return "dot_dimension_sorter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/dot_dimension_sorter.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
absl::Status SortDotDimensions(HloDotInstruction* dot) {
const DotDimensionNumbers& dims = dot->dot_dimension_numbers();
DotDimensionNumbers new_dims(dims);
new_dims.clear_lhs_contracting_dimensions();
new_dims.clear_rhs_contracting_dimensions();
const bool sort_by_lhs =
DistinctNumbersAreConsecutiveIfSorted(dims.lhs_contracting_dimensions());
const absl::Span<const int64_t>& sort_key =
sort_by_lhs ? dims.lhs_contracting_dimensions()
: dims.rhs_contracting_dimensions();
std::vector<int64_t> permutation;
for (const int64_t a : sort_key) {
permutation.push_back(a - *absl::c_min_element(sort_key));
}
const std::vector<int64_t> sorted_lhs =
Permute(dims.lhs_contracting_dimensions(), permutation);
*new_dims.mutable_lhs_contracting_dimensions() = {sorted_lhs.begin(),
sorted_lhs.end()};
const std::vector<int64_t> sorted_rhs =
Permute(dims.rhs_contracting_dimensions(), permutation);
*new_dims.mutable_rhs_contracting_dimensions() = {sorted_rhs.begin(),
sorted_rhs.end()};
std::unique_ptr<HloInstruction> new_dot = HloInstruction::CreateDot(
dot->shape(), dot->mutable_operand(0), dot->mutable_operand(1), new_dims,
dot->precision_config(), {dot->sparsity().begin(), dot->sparsity().end()},
absl::MakeSpan(dot->operands()).subspan(HloDotInstruction::kOperands));
dot->SetupDerivedInstruction(new_dot.get());
VLOG(3) << "Sorted dot() dimensions:\n"
<< "\t before: " << dot->ToString() << "\n"
<< "\t after: " << new_dot->ToString();
return dot->parent()->ReplaceWithNewInstruction(dot, std::move(new_dot));
}
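// Worked example (illustration): for lhs_contracting_dims = {3, 2} and
// rhs_contracting_dims = {2, 1}, the lhs side is consecutive-if-sorted, so it
// serves as the sort key. The permutation is {3 - 2, 2 - 2} = {1, 0};
// permuting both sides yields lhs = {2, 3} and rhs = {1, 2}, so both lists
// end up sorted while matching lhs/rhs dimension pairs stay aligned.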
}
absl::StatusOr<bool> DotDimensionSorter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> dots_to_process;
for (const HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kDot) {
continue;
}
if ((instr->operand(0)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(0)->shape().layout())) ||
(instr->operand(1)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(1)->shape().layout()))) {
continue;
}
const DotDimensionNumbers& dims = instr->dot_dimension_numbers();
if (dims.lhs_contracting_dimensions_size() == 0) {
continue;
}
const bool cons_lhs = DistinctNumbersAreConsecutiveIfSorted(
dims.lhs_contracting_dimensions());
const bool cons_rhs = DistinctNumbersAreConsecutiveIfSorted(
dims.rhs_contracting_dimensions());
const bool sorted_lhs =
absl::c_is_sorted(dims.lhs_contracting_dimensions());
const bool sorted_rhs =
absl::c_is_sorted(dims.rhs_contracting_dimensions());
if ((cons_lhs && !sorted_lhs && !cons_rhs) ||
(cons_rhs && !sorted_rhs && !cons_lhs) ||
(cons_lhs && !sorted_lhs && cons_rhs && !sorted_rhs)) {
dots_to_process.push_back(instr);
}
}
}
if (dots_to_process.empty()) {
return false;
}
for (HloInstruction* dot : dots_to_process) {
TF_RETURN_IF_ERROR(SortDotDimensions(Cast<HloDotInstruction>(dot)));
}
return true;
}
}
} | #include "xla/service/gpu/dot_dimension_sorter.h"
#include <memory>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class WithoutDotDimensionSorterTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.add_xla_disable_hlo_passes("dot_dimension_sorter");
return debug_options;
}
};
TEST_F(WithoutDotDimensionSorterTest, UnsortedDimsCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK: transpose
)");
}
TEST_F(WithoutDotDimensionSorterTest, SortedDimsDoNotCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: transpose
)");
}
TEST_F(WithoutDotDimensionSorterTest, DimOrderCanBeChanged) {
const char* hlo_text_ref = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
const char* hlo_text_modified = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_ref, hlo_text_modified,
ErrorSpec{1e-5, 1e-3},
true));
}
using DotDimensionSorterTest = GpuCodegenTest;
TEST_F(DotDimensionSorterTest, SortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
const auto& dims =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims.lhs_contracting_dimensions(0), 3);
EXPECT_EQ(dims.lhs_contracting_dimensions(1), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(1), 1);
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
const auto& dims2 =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims2.lhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims2.lhs_contracting_dimensions(1), 3);
EXPECT_EQ(dims2.rhs_contracting_dimensions(0), 1);
EXPECT_EQ(dims2.rhs_contracting_dimensions(1), 2);
}
TEST_F(DotDimensionSorterTest, NothingToReorder) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_FALSE(modified);
}
TEST_F(DotDimensionSorterTest, SparseDotSortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,16] parameter(0)
p1 = f16[122,96,32] parameter(1)
meta = u16[1,144,96,2] parameter(2)
ROOT _ = f16[1,144,122] dot(p0, p1, meta), sparsity=L.3@2:4,
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
  HloDotInstruction* dot = DynCast<HloDotInstruction>(
      module->entry_computation()->root_instruction());
  ASSERT_NE(dot, nullptr);
  EXPECT_EQ(dot->sparse_operands(), 1);
}
}
}
} |
197 | #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SOFTMAX1X1_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SOFTMAX1X1_H_
#include <string>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
namespace tflite {
namespace gpu {
class Softmax1x1 : public GPUOperation {
public:
Softmax1x1() = default;
Softmax1x1(const OperationDef& definition, const GpuInfo& gpu_info,
const BHWC& shape);
void GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info,
std::vector<int3>* work_groups) const override {
work_groups->push_back(work_group_size_);
}
absl::Status BindArguments(ArgumentsBinder* args) override;
int3 GetGridSize() const override;
Softmax1x1(Softmax1x1&& kernel);
Softmax1x1& operator=(Softmax1x1&& kernel);
Softmax1x1(const Softmax1x1&) = delete;
Softmax1x1& operator=(const Softmax1x1&) = delete;
friend Softmax1x1 CreateSoftmax1x1();
private:
std::string GetSoftmaxKernelCode(const OperationDef& op_def);
};
Softmax1x1 CreateSoftmax1x1(const OperationDef& definition,
const GpuInfo& gpu_info, const BHWC& shape);
}
}
#endif
#include "tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
namespace {
std::string MakeAccOp(OperationType op_type, const std::string& a,
const std::string& b) {
if (op_type == OperationType::ADD) {
return a + " = " + a + " + " + b;
} else if (op_type == OperationType::MAXIMUM) {
return a + " = max(" + a + ", " + b + ")";
  } else {
    // Other op types leave the accumulator expression unchanged.
    return a;
  }
}
std::string GetReduceCode(const std::string& value, OperationType op_type,
                          int group_reduction_size) {
  // Decompose the group size into per-stage fan-ins whose product equals the
  // group size; each stage reduces that many partials per active thread.
  std::vector<int> stages;
  if (group_reduction_size == 1024) {
    stages = {8, 8, 4, 4};
} else if (group_reduction_size == 512) {
stages = {8, 8, 8};
} else if (group_reduction_size == 256) {
stages = {8, 8, 4};
} else if (group_reduction_size == 128) {
stages = {8, 4, 4};
} else if (group_reduction_size == 64) {
stages = {8, 8};
} else if (group_reduction_size == 32) {
stages = {8, 4};
} else if (group_reduction_size == 16) {
stages = {4, 4};
} else if (group_reduction_size <= 8) {
stages = {group_reduction_size};
}
std::string c;
c += " LOCAL_MEM_BARRIER;\n";
c += " loc_mem[tid] = " + value + ";\n";
int stride = 1;
for (int i = 0; i < stages.size(); ++i) {
const bool last_stage = i == stages.size() - 1;
const std::string condition =
last_stage ? "tid == 0"
: "tid % " + std::to_string(stride * stages[i]) + " == 0";
const std::string location = last_stage ? "loc_mem[0]" : "loc_mem[tid]";
c += " LOCAL_MEM_BARRIER;\n";
c += " if (" + condition + ") {\n";
for (int j = 1; j < stages[i]; ++j) {
c += " " +
MakeAccOp(op_type, value,
"loc_mem[tid + " + std::to_string(stride * j) + "]") +
";\n";
}
c += " " + location + " = " + value + ";\n";
c += " }\n";
stride *= stages[i];
}
c += " LOCAL_MEM_BARRIER;\n";
c += " " + value + " = loc_mem[0];\n";
return c;
}
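// Illustrative host-side emulation (an assumption for exposition, not part of
// the generated kernel): what the emitted code computes for an ADD reduction
// with group size 16 and stages {4, 4}. loc_mem must hold 16 partial values.
#include <vector>

float EmulateStagedAdd(std::vector<float> loc_mem) {
  // Stage 0: stride 1, fan-in 4 -- every 4th "thread" sums its 3 neighbors.
  for (int tid = 0; tid < 16; tid += 4) {
    for (int j = 1; j < 4; ++j) loc_mem[tid] += loc_mem[tid + j];
  }
  // Stage 1 (last): "thread" 0 sums the partial results found at stride 4
  // and publishes the total in loc_mem[0], which every thread then reads.
  for (int j = 1; j < 4; ++j) loc_mem[0] += loc_mem[4 * j];
  return loc_mem[0];
}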
}
Softmax1x1::Softmax1x1(const OperationDef& definition, const GpuInfo& gpu_info,
const BHWC& shape)
: GPUOperation(definition) {
  // Pick a reduction width suited to the vendor, then shrink it so it does
  // not greatly exceed the number of channel slices or the device limit.
  if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno7xx()) {
    work_group_size_ = int3(512, 1, 1);
  } else if (gpu_info.IsMali()) {
    work_group_size_ = int3(1024, 1, 1);
  } else {
    work_group_size_ = int3(128, 1, 1);
  }
  const int slices = DivideRoundUp(shape.c, 4);
  while (work_group_size_.x >= slices * 2) {
    work_group_size_.x /= 2;
  }
  while (work_group_size_.x >= gpu_info.GetMaxWorkGroupSizeForX()) {
    work_group_size_.x /= 2;
  }
code_ = GetSoftmaxKernelCode(definition_);
}
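// Worked example (assumed values) of the sizing above on a generic GPU:
// shape.c = 40 gives slices = DivideRoundUp(40, 4) = 10; starting from 128,
// the first loop halves while x >= 20: 128 -> 64 -> 32 -> 16, stopping at 16
// since 16 < 2 * 10, so 16 threads cooperate per (X, Y) position.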
Softmax1x1::Softmax1x1(Softmax1x1&& kernel) : GPUOperation(std::move(kernel)) {}
Softmax1x1& Softmax1x1::operator=(Softmax1x1&& kernel) {
if (this != &kernel) {
GPUOperation::operator=(std::move(kernel));
}
return *this;
}
std::string Softmax1x1::GetSoftmaxKernelCode(const OperationDef& op_def) {
  // Emits a kernel in which one work group handles a single (X, Y) position:
  // its threads cooperatively compute the channel-wise maximum and the sum of
  // exponentials via the staged local-memory reductions above, then each
  // thread writes its own normalized slice.
AddSrcTensor("src_tensor", op_def.src_tensors[0]);
AddDstTensor("dst_tensor", op_def.dst_tensors[0]);
args_.AddFloat("mask_x");
args_.AddFloat("mask_y");
args_.AddFloat("mask_z");
args_.AddFloat("mask_w");
std::string c;
c += "MAIN_FUNCTION($0) {\n";
if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
c += " int linear_id = GROUP_ID_1;\n";
c += " int X = linear_id / args.dst_tensor.Batch();\n";
c += " int B = linear_id % args.dst_tensor.Batch();\n";
c += " if (B >= args.dst_tensor.Batch()) return;\n";
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
} else {
c += " int X = GROUP_ID_1;\n";
}
c += " int Y = GROUP_ID_2;\n";
c += " if (X >= args.dst_tensor.Width()) return;\n";
c += " if (Y >= args.dst_tensor.Height()) return;\n";
c += " float4 mask = INIT_FLOAT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c +=
" float4 maxx4 = INIT_FLOAT4(args.src_tensor.Read<float>(X, Y, 0).x);\n";
c += " int tid = LOCAL_ID_0;\n";
const int group_reduction_size = work_group_size_.x;
c += " for (int s = tid; s < args.src_tensor.Slices(); s += " +
std::to_string(group_reduction_size) + ") {\n";
c += " float4 mask_a = s == args.src_tensor.Slices() - 1 ? mask : "
"INIT_FLOAT4(1.0f);\n";
c += " float4 mask_b = INIT_FLOAT4(1.0f) - mask_a;\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, s);\n";
c += " src = src * mask_a + mask_b * src.x;\n";
c += " maxx4 = max(maxx4, src);\n";
c += " }\n";
c += " float maximum = max(maxx4.x, maxx4.y);\n";
c += " maximum = max(maximum, maxx4.z);\n";
c += " maximum = max(maximum, maxx4.w);\n";
c += " __local float loc_mem[" + std::to_string(group_reduction_size) +
"];\n";
c += GetReduceCode("maximum", OperationType::MAXIMUM, group_reduction_size);
c += " float sum = 0.0f;\n";
c += " for (int s = tid; s < args.src_tensor.Slices(); s += " +
std::to_string(group_reduction_size) + ") {\n";
c += " float4 mask_temp = s == args.src_tensor.Slices() - 1 ? mask : "
"INIT_FLOAT4(1.0f);\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, s) - "
"INIT_FLOAT4(maximum);\n";
c += " sum += dot(mask_temp, exp(src));\n";
c += " }\n";
c += GetReduceCode("sum", OperationType::ADD, group_reduction_size);
c += " sum = 1.0f / sum;\n";
c += " int dst_s = GLOBAL_ID_0;\n";
c += " if (dst_s < args.dst_tensor.Slices()) {\n";
c += " float4 src = args.src_tensor.Read<float>(X, Y, dst_s) - "
"INIT_FLOAT4(maximum);\n";
c += " FLT4 res = TO_FLT4(exp(src) * sum);\n";
c += " args.dst_tensor.Write(res, X, Y, dst_s);\n";
c += " }\n";
c += "}\n";
return c;
}
absl::Status Softmax1x1::BindArguments(ArgumentsBinder* args) {
float4 mask = GetMaskForLastPlane(src_[0]->Channels());
RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x));
RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y));
RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z));
RETURN_IF_ERROR(args->SetFloat("mask_w", mask.w));
return absl::OkStatus();
}
int3 Softmax1x1::GetGridSize() const {
return int3(dst_[0]->Slices(), dst_[0]->Width() * dst_[0]->Batch(),
dst_[0]->Height());
}
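// Note on the dispatch above: the grid is (Slices(), Width() * Batch(),
// Height()), and with work_group_size_ = (N, 1, 1) each work group covers one
// (X, Y) position whose N threads cooperatively reduce over all slices while
// GLOBAL_ID_0 selects the slice each thread finally writes.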
Softmax1x1 CreateSoftmax1x1(const OperationDef& definition,
const GpuInfo& gpu_info, const BHWC& shape) {
return Softmax1x1(definition, gpu_info, shape);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/softmax_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Softmax1x1) {
auto status = Softmax1x1Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, Softmax1x1BigNumber) {
auto status = Softmax1x1BigNumberTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} |
198 | #ifndef TENSORFLOW_CORE_TFRT_COMMON_PJRT_STATE_H_
#define TENSORFLOW_CORE_TFRT_COMMON_PJRT_STATE_H_
#include <map>
#include <memory>
#include <set>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/local_device_state.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/stream_executor/integrations/tf_allocator_adapter.h"
#include "xla/tsl/framework/allocator.h"
#include "tensorflow/core/framework/resource_base.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
const char kPjRtStateResourceName[] = "pjrt_state";
using PjRtClientsMap = std::map<DeviceType, std::unique_ptr<xla::PjRtClient>>;
struct PjRtGpuClientCreationInfo {
std::set<int> allowed_devices;
std::unique_ptr<se::MultiDeviceAdapter> allocator;
std::unique_ptr<tsl::Allocator> host_memory_allocator;
std::map<int, std::unique_ptr<xla::LocalDeviceState>> local_device_states;
xla::LocalClient* local_client;
};
class PjRtState : public ResourceBase {
public:
static PjRtState* Create();
absl::StatusOr<xla::PjRtClient*> GetPjRtClient(const DeviceType& device_type);
absl::StatusOr<xla::PjRtClient*> GetOrCreatePjRtClient(
const DeviceType& device_type);
Status SetPjRtClient(const DeviceType& device_type,
std::unique_ptr<xla::PjRtClient> client);
Status MovePjRtClientToUnused(const DeviceType& device_type);
string DebugString() const override;
absl::Status SetPjRtGpuClientCreationInfo(
std::unique_ptr<PjRtGpuClientCreationInfo> info);
PjRtGpuClientCreationInfo* GetPjRtGpuClientCreationInfo();
private:
PjRtState() = default;
absl::Mutex mu_;
PjRtClientsMap clients_ ABSL_GUARDED_BY(mu_);
std::vector<std::unique_ptr<xla::PjRtClient>> unused_ ABSL_GUARDED_BY(mu_);
std::unique_ptr<PjRtGpuClientCreationInfo> pjrt_gpu_client_creation_info_
ABSL_GUARDED_BY(mu_);
};
}
#endif
#include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/tf_pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
PjRtState* PjRtState::Create() { return new PjRtState(); }
absl::StatusOr<xla::PjRtClient*> PjRtState::GetPjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
absl::StatusOr<xla::PjRtClient*> PjRtState::GetOrCreatePjRtClient(
const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
return it->second.get();
}
  xla::PjrtClientFactoryOptions options;
  TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtClient> client,
                      xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
                          device_type, options));
  clients_[device_type] =
      xla::TfPjRtClient::CreateTfPjRtClient(std::move(client));
  return clients_[device_type].get();
}
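// Hypothetical usage sketch, not from the TF sources (the surrounding types
// are real, the function below is illustrative): fetch-or-create a client and
// release the state afterwards. PjRtState keeps ownership of the client.
void ExampleUseOfPjRtState() {
  tensorflow::PjRtState* state = tensorflow::PjRtState::Create();
  absl::StatusOr<xla::PjRtClient*> client =
      state->GetOrCreatePjRtClient(tensorflow::DeviceType(tensorflow::DEVICE_CPU));
  if (client.ok()) {
    // Use *client here; do not delete it, the state owns it.
  }
  state->Unref();  // ResourceBase is reference counted; Create() returns ref 1.
}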
Status PjRtState::SetPjRtClient(const DeviceType& device_type,
std::unique_ptr<xla::PjRtClient> client) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
}
clients_[device_type] = std::move(client);
return absl::OkStatus();
}
Status PjRtState::MovePjRtClientToUnused(const DeviceType& device_type) {
absl::MutexLock lock(&mu_);
if (auto it = clients_.find(device_type); it != clients_.end()) {
unused_.push_back(std::move(it->second));
clients_.erase(it);
return absl::OkStatus();
}
return errors::NotFound("PjRt client not found for device type ",
device_type);
}
absl::Status PjRtState::SetPjRtGpuClientCreationInfo(
std::unique_ptr<PjRtGpuClientCreationInfo> info) {
absl::MutexLock lock(&mu_);
pjrt_gpu_client_creation_info_ = std::move(info);
return absl::OkStatus();
}
PjRtGpuClientCreationInfo* PjRtState::GetPjRtGpuClientCreationInfo() {
absl::MutexLock lock(&mu_);
return pjrt_gpu_client_creation_info_.get();
}
string PjRtState::DebugString() const { return "PjRtState"; }
} | #include "tensorflow/core/tfrt/common/pjrt_state.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace {
using tensorflow::PjRtState;
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
class PjRtStateTestFixture : public testing::Test {
protected:
PjRtStateTestFixture() { pjrt_state_ = PjRtState::Create(); }
~PjRtStateTestFixture() override {
tensorflow::core::ScopedUnref pjrt_state_ref(pjrt_state_);
}
PjRtState* pjrt_state_;
};
TEST_F(PjRtStateTestFixture, SetAndGetPjRtClient) {
  TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
      tensorflow::DEVICE_CPU, xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
TEST_F(PjRtStateTestFixture, AddAlreadyExistsPjRtClient) {
  TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
      tensorflow::DEVICE_CPU, xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_1,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
  TF_ASSERT_OK(pjrt_state_->SetPjRtClient(
      tensorflow::DEVICE_CPU, xla::GetTfrtCpuClient(true, 1).value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client_2,
pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU));
EXPECT_NE(pjrt_client_1, pjrt_client_2);
}
TEST_F(PjRtStateTestFixture, GetNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, DeletePjRtClient) {
  TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetTfrtCpuClient(true, 1));
xla::PjRtClient* pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_state_->GetPjRtClient(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
EXPECT_EQ(pjrt_client_ptr->platform_name(), "cpu");
}
TEST_F(PjRtStateTestFixture, DeleteNotExistPjRtClient) {
EXPECT_THAT(pjrt_state_->MovePjRtClientToUnused(tensorflow::DEVICE_CPU),
StatusIs(tensorflow::error::NOT_FOUND,
HasSubstr("PjRt client not found for device type")));
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientExist) {
  TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, xla::GetTfrtCpuClient(true, 1));
auto pjrt_client_ptr = pjrt_client.get();
TF_ASSERT_OK(pjrt_state_->SetPjRtClient(tensorflow::DEVICE_CPU,
std::move(pjrt_client)));
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_client_get,
pjrt_state_->GetOrCreatePjRtClient(tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client_get, pjrt_client_ptr);
}
TEST_F(PjRtStateTestFixture, GetOrCreatePjRtClientNotExist) {
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, pjrt_state_->GetOrCreatePjRtClient(
tensorflow::DEVICE_CPU));
EXPECT_THAT(pjrt_client, testing::NotNull());
}
} |
199 | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_SUMMARY_OPTIMIZER_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_EAGER_SUMMARY_OPTIMIZER_H_
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
namespace tensorflow::summary_optimizer {
namespace internal {
std::string NormalizeEdgeName(absl::string_view name);
}
std::pair<absl::string_view, bool> GetDisableSummariesInputArg(
const FunctionDef& fdef);
std::vector<FunctionDef> StripSummaries(const FunctionDef& fdef,
const FunctionLibraryDefinition& flib);
std::string StrippedFunctionName(absl::string_view fname);
}
#endif
#include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
namespace tensorflow::summary_optimizer {
namespace {
constexpr char kDisableSummariesAtRuntime[] = "disable_summaries_at_runtime";
constexpr char kFlushSummaryWriter[] = "FlushSummaryWriter";
constexpr char kWriteSummary[] = "write_summary";
constexpr char kForwardFunctionName[] = "forward_function_name";
constexpr char kBackwardFunctionName[] = "backward_function_name";
constexpr char kEmptyString[] = "";
using summary_optimizer::internal::NormalizeEdgeName;
using ArgDef = OpDef::ArgDef;
// Points function-valued attributes (single or listed) at the stripped
// function variants.
void UpdateNestedFunctionName(NodeDef& ndef) {
for (auto& [k, v] : *ndef.mutable_attr()) {
if (v.has_func()) {
v.mutable_func()->set_name(StrippedFunctionName(v.func().name()));
} else if (v.list().func_size() > 0) {
for (auto& func : *v.mutable_list()->mutable_func()) {
func.set_name(StrippedFunctionName(func.name()));
}
}
}
}
// Drops inputs that refer to nodes which are no longer kept.
void PruneDeletedInputDeps(
    const absl::flat_hash_set<std::string>& nodes_to_keep, NodeDef& ndef) {
auto inputs = ndef.input();
ndef.clear_input();
for (const std::string& input : inputs) {
if (nodes_to_keep.contains(NormalizeEdgeName(input))) {
ndef.add_input(input);
}
}
}
FunctionDef StripSummary(const FunctionDef& fdef_with_summaries) {
  // Copies the function under a new name, then drops summary nodes
  // (FlushSummaryWriter ops and nodes under a write_summary name scope)
  // together with the inputs, control returns and control outputs that
  // referenced them.
  FunctionDef fdef = fdef_with_summaries;
fdef.mutable_signature()->set_name(
StrippedFunctionName(fdef.signature().name()));
auto nodes = fdef.node_def();
fdef.clear_node_def();
absl::flat_hash_set<std::string> nodes_to_keep;
absl::c_transform(nodes, std::inserter(nodes_to_keep, nodes_to_keep.end()),
[](const NodeDef& node_def) { return node_def.name(); });
absl::c_transform(fdef.signature().input_arg(),
std::inserter(nodes_to_keep, nodes_to_keep.end()),
[](const ArgDef& input_arg) { return input_arg.name(); });
for (const NodeDef& ndef : nodes) {
if (ndef.op() == kFlushSummaryWriter) nodes_to_keep.erase(ndef.name());
for (const auto& substr : absl::StrSplit(ndef.name(), '/')) {
if (substr == kWriteSummary) {
nodes_to_keep.erase(ndef.name());
break;
}
}
}
for (NodeDef& ndef : nodes) {
if (!nodes_to_keep.contains(ndef.name())) continue;
PruneDeletedInputDeps(nodes_to_keep, ndef);
UpdateNestedFunctionName(ndef);
*fdef.add_node_def() = std::move(ndef);
}
auto control_ret = fdef.control_ret();
fdef.clear_control_ret();
for (const auto& [signature_node_name, node_name] : control_ret) {
if (!nodes_to_keep.contains(NormalizeEdgeName(node_name))) continue;
fdef.mutable_control_ret()->insert({signature_node_name, node_name});
}
auto control_outputs = fdef.signature().control_output();
fdef.mutable_signature()->clear_control_output();
for (const std::string& control_output : control_outputs) {
if (!fdef.control_ret().contains(control_output)) continue;
fdef.mutable_signature()->add_control_output(control_output);
}
for (auto& [k, v] : *fdef.mutable_attr()) {
if (k == kForwardFunctionName || k == kBackwardFunctionName) {
v.set_s(StrippedFunctionName(v.s()));
}
if (k == kDisableSummariesAtRuntime) v.clear_list();
}
return fdef;
}
}
namespace internal {
std::string NormalizeEdgeName(absl::string_view name) {
  // Strips a leading control-dependency marker ('^') and any output-port
  // suffix (':<port>'), leaving the bare node name.
  std::vector<std::string> edge_name =
      absl::StrSplit(name, absl::ByAnyChar("^:"));
  return edge_name[0].empty() ? edge_name[1] : edge_name[0];
}
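// Worked examples of the normalization above (also exercised in the tests):
//   "include_summary"             -> "include_summary"
//   "^include_summary"            -> "include_summary"
//   "^include_summary:0"          -> "include_summary"
//   "^include_summary/identity:0" -> "include_summary/identity"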
}
std::pair<absl::string_view, bool> GetDisableSummariesInputArg(
    const FunctionDef& fdef) {
  // Returns the input-arg name stored in the disable_summaries_at_runtime
  // attribute together with its boolean, or {"", false} when the attribute
  // is absent or malformed.
auto it = fdef.attr().find(kDisableSummariesAtRuntime);
if (it == fdef.attr().end()) return {kEmptyString, false};
if (it->second.has_list()) {
const auto& list = it->second.list();
if (list.s_size() == 1 && list.b_size() == 1) {
return {list.s(0), list.b(0)};
}
}
return {kEmptyString, false};
}
std::vector<FunctionDef> StripSummaries(const FunctionDef& fdef,
const FunctionLibraryDefinition& flib) {
std::vector<FunctionDef> results;
if (GetDisableSummariesInputArg(fdef).first.empty()) return results;
results.push_back(StripSummary(fdef));
FunctionLibraryDefinition reachable_library = flib.ReachableDefinitions(fdef);
for (const std::string& fname : reachable_library.ListFunctionNames()) {
auto* nested_fdef = flib.Find(fname);
if (nested_fdef == nullptr) continue;
results.push_back(StripSummary(*nested_fdef));
}
return results;
}
std::string StrippedFunctionName(absl::string_view fname) {
return absl::StrCat(fname, "__instance__no_summaries");
}
} | #include "tensorflow/core/common_runtime/eager/summary_optimizer.h"
#include <algorithm>
#include <string>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using ::tensorflow::summary_optimizer::GetDisableSummariesInputArg;
using ::tensorflow::summary_optimizer::StrippedFunctionName;
using ::tensorflow::summary_optimizer::StripSummaries;
using ::tensorflow::summary_optimizer::internal::NormalizeEdgeName;
using ::tsl::protobuf::TextFormat;
using ::tsl::protobuf::util::MessageDifferencer;
template <typename T>
void CompareProto(const T& expected, const std::string& text_proto) {
T proto;
ASSERT_TRUE(TextFormat::ParseFromString(text_proto, &proto));
MessageDifferencer differencer;
EXPECT_TRUE(differencer.Compare(expected, proto));
}
TEST(SummaryOptimizerInternal, NormalizesEdgeName) {
EXPECT_EQ(NormalizeEdgeName("include_summary"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary:0"), "include_summary");
EXPECT_EQ(NormalizeEdgeName("^include_summary/identity:0"),
"include_summary/identity");
}
TEST(SummaryOptimizer, GetsDisableSummariesInputArg) {
FunctionDef fdef;
auto input_arg = GetDisableSummariesInputArg(fdef);
EXPECT_EQ(input_arg.first, "");
EXPECT_FALSE(input_arg.second);
AttrValue attr_val;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
list { s: "remove_summary" b: true }
)pb",
&attr_val));
fdef.mutable_attr()->insert({"disable_summaries_at_runtime", attr_val});
input_arg = GetDisableSummariesInputArg(fdef);
EXPECT_EQ(input_arg.first, "remove_summary");
EXPECT_TRUE(input_arg.second);
}
TEST(SummaryOptimizer, StripsSummaries) {
FunctionDef fdef;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
signature {
name: "train" # Function name should be updated.
input_arg: { name: "include_summaries" }
control_output: "out_pruned" # Control output should be pruned
# because it was pruned from
# `control_ret`.
control_output: "out"
}
node_def { name: "x" }
node_def {
name: "write_summary/Identity"
} # Node should get pruned based on name.
node_def {
name: "Identity/x"
input: "write_summary/Identity" # Summary scope input should get
# pruned.
input: "x"
}
node_def {
name: "nested_fn"
op: "PartitionedCall"
attr {
key: "f"
value: { func: { name: "nested_fn" } }
}
}
node_def {
name: "list_of_nested_fns"
op: "SomeCustomOp"
attr {
key: "functions"
value: {
list: {
func: { name: "nested_fn2" }
func: { name: "nested_fn3" }
}
}
}
}
node_def {
op: "FlushSummaryWriter"
} # Node should get pruned based on op.
control_ret {
key: "out_pruned",
value: "write_summary/Identity:0"
} # Control return should get pruned because node was pruned.
control_ret { key: "out", value: "Identity/x" }
attr {
key: "forward_function_name"
value: {
s: "__inference_train_1"
} # Forward function name should be updated.
}
attr {
key: "backward_function_name"
value: {
s: "__inference_train_2"
} # Backward function name should be updated.
}
attr {
key: "disable_summaries_at_runtime"
value: { list { s: "include_summaries" b: false } }
}
)pb",
&fdef));
FunctionDef nested_fdef;
nested_fdef.mutable_signature()->set_name("nested_fn");
FunctionDef nested_fdef2;
nested_fdef2.mutable_signature()->set_name("nested_fn2");
FunctionDef nested_fdef3;
nested_fdef3.mutable_signature()->set_name("nested_fn3");
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef2));
TF_ASSERT_OK(flib.AddFunctionDef(nested_fdef3));
std::vector<FunctionDef> stripped_fdefs = StripSummaries(fdef, flib);
ASSERT_EQ(stripped_fdefs.size(), 4);
struct {
bool operator()(const FunctionDef& lhs, const FunctionDef& rhs) const {
return lhs.signature().name() > rhs.signature().name();
}
} fdefOrdering;
std::sort(stripped_fdefs.begin(), stripped_fdefs.end(), fdefOrdering);
CompareProto(stripped_fdefs[0], R"pb(
signature {
name: "train__instance__no_summaries"
input_arg: { name: "include_summaries" }
control_output: "out"
}
node_def { name: "x" }
node_def { name: "Identity/x" input: "x" }
node_def {
name: "nested_fn"
op: "PartitionedCall"
attr {
key: "f"
value: { func: { name: "nested_fn__instance__no_summaries" } }
}
}
node_def {
name: "list_of_nested_fns"
op: "SomeCustomOp"
attr {
key: "functions"
value: {
list: {
func: { name: "nested_fn2__instance__no_summaries" }
func: { name: "nested_fn3__instance__no_summaries" }
}
}
}
}
control_ret { key: "out", value: "Identity/x" }
attr {
key: "forward_function_name",
value: { s: "__inference_train_1__instance__no_summaries" }
}
attr {
key: "backward_function_name",
value: { s: "__inference_train_2__instance__no_summaries" }
}
attr {
key: "disable_summaries_at_runtime"
value {}
}
)pb");
CompareProto(stripped_fdefs[1], R"pb(
signature { name: "nested_fn__instance__no_summaries" }
)pb");
CompareProto(stripped_fdefs[2], R"pb(
signature { name: "nested_fn3__instance__no_summaries" }
)pb");
CompareProto(stripped_fdefs[3], R"pb(
signature { name: "nested_fn2__instance__no_summaries" }
)pb");
}
TEST(SummaryOptimizer, DoesNotStripSummariesWhenNotEnabled) {
FunctionDef fdef;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(
signature { name: "train" }
attr {
key: "disable_summaries_at_runtime",
value: {}
}
)pb",
&fdef));
FunctionLibraryDefinition flib(OpRegistry::Global());
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
EXPECT_TRUE(StripSummaries(fdef, flib).empty());
fdef.clear_attr();
TF_ASSERT_OK(flib.RemoveFunction("train"));
TF_ASSERT_OK(flib.AddFunctionDef(fdef));
EXPECT_TRUE(StripSummaries(fdef, flib).empty());
}
TEST(SummaryOptimizer, GeneratesNewFunctionName) {
EXPECT_EQ(StrippedFunctionName("train"), "train__instance__no_summaries");
}
}
} |