ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
44
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 16
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 127
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 96
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
0ba8a671-bd6a-4936-ab30-7569bb8b140f | cpp | google/tensorstore | proto_binder | tensorstore/proto/proto_binder.cc | tensorstore/proto/proto_binder_test.cc | #include "tensorstore/proto/proto_binder.h"
#include <string>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
#include "google/protobuf/util/json_util.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/proto/proto_util.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
absl::Status JsonProtoBinderBase::operator()(std::true_type ,
const NoOptions& options,
google::protobuf::Message* obj,
::nlohmann::json* j) const {
if (!j->template get_ptr<::nlohmann::json::object_t*>()) {
return internal_json::ExpectedError(*j, "object");
}
std::string json_ascii = j->dump();
auto status = google::protobuf::util::JsonStringToMessage(json_ascii, obj);
if (status.ok()) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected JSON protocol buffer ", obj->GetDescriptor()->name(),
" object, but received ", j->dump(), " with error ",
std::string_view(status.message().data(), status.message().size())));
}
absl::Status JsonProtoBinderBase::operator()(std::false_type ,
const NoOptions& options,
const google::protobuf::Message* obj,
::nlohmann::json* j) const {
std::string json_ascii;
auto status = google::protobuf::util::MessageToJsonString(*obj, &json_ascii);
if (!status.ok()) {
return absl::InternalError(
std::string_view(status.message().data(), status.message().size()));
}
auto j_parse = ::nlohmann::json::parse(json_ascii, nullptr, false);
if (j_parse.template get_ptr<::nlohmann::json::object_t*>()) {
*j = std::move(j_parse);
return absl::OkStatus();
}
return absl::InternalError("Failed to serialize field as JSON proto");
}
absl::Status AsciiProtoBinderBase::operator()(std::true_type,
const NoOptions& options,
google::protobuf::Message* obj,
::nlohmann::json* j) const {
auto* str = j->template get_ptr<const std::string*>();
if (!str) {
return internal_json::ExpectedError(*j, "string");
}
if (TryParseTextProto(*str, obj)) {
return absl::OkStatus();
}
return absl::InvalidArgumentError(tensorstore::StrCat(
"Expected ASCII protocol buffer ", obj->GetDescriptor()->name(),
" object, but received ", *str));
}
absl::Status AsciiProtoBinderBase::operator()(std::false_type,
const NoOptions& options,
const google::protobuf::Message* obj,
::nlohmann::json* j) const {
std::string obj_text;
google::protobuf::TextFormat::PrintToString(*obj, &obj_text);
*j = obj_text;
return absl::OkStatus();
}
}
} | #include "tensorstore/proto/proto_binder.h"
#include <string>
#include <type_traits>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::JsonSerializationOptions;
using ::tensorstore::internal_json_binding::AsciiProtoBinder;
using ::tensorstore::internal_json_binding::JsonProtoBinder;
static inline constexpr JsonProtoBinder<::tensorstore::proto::Array>
ArrayJsonBinder = {};
static inline constexpr AsciiProtoBinder<::tensorstore::proto::Array>
ArrayAsciiBinder = {};
constexpr const char kProto[] = R"(dtype: "int64"
shape: 1
shape: 2
shape: 4
int_data: 1
int_data: 0
int_data: 2
int_data: 2
int_data: 4
int_data: 5
int_data: 6
int_data: 7
)";
TEST(ProtoBinderTest, Ascii) {
JsonSerializationOptions options;
::tensorstore::proto::Array proto;
::nlohmann::json j = std::string(kProto);
EXPECT_TRUE(ArrayAsciiBinder(std::true_type{}, options, &proto, &j).ok());
EXPECT_THAT(proto, EqualsProto(kProto));
::nlohmann::json out;
EXPECT_TRUE(ArrayAsciiBinder(std::false_type{}, options, &proto, &out).ok());
ASSERT_TRUE(out.get_ptr<const std::string*>());
EXPECT_EQ(*out.get_ptr<const std::string*>(), kProto);
}
TEST(ProtoBinderTest, Json) {
JsonSerializationOptions options;
::tensorstore::proto::Array proto;
::nlohmann::json j = ::nlohmann::json{{"dtype", "int64"},
{"shape", {1, 2, 4}},
{"int_data", {1, 0, 2, 2, 4, 5, 6, 7}}};
EXPECT_TRUE(ArrayJsonBinder(std::true_type{}, options, &proto, &j).ok());
EXPECT_THAT(proto, EqualsProto(kProto));
::nlohmann::json out;
EXPECT_TRUE(ArrayJsonBinder(std::false_type{}, options, &proto, &out).ok());
::nlohmann::json expected{
{"dtype", "int64"},
{"shape", {"1", "2", "4"}},
{"intData", {"1", "0", "2", "2", "4", "5", "6", "7"}}};
EXPECT_EQ(out, expected);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_binder.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_binder_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e6a53ecf-262f-4a73-8681-729d61a7cb24 | cpp | google/quiche | http_encoder | quiche/quic/core/http/http_encoder.cc | quiche/quic/core/http/http_encoder_test.cc | #include "quiche/quic/core/http/http_encoder.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
bool WriteFrameHeader(QuicByteCount length, HttpFrameType type,
QuicDataWriter* writer) {
return writer->WriteVarInt62(static_cast<uint64_t>(type)) &&
writer->WriteVarInt62(length);
}
QuicByteCount GetTotalLength(QuicByteCount payload_length, HttpFrameType type) {
return QuicDataWriter::GetVarInt62Len(payload_length) +
QuicDataWriter::GetVarInt62Len(static_cast<uint64_t>(type)) +
payload_length;
}
}
QuicByteCount HttpEncoder::GetDataFrameHeaderLength(
QuicByteCount payload_length) {
QUICHE_DCHECK_NE(0u, payload_length);
return QuicDataWriter::GetVarInt62Len(payload_length) +
QuicDataWriter::GetVarInt62Len(
static_cast<uint64_t>(HttpFrameType::DATA));
}
quiche::QuicheBuffer HttpEncoder::SerializeDataFrameHeader(
QuicByteCount payload_length, quiche::QuicheBufferAllocator* allocator) {
QUICHE_DCHECK_NE(0u, payload_length);
QuicByteCount header_length = GetDataFrameHeaderLength(payload_length);
quiche::QuicheBuffer header(allocator, header_length);
QuicDataWriter writer(header.size(), header.data());
if (WriteFrameHeader(payload_length, HttpFrameType::DATA, &writer)) {
return header;
}
QUIC_DLOG(ERROR)
<< "Http encoder failed when attempting to serialize data frame header.";
return quiche::QuicheBuffer();
}
std::string HttpEncoder::SerializeHeadersFrameHeader(
QuicByteCount payload_length) {
QUICHE_DCHECK_NE(0u, payload_length);
QuicByteCount header_length =
QuicDataWriter::GetVarInt62Len(payload_length) +
QuicDataWriter::GetVarInt62Len(
static_cast<uint64_t>(HttpFrameType::HEADERS));
std::string frame;
frame.resize(header_length);
QuicDataWriter writer(header_length, frame.data());
if (WriteFrameHeader(payload_length, HttpFrameType::HEADERS, &writer)) {
return frame;
}
QUIC_DLOG(ERROR)
<< "Http encoder failed when attempting to serialize headers "
"frame header.";
return {};
}
std::string HttpEncoder::SerializeSettingsFrame(const SettingsFrame& settings) {
QuicByteCount payload_length = 0;
std::vector<std::pair<uint64_t, uint64_t>> ordered_settings{
settings.values.begin(), settings.values.end()};
std::sort(ordered_settings.begin(), ordered_settings.end());
for (const auto& p : ordered_settings) {
payload_length += QuicDataWriter::GetVarInt62Len(p.first);
payload_length += QuicDataWriter::GetVarInt62Len(p.second);
}
QuicByteCount total_length =
GetTotalLength(payload_length, HttpFrameType::SETTINGS);
std::string frame;
frame.resize(total_length);
QuicDataWriter writer(total_length, frame.data());
if (!WriteFrameHeader(payload_length, HttpFrameType::SETTINGS, &writer)) {
QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize "
"settings frame header.";
return {};
}
for (const auto& p : ordered_settings) {
if (!writer.WriteVarInt62(p.first) || !writer.WriteVarInt62(p.second)) {
QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize "
"settings frame payload.";
return {};
}
}
return frame;
}
std::string HttpEncoder::SerializeGoAwayFrame(const GoAwayFrame& goaway) {
QuicByteCount payload_length = QuicDataWriter::GetVarInt62Len(goaway.id);
QuicByteCount total_length =
GetTotalLength(payload_length, HttpFrameType::GOAWAY);
std::string frame;
frame.resize(total_length);
QuicDataWriter writer(total_length, frame.data());
if (WriteFrameHeader(payload_length, HttpFrameType::GOAWAY, &writer) &&
writer.WriteVarInt62(goaway.id)) {
return frame;
}
QUIC_DLOG(ERROR)
<< "Http encoder failed when attempting to serialize goaway frame.";
return {};
}
std::string HttpEncoder::SerializePriorityUpdateFrame(
const PriorityUpdateFrame& priority_update) {
QuicByteCount payload_length =
QuicDataWriter::GetVarInt62Len(priority_update.prioritized_element_id) +
priority_update.priority_field_value.size();
QuicByteCount total_length = GetTotalLength(
payload_length, HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM);
std::string frame;
frame.resize(total_length);
QuicDataWriter writer(total_length, frame.data());
if (WriteFrameHeader(payload_length,
HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM,
&writer) &&
writer.WriteVarInt62(priority_update.prioritized_element_id) &&
writer.WriteBytes(priority_update.priority_field_value.data(),
priority_update.priority_field_value.size())) {
return frame;
}
QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize "
"PRIORITY_UPDATE frame.";
return {};
}
std::string HttpEncoder::SerializeAcceptChFrame(
const AcceptChFrame& accept_ch) {
QuicByteCount payload_length = 0;
for (const auto& entry : accept_ch.entries) {
payload_length += QuicDataWriter::GetVarInt62Len(entry.origin.size());
payload_length += entry.origin.size();
payload_length += QuicDataWriter::GetVarInt62Len(entry.value.size());
payload_length += entry.value.size();
}
QuicByteCount total_length =
GetTotalLength(payload_length, HttpFrameType::ACCEPT_CH);
std::string frame;
frame.resize(total_length);
QuicDataWriter writer(total_length, frame.data());
if (!WriteFrameHeader(payload_length, HttpFrameType::ACCEPT_CH, &writer)) {
QUIC_DLOG(ERROR)
<< "Http encoder failed to serialize ACCEPT_CH frame header.";
return {};
}
for (const auto& entry : accept_ch.entries) {
if (!writer.WriteStringPieceVarInt62(entry.origin) ||
!writer.WriteStringPieceVarInt62(entry.value)) {
QUIC_DLOG(ERROR)
<< "Http encoder failed to serialize ACCEPT_CH frame payload.";
return {};
}
}
return frame;
}
std::string HttpEncoder::SerializeOriginFrame(const OriginFrame& origin) {
QuicByteCount payload_length = 0;
for (const std::string& entry : origin.origins) {
constexpr QuicByteCount kLengthFieldOverhead = 2;
payload_length += kLengthFieldOverhead + entry.size();
}
QuicByteCount total_length =
GetTotalLength(payload_length, HttpFrameType::ORIGIN);
std::string frame;
frame.resize(total_length);
QuicDataWriter writer(total_length, frame.data());
if (!WriteFrameHeader(payload_length, HttpFrameType::ORIGIN, &writer)) {
QUIC_DLOG(ERROR) << "Http encoder failed to serialize ORIGIN frame header.";
return {};
}
for (const std::string& entry : origin.origins) {
if (!writer.WriteStringPiece16(entry)) {
QUIC_DLOG(ERROR)
<< "Http encoder failed to serialize ACCEPT_CH frame payload.";
return {};
}
}
return frame;
}
std::string HttpEncoder::SerializeGreasingFrame() {
uint64_t frame_type;
QuicByteCount payload_length;
std::string payload;
if (!GetQuicFlag(quic_enable_http3_grease_randomness)) {
frame_type = 0x40;
payload_length = 1;
payload = "a";
} else {
uint32_t result;
QuicRandom::GetInstance()->RandBytes(&result, sizeof(result));
frame_type = 0x1fULL * static_cast<uint64_t>(result) + 0x21ULL;
payload_length = result % 4;
if (payload_length > 0) {
payload.resize(payload_length);
QuicRandom::GetInstance()->RandBytes(payload.data(), payload_length);
}
}
QuicByteCount total_length = QuicDataWriter::GetVarInt62Len(frame_type) +
QuicDataWriter::GetVarInt62Len(payload_length) +
payload_length;
std::string frame;
frame.resize(total_length);
QuicDataWriter writer(total_length, frame.data());
bool success =
writer.WriteVarInt62(frame_type) && writer.WriteVarInt62(payload_length);
if (payload_length > 0) {
success &= writer.WriteBytes(payload.data(), payload_length);
}
if (success) {
return frame;
}
QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize "
"greasing frame.";
return {};
}
std::string HttpEncoder::SerializeWebTransportStreamFrameHeader(
WebTransportSessionId session_id) {
uint64_t stream_type =
static_cast<uint64_t>(HttpFrameType::WEBTRANSPORT_STREAM);
QuicByteCount header_length = QuicDataWriter::GetVarInt62Len(stream_type) +
QuicDataWriter::GetVarInt62Len(session_id);
std::string frame;
frame.resize(header_length);
QuicDataWriter writer(header_length, frame.data());
bool success =
writer.WriteVarInt62(stream_type) && writer.WriteVarInt62(session_id);
if (success && writer.remaining() == 0) {
return frame;
}
QUIC_DLOG(ERROR) << "Http encoder failed when attempting to serialize "
"WEBTRANSPORT_STREAM frame header.";
return {};
}
std::string HttpEncoder::SerializeMetadataFrameHeader(
QuicByteCount payload_length) {
QUICHE_DCHECK_NE(0u, payload_length);
QuicByteCount header_length =
QuicDataWriter::GetVarInt62Len(payload_length) +
QuicDataWriter::GetVarInt62Len(
static_cast<uint64_t>(HttpFrameType::METADATA));
std::string frame;
frame.resize(header_length);
QuicDataWriter writer(header_length, frame.data());
if (WriteFrameHeader(payload_length, HttpFrameType::METADATA, &writer)) {
return frame;
}
QUIC_DLOG(ERROR)
<< "Http encoder failed when attempting to serialize METADATA "
"frame header.";
return {};
}
} | #include "quiche/quic/core/http/http_encoder.h"
#include <string>
#include "absl/base/macros.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
TEST(HttpEncoderTest, SerializeDataFrameHeader) {
quiche::QuicheBuffer buffer = HttpEncoder::SerializeDataFrameHeader(
5, quiche::SimpleBufferAllocator::Get());
char output[] = {0x00,
0x05};
EXPECT_EQ(ABSL_ARRAYSIZE(output), buffer.size());
quiche::test::CompareCharArraysWithHexError(
"DATA", buffer.data(), buffer.size(), output, ABSL_ARRAYSIZE(output));
}
TEST(HttpEncoderTest, SerializeHeadersFrameHeader) {
std::string header =
HttpEncoder::SerializeHeadersFrameHeader( 7);
char output[] = {0x01,
0x07};
quiche::test::CompareCharArraysWithHexError("HEADERS", header.data(),
header.length(), output,
ABSL_ARRAYSIZE(output));
}
TEST(HttpEncoderTest, SerializeSettingsFrame) {
SettingsFrame settings;
settings.values[1] = 2;
settings.values[6] = 5;
settings.values[256] = 4;
char output[] = {0x04,
0x07,
0x01,
0x02,
0x06,
0x05,
0x41, 0x00,
0x04};
std::string frame = HttpEncoder::SerializeSettingsFrame(settings);
quiche::test::CompareCharArraysWithHexError(
"SETTINGS", frame.data(), frame.length(), output, ABSL_ARRAYSIZE(output));
}
TEST(HttpEncoderTest, SerializeGoAwayFrame) {
GoAwayFrame goaway;
goaway.id = 0x1;
char output[] = {0x07,
0x1,
0x01};
std::string frame = HttpEncoder::SerializeGoAwayFrame(goaway);
quiche::test::CompareCharArraysWithHexError(
"GOAWAY", frame.data(), frame.length(), output, ABSL_ARRAYSIZE(output));
}
TEST(HttpEncoderTest, SerializePriorityUpdateFrame) {
PriorityUpdateFrame priority_update1;
priority_update1.prioritized_element_id = 0x03;
uint8_t output1[] = {0x80, 0x0f, 0x07, 0x00,
0x01,
0x03};
std::string frame1 =
HttpEncoder::SerializePriorityUpdateFrame(priority_update1);
quiche::test::CompareCharArraysWithHexError(
"PRIORITY_UPDATE", frame1.data(), frame1.length(),
reinterpret_cast<char*>(output1), ABSL_ARRAYSIZE(output1));
PriorityUpdateFrame priority_update2;
priority_update2.prioritized_element_id = 0x05;
priority_update2.priority_field_value = "foo";
uint8_t output2[] = {0x80, 0x0f, 0x07, 0x00,
0x04,
0x05,
0x66, 0x6f, 0x6f};
std::string frame2 =
HttpEncoder::SerializePriorityUpdateFrame(priority_update2);
quiche::test::CompareCharArraysWithHexError(
"PRIORITY_UPDATE", frame2.data(), frame2.length(),
reinterpret_cast<char*>(output2), ABSL_ARRAYSIZE(output2));
}
TEST(HttpEncoderTest, SerializeEmptyOriginFrame) {
OriginFrame frame;
uint8_t expected[] = {0x0C,
0x00};
std::string output = HttpEncoder::SerializeOriginFrame(frame);
quiche::test::CompareCharArraysWithHexError(
"ORIGIN", output.data(), output.length(),
reinterpret_cast<char*>(expected), ABSL_ARRAYSIZE(expected));
}
TEST(HttpEncoderTest, SerializeOriginFrame) {
OriginFrame frame;
frame.origins = {"foo", "bar"};
uint8_t expected[] = {0x0C,
0x0A,
0x00, 0x003,
0x66, 0x6f, 0x6f,
0x00, 0x003,
0x62, 0x61, 0x72};
std::string output = HttpEncoder::SerializeOriginFrame(frame);
quiche::test::CompareCharArraysWithHexError(
"ORIGIN", output.data(), output.length(),
reinterpret_cast<char*>(expected), ABSL_ARRAYSIZE(expected));
}
TEST(HttpEncoderTest, SerializeAcceptChFrame) {
AcceptChFrame accept_ch;
uint8_t output1[] = {0x40, 0x89,
0x00};
std::string frame1 = HttpEncoder::SerializeAcceptChFrame(accept_ch);
quiche::test::CompareCharArraysWithHexError(
"ACCEPT_CH", frame1.data(), frame1.length(),
reinterpret_cast<char*>(output1), ABSL_ARRAYSIZE(output1));
accept_ch.entries.push_back({"foo", "bar"});
uint8_t output2[] = {0x40, 0x89,
0x08,
0x03, 0x66, 0x6f, 0x6f,
0x03, 0x62, 0x61, 0x72};
std::string frame2 = HttpEncoder::SerializeAcceptChFrame(accept_ch);
quiche::test::CompareCharArraysWithHexError(
"ACCEPT_CH", frame2.data(), frame2.length(),
reinterpret_cast<char*>(output2), ABSL_ARRAYSIZE(output2));
}
TEST(HttpEncoderTest, SerializeWebTransportStreamFrameHeader) {
WebTransportSessionId session_id = 0x17;
char output[] = {0x40, 0x41,
0x17};
std::string frame =
HttpEncoder::SerializeWebTransportStreamFrameHeader(session_id);
quiche::test::CompareCharArraysWithHexError("WEBTRANSPORT_STREAM",
frame.data(), frame.length(),
output, sizeof(output));
}
TEST(HttpEncoderTest, SerializeMetadataFrameHeader) {
std::string frame = HttpEncoder::SerializeMetadataFrameHeader(
7);
char output[] = {0x40, 0x4d,
0x07};
quiche::test::CompareCharArraysWithHexError(
"METADATA", frame.data(), frame.length(), output, ABSL_ARRAYSIZE(output));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/http_encoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/http_encoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
2dfa250a-7737-404a-b9d5-dbff2dca84a2 | cpp | tensorflow/tensorflow | task | tensorflow/lite/core/async/c/task.cc | tensorflow/lite/core/async/c/task_test.cc | #include "tensorflow/lite/core/async/c/task.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
extern "C" {
TfLiteStatus TfLiteExecutionTaskSetBuffer(TfLiteExecutionTask* task,
TfLiteIoType io_type,
const char* tensor_signature_name,
TfLiteBufferHandle handle) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteError;
return task->task->SetBufferHandle(io_type, tensor_signature_name, handle);
}
TfLiteStatus TfLiteExecutionTaskSetBufferByIndex(TfLiteExecutionTask* task,
int tensor_index,
TfLiteBufferHandle handle) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->SetBufferHandle(tensor_index, handle);
}
TfLiteStatus TfLiteExecutionTaskSetSync(TfLiteExecutionTask* task,
TfLiteIoType io_type,
const char* tensor_signature_name,
TfLiteSynchronization* sync) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteError;
return task->task->SetSynchronization(io_type, tensor_signature_name, sync);
}
TfLiteStatus TfLiteExecutionTaskSetSyncByIndex(TfLiteExecutionTask* task,
int tensor_index,
TfLiteSynchronization* sync) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->SetSynchronization(tensor_index, sync);
}
TfLiteBufferHandle TfLiteExecutionTaskGetBufferByName(
const TfLiteExecutionTask* task, TfLiteIoType io_type,
const char* tensor_signature_name) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return kTfLiteNullBufferHandle;
return task->task->GetBufferHandle(io_type, tensor_signature_name);
}
TfLiteSynchronization* TfLiteExecutionTaskGetSyncByName(
const TfLiteExecutionTask* task, TfLiteIoType io_type,
const char* tensor_signature_name) {
if (task == nullptr || task->task == nullptr ||
tensor_signature_name == nullptr)
return nullptr;
return task->task->GetSynchronization(io_type, tensor_signature_name);
}
TfLiteBufferHandle TfLiteExecutionTaskGetBufferByIndex(
const TfLiteExecutionTask* task, int tensor_index) {
if (task == nullptr || task->task == nullptr) return kTfLiteNullBufferHandle;
return task->task->GetBufferHandle(tensor_index);
}
TfLiteSynchronization* TfLiteExecutionTaskGetSyncByIndex(
const TfLiteExecutionTask* task, int tensor_index) {
if (task == nullptr || task->task == nullptr) return nullptr;
return task->task->GetSynchronization(tensor_index);
}
void* TfLiteExecutionTaskGetDelegateExecutionData(
const TfLiteExecutionTask* task, TfLiteAsyncKernel* kernel) {
if (task == nullptr || task->task == nullptr) return nullptr;
return task->task->GetDelegateExecutionData(kernel);
}
void TfLiteExecutionTaskSetDelegateExecutionData(TfLiteExecutionTask* task,
TfLiteAsyncKernel* kernel,
void* data) {
if (task == nullptr || task->task == nullptr) return;
task->task->SetDelegateExecutionData(kernel, data);
}
TfLiteStatus TfLiteExecutionTaskGetStatus(const TfLiteExecutionTask* task) {
if (task == nullptr || task->task == nullptr) return kTfLiteError;
return task->task->Status();
}
void TfLiteExecutionTaskSetStatus(TfLiteExecutionTask* task,
TfLiteStatus status) {
if (task == nullptr || task->task == nullptr) return;
task->task->SetStatus(status);
}
} | #include "tensorflow/lite/core/async/c/task.h"
#include <gtest/gtest.h>
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/async/task_internal.h"
#include "tensorflow/lite/core/c/common.h"
namespace {
class TfLiteExecutionTaskTest : public ::testing::Test {
protected:
void SetUp() override {
input_names_["x"] = 1;
input_names_["y"] = 2;
output_names_["a"] = 3;
task_.task->SetInputNameMap(&input_names_);
task_.task->SetOutputNameMap(&output_names_);
}
TfLiteExecutionTask* task() { return &task_; }
protected:
tflite::async::ExecutionTask::TensorNameMapT input_names_;
tflite::async::ExecutionTask::TensorNameMapT output_names_;
TfLiteExecutionTask task_;
};
TEST_F(TfLiteExecutionTaskTest, BasicTest) {
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetSync(task(), kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(
42, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeInput, "x"));
EXPECT_EQ(
43, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeInput, "y"));
EXPECT_EQ(
44, TfLiteExecutionTaskGetBufferByName(task(), kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(sync,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeInput, "x"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeInput, "y"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(task(), kTfLiteIoTypeOutput, "a"));
TfLiteSynchronizationDelete(sync);
}
TEST_F(TfLiteExecutionTaskTest, BasicTestByTensorIndex) {
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetBuffer(task(), kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk,
TfLiteExecutionTaskSetSync(task(), kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(42, TfLiteExecutionTaskGetBufferByIndex(task(), 1));
EXPECT_EQ(43, TfLiteExecutionTaskGetBufferByIndex(task(), 2));
EXPECT_EQ(44, TfLiteExecutionTaskGetBufferByIndex(task(), 3));
EXPECT_EQ(sync, TfLiteExecutionTaskGetSyncByIndex(task(), 1));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(task(), 2));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(task(), 3));
TfLiteSynchronizationDelete(sync);
}
TEST_F(TfLiteExecutionTaskTest, NullTest) {
EXPECT_EQ(kTfLiteError,
TfLiteExecutionTaskSetBuffer(nullptr, kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskSetSync(
nullptr, kTfLiteIoTypeInput, "x", nullptr));
EXPECT_EQ(kTfLiteNullBufferHandle, TfLiteExecutionTaskGetBufferByName(
nullptr, kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(nullptr,
TfLiteExecutionTaskGetSyncByName(nullptr, kTfLiteIoTypeInput, "x"));
EXPECT_EQ(kTfLiteNullBufferHandle,
TfLiteExecutionTaskGetBufferByIndex(nullptr, 3));
EXPECT_EQ(nullptr, TfLiteExecutionTaskGetSyncByIndex(nullptr, 3));
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskGetStatus(nullptr));
TfLiteExecutionTaskSetStatus(nullptr, kTfLiteOk);
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskSetBufferByIndex(nullptr, 0, 0));
EXPECT_EQ(kTfLiteError,
TfLiteExecutionTaskSetSyncByIndex(nullptr, 0, nullptr));
}
TEST_F(TfLiteExecutionTaskTest, StatusTest) {
EXPECT_EQ(kTfLiteOk, TfLiteExecutionTaskGetStatus(task()));
TfLiteExecutionTaskSetStatus(task(), kTfLiteError);
EXPECT_EQ(kTfLiteError, TfLiteExecutionTaskGetStatus(task()));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/c/task.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/c/task_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
256c9467-813a-47bb-bf44-f7c554ce6472 | cpp | tensorflow/tensorflow | encapsulate_subgraphs_pass | tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
#include "tensorflow/compiler/jit/shape_inference_helpers.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
const char* const kXlaCompiledKernelAttr = "_XlaCompiledKernel";
const char* const kXlaNumConstantArgsAttr = "_XlaNumConstantArgs";
const char* const kXlaNumResourceArgsAttr = "_XlaNumResourceArgs";
const char* const kXlaHostTransferSequencerAttr =
"_xla_host_transfer_sequencer";
const char* const kXlaHasReferenceVarsAttr = "_XlaHasReferenceVars";
namespace {
bool AreAllParentsGuaranteedConst(
const Node& n,
const absl::flat_hash_set<const Node*>& runtime_const_nodes) {
if (n.type_string() == "GuaranteeConst") {
return true;
}
bool all_parents_const = true;
bool atleast_one_non_control_edge = false;
for (const Edge* in : n.in_edges()) {
atleast_one_non_control_edge =
atleast_one_non_control_edge || !in->IsControlEdge();
if (!in->IsControlEdge() && runtime_const_nodes.count(in->src()) == 0) {
all_parents_const = false;
break;
}
}
return all_parents_const && atleast_one_non_control_edge;
}
void MarkGuaranteedConstants(
const Graph& graph,
const std::vector<std::pair<const Node*, Node*>>& src_arg_pairs) {
absl::flat_hash_set<const Node*> guaranteed_const_nodes;
std::vector<const Node*> srcs;
srcs.reserve(src_arg_pairs.size());
for (const auto& src_arg : src_arg_pairs) {
srcs.push_back(src_arg.first);
}
ReverseDFSFrom(
graph, srcs, nullptr,
[&guaranteed_const_nodes](const Node* n) {
if (AreAllParentsGuaranteedConst(*n, guaranteed_const_nodes)) {
guaranteed_const_nodes.insert(n);
}
});
for (auto& src_arg : src_arg_pairs) {
if (guaranteed_const_nodes.count(src_arg.first) != 0) {
VLOG(1) << "Guaranteed const found: " << src_arg.first->DebugString();
src_arg.second->AddAttr("_is_guaranteed_constant", true);
}
}
}
struct OutputInputTensorPairHasher {
uint64 operator()(std::pair<OutputTensor, InputTensor> const& s) const {
return Hash64Combine(OutputTensor::Hash()(s.first),
InputTensor::Hash()(s.second));
}
};
static const char* const kArgOp = "_Arg";
static const char* const kRetValOp = "_Retval";
// Rewrites a graph by pulling each cluster of nodes sharing the same value of
// `group_attribute` into its own function; in the output graph each cluster
// is replaced by a single node that calls the function.
class Encapsulator {
 public:
  Encapsulator(string group_attribute, Graph const* graph_in)
      : group_attribute_(std::move(group_attribute)), graph_in_(graph_in) {}
  // Phase 1: partitions the clustered nodes into per-cluster subgraphs and
  // records boundary crossings as args/results.
  Status SplitIntoSubgraphs(FunctionLibraryDefinition* library);
  // Phase 2: emits one FunctionDef per subgraph into `library`, optionally
  // letting `rewrite_subgraph_fn` transform each subgraph first.
  Status BuildFunctionDefs(const RewriteSubgraphFn& rewrite_subgraph_fn,
                           bool reuse_existing_functions,
                           FunctionLibraryDefinition* library);
  // Phase 3: builds the rewritten graph with call nodes in place of clusters.
  Status BuildOutputGraph(Graph* graph_out, FunctionLibraryDefinition* library);
 private:
  // State for a single extracted cluster.
  class Subgraph {
   public:
    // Lazily creates the subgraph's Graph and copies `node` into it.
    Node* MakeNodeImage(const Graph* graph_in, Node* node);
    // Returns the subgraph's Graph (owned by this Subgraph).
    Graph* GetGraph() const;
    // Converts the subgraph to a FunctionDef and registers it in `library`.
    Status BuildFunctionDef(const string& name_in,
                            const RewriteSubgraphFn& rewrite_subgraph_fn,
                            bool reuse_existing_functions,
                            FunctionLibraryDefinition* library);
    // Adds the node that calls this subgraph's function to `graph_out`.
    Status AddFunctionCallNode(
        const absl::flat_hash_map<const Node*, Node*>& node_images,
        Graph* graph_out);
    // Returns the call node; valid after AddFunctionCallNode() has run.
    Node* GetCallNode() const;
    // Maps a boundary edge to the index of the arg/result it was recorded as.
    int GetArgIndexForEdge(const Edge* edge) const;
    int GetResultIndexForEdge(const Edge* edge) const;
    // Records an edge entering the subgraph as a function argument.
    Status RecordArg(const Edge* edge,
                     const absl::flat_hash_map<const Node*, Node*>& node_images,
                     std::vector<std::pair<const Node*, Node*>>* src_arg_pairs);
    // Records a control edge leaving the subgraph as a control output.
    Status RecordControlResult(
        const Edge* edge,
        const absl::flat_hash_map<const Node*, Node*>& node_images);
    // Records a data edge leaving the subgraph as a function result.
    Status RecordResult(
        const Edge* edge,
        const absl::flat_hash_map<const Node*, Node*>& node_images);
    // Lazily creates the NoOp sequencer node used to order host transfers.
    Status MakeSequencingNode(const string& subgraph_name, Graph* graph_out);
    // Adds a control edge from the sequencer (if any) to the call node.
    void ConnectSequencerToCallNode(Graph* graph_out);
    // Re-registers the (possibly modified) subgraph under its existing name.
    Status ReplaceFunctionDef(FunctionLibraryDefinition* library);
   private:
    // The extracted subgraph; created on first MakeNodeImage() call.
    std::unique_ptr<Graph> graph_;
    // Device taken from the first node copied into the subgraph.
    string device_;
    // NodeDef for the node that will call the generated function.
    NodeDef call_node_def_;
    // Name the function was registered under (may differ from the cluster
    // name if a rewrite renamed it).
    string function_def_name_;
    Node* host_compute_key_placeholder_ = nullptr;
    // The call node in the output graph; set by AddFunctionCallNode().
    Node* call_node_;
    // Argument bookkeeping: index by source tensor and by destination tensor.
    absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash> args_by_src_;
    absl::flat_hash_map<InputTensor, int, InputTensor::Hash> args_by_dst_;
    // The _Arg nodes, in argument-index order.
    std::vector<Node*> args_;
    // Result index for each source tensor exported from the subgraph.
    absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash> results_;
    // Names of in-subgraph nodes exposed as control outputs.
    absl::flat_hash_set<string> control_output_nodes_;
    // NoOp node in the output graph sequencing host transfers, or null.
    Node* sequencer_ = nullptr;
  };
  // Reads the group attribute from `node`; leaves `attr` empty when absent.
  Status GetFunctionNameAttr(Node const* node, string* attr) const;
  // Routes each input-graph edge into/out of/within the subgraphs.
  Status CopySubgraphEdges(
      const absl::flat_hash_map<const Node*, Node*>& node_images,
      std::vector<std::pair<const Node*, Node*>>* src_arg_pairs);
  // Copies each clustered node into its subgraph.
  Status CopySubgraphNodes(
      absl::flat_hash_map<const Node*, Node*>* node_images);
  // Copies all non-clustered nodes into the output graph.
  Status CopyNodesToOutputGraph(
      Graph* graph_out, absl::flat_hash_map<const Node*, Node*>* node_images);
  // Adds one function-call node per subgraph to the output graph.
  Status AddFunctionCallNodes(
      const absl::flat_hash_map<const Node*, Node*>& node_images,
      Graph* graph_out);
  // Maps an edge endpoint to its image in the output graph (the call node
  // when the endpoint lives inside a subgraph).
  Status FindOutputImageOfEdgeSrc(
      const string& src_func_id, const string& dst_func_id,
      const absl::flat_hash_map<const Node*, Node*>& node_images,
      const Node* original_src_node, Node** src_image);
  // Maps an edge's source slot to the corresponding output-graph slot.
  int FindOutputSlotOfEdgeSrc(const string& src_func_id,
                              const string& dst_func_id,
                              const Edge* edge);
  Status FindOutputImageOfEdgeDst(
      const string& src_func_id, const string& dst_func_id,
      const absl::flat_hash_map<const Node*, Node*>& node_images,
      const Node* original_dst_node, Node** dst_image);
  int FindOutputSlotOfEdgeDst(const string& src_func_id,
                              const string& dst_func_id,
                              const Edge* edge);
  // Copies a single non-intra-subgraph edge into the output graph,
  // deduplicating via `edges_added`.
  Status CopyEdgeToOutputGraph(
      const Edge* edge, const string& src_func_id, const string& dst_func_id,
      const absl::flat_hash_map<const Node*, Node*>& node_images,
      Graph* graph_out,
      absl::flat_hash_set<std::pair<OutputTensor, InputTensor>,
                          OutputInputTensorPairHasher>* edges_added);
  // Copies all boundary/outside edges into the output graph.
  Status AddEdgesToOutputGraph(
      const absl::flat_hash_map<const Node*, Node*>& node_images,
      Graph* graph_out);
  // Copies the part of `graph` reverse-reachable from `sink_nodes` and
  // inlines any function-call nodes in the copy.
  Status MakePrunedGraphCopyAndInline(
      const Graph& graph, const std::vector<Node*>& sink_nodes,
      std::unique_ptr<Graph>* pruned_graph,
      absl::flat_hash_map<const Node*, Node*>* node_images,
      FunctionLibraryDefinition* library);
  // Attribute whose value names the cluster a node belongs to.
  const string group_attribute_;
  const Graph* graph_in_;
  // One Subgraph per distinct group-attribute value.
  absl::flat_hash_map<string, Subgraph> subgraphs_;
  Encapsulator(const Encapsulator&) = delete;
  void operator=(const Encapsulator&) = delete;
};
namespace {
// Topologically sorts `clusters` so that every cluster appears after all of
// its ancestors. `has_successors` holds clusters with at least one
// descendant; `ancestors` maps each cluster to the clusters it depends on.
// Implemented as an iterative DFS seeded from the sink clusters.
void TopologicalClusterSort(
    const absl::flat_hash_set<string>& clusters,
    const absl::flat_hash_set<string>& has_successors,
    const absl::flat_hash_map<string, absl::flat_hash_set<string>>& ancestors,
    std::vector<string>* sorted) {
  sorted->clear();
  // A stack entry is either an "enter" record (leave == false) or the
  // matching post-order "leave" record for a cluster.
  struct Work {
    string cluster;
    bool leave;
  };
  std::set<string> visited;
  std::vector<Work> stack;
  // Seed the traversal with clusters that have no successors.
  for (const auto& cluster : clusters) {
    if (has_successors.find(cluster) == has_successors.end()) {
      stack.push_back({cluster, false});
    }
  }
  while (!stack.empty()) {
    const Work item = stack.back();
    stack.pop_back();
    if (item.leave) {
      // All ancestors have been emitted; now emit this cluster.
      sorted->push_back(item.cluster);
      continue;
    }
    if (visited.find(item.cluster) != visited.end()) continue;
    visited.insert(item.cluster);
    // Push the post-order record first so it is processed after all of the
    // cluster's ancestors.
    stack.push_back({item.cluster, true});
    const auto& iter = ancestors.find(item.cluster);
    if (iter != ancestors.end()) {
      for (const auto& ancestor : iter->second) {
        stack.push_back({ancestor, false});
      }
    }
  }
  // Every cluster must have been reached from some sink cluster.
  CHECK(sorted->size() == clusters.size());
}
}  // namespace
// Returns the node that calls this subgraph's function in the output graph;
// only valid after AddFunctionCallNode() has run.
Node* Encapsulator::Subgraph::GetCallNode() const { return call_node_; }
int Encapsulator::Subgraph::GetArgIndexForEdge(const Edge* edge) const {
return args_by_dst_.at(InputTensor(edge->dst(), edge->dst_input()));
}
int Encapsulator::Subgraph::GetResultIndexForEdge(const Edge* edge) const {
return results_.at(OutputTensor(edge->src(), edge->src_output()));
}
// Copies `node` into this subgraph's Graph, creating the Graph on first use.
// The subgraph's device is taken from the first node copied in (assigned
// device wins over requested device).
Node* Encapsulator::Subgraph::MakeNodeImage(const Graph* graph_in, Node* node) {
  if (graph_ == nullptr) {
    graph_ = std::make_unique<Graph>(graph_in->op_registry());
    graph_->set_versions(graph_in->versions());
  }
  if (device_.empty()) {
    if (node->assigned_device_name().empty()) {
      device_ = node->requested_device();
    } else {
      device_ = node->assigned_device_name();
    }
  }
  return graph_->CopyNode(node);
}
// Returns the subgraph's Graph (owned by this Subgraph); null until the
// first MakeNodeImage() call.
Graph* Encapsulator::Subgraph::GetGraph() const { return graph_.get(); }
// Records `edge` (which enters the subgraph) as a function argument: creates
// a _Arg placeholder for the source tensor on first sight, then wires that
// placeholder to the in-subgraph image of the edge's destination.
// Uses TF_RETURN_IF_ERROR for error propagation, consistent with the rest of
// this file (the original spelled out `Status s; if (!s.ok()) return s;`).
Status Encapsulator::Subgraph::RecordArg(
    const Edge* edge,
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    std::vector<std::pair<const Node*, Node*>>* src_arg_pairs) {
  Node* src_node = edge->src();
  int src_slot = edge->src_output();
  // One argument per distinct (source node, output slot) pair.
  absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash>::iterator iter;
  bool inserted;
  std::tie(iter, inserted) = args_by_src_.emplace(
      OutputTensor(src_node, src_slot), args_by_src_.size());
  int arg_index = iter->second;
  if (inserted) {
    // First time this source tensor is seen: build the _Arg node.
    NodeDef arg_def;
    NodeDefBuilder builder(
        absl::StrCat(src_node->name(), "_", src_slot, "_arg"), kArgOp,
        NodeDebugInfo(src_node->def()));
    DataType dtype = edge->dst()->input_type(edge->dst_input());
    builder.Attr("T", dtype);
    builder.Attr("index", arg_index);
    TF_RETURN_IF_ERROR(builder.Finalize(&arg_def));
    TF_ASSIGN_OR_RETURN(Node * arg, graph_->AddNode(arg_def));
    src_arg_pairs->push_back({src_node, arg});
    args_.push_back(arg);
  }
  Node* dst_node = edge->dst();
  Node* dst_image = node_images.at(dst_node);
  int dst_slot = edge->dst_input();
  args_by_dst_[InputTensor(dst_node, dst_slot)] = arg_index;
  graph_->AddEdge(args_[arg_index], 0, dst_image, dst_slot);
  return absl::OkStatus();
}
// Records that the in-subgraph image of `edge`'s source must be exposed as a
// control output of the generated function.
Status Encapsulator::Subgraph::RecordControlResult(
    const Edge* edge,
    const absl::flat_hash_map<const Node*, Node*>& node_images) {
  Node* const image = node_images.at(edge->src());
  control_output_nodes_.insert(image->name());
  return absl::OkStatus();
}
// Records `edge` (which leaves the subgraph) as a function result: creates a
// _Retval node fed by the in-subgraph image of the source tensor on first
// sight. Uses TF_RETURN_IF_ERROR for error propagation, consistent with the
// rest of this file.
Status Encapsulator::Subgraph::RecordResult(
    const Edge* edge,
    const absl::flat_hash_map<const Node*, Node*>& node_images) {
  Node* src_node = edge->src();
  Node* src_image = node_images.at(src_node);
  int src_slot = edge->src_output();
  // One result per distinct (source node, output slot) pair.
  absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash>::iterator iter;
  bool inserted;
  std::tie(iter, inserted) =
      results_.emplace(OutputTensor(src_node, src_slot), results_.size());
  int ret_index = iter->second;
  if (inserted) {
    NodeDef ret_def;
    NodeDefBuilder builder(
        absl::StrCat(src_node->name(), "_", src_slot, "_retval"), kRetValOp,
        NodeDebugInfo(src_node->def()));
    DataType dtype = src_node->output_type(src_slot);
    builder.Attr("T", dtype);
    builder.Attr("index", ret_index);
    builder.Input(src_image->name(), src_slot, dtype);
    TF_RETURN_IF_ERROR(builder.Finalize(&ret_def));
    TF_ASSIGN_OR_RETURN(Node * ret, graph_->AddNode(ret_def));
    graph_->AddEdge(src_image, src_slot, ret, 0);
  }
  return absl::OkStatus();
}
// Lazily creates a NoOp "sequencer" node in the output graph, tagged with
// kXlaHostTransferSequencerAttr and placed on the subgraph's device. Uses
// TF_RETURN_IF_ERROR for error propagation, consistent with the rest of
// this file.
Status Encapsulator::Subgraph::MakeSequencingNode(const string& subgraph_name,
                                                  Graph* graph_out) {
  if (sequencer_ == nullptr) {
    NodeDef seq_def;
    NodeDefBuilder builder(absl::StrCat(subgraph_name, "_sequencer"), "NoOp");
    builder.Attr(kXlaHostTransferSequencerAttr, subgraph_name);
    builder.Device(device_);
    TF_RETURN_IF_ERROR(builder.Finalize(&seq_def));
    TF_ASSIGN_OR_RETURN(sequencer_, graph_out->AddNode(seq_def));
  }
  return absl::OkStatus();
}
// If a sequencer node was created, adds a control edge from it to this
// subgraph's call node in the output graph.
void Encapsulator::Subgraph::ConnectSequencerToCallNode(Graph* graph_out) {
  if (sequencer_ == nullptr) return;
  VLOG(2) << "ConnectSequencerToCallNode";
  graph_out->AddControlEdge(sequencer_, call_node_,
                            /*allow_duplicates=*/true);
}
// Converts this subgraph into a FunctionDef named `name_in` (possibly renamed
// by `rewrite_subgraph_fn` via the call node's op) and registers it in
// `library`.
Status Encapsulator::Subgraph::BuildFunctionDef(
    const string& name_in, const RewriteSubgraphFn& rewrite_subgraph_fn,
    bool reuse_existing_functions, FunctionLibraryDefinition* library) {
  // `name` is overwritten below if the rewrite renames the function.
  string name = name_in;
  call_node_def_.set_op(name);
  call_node_def_.set_name(name);
  call_node_def_.set_device(device_);
  if (rewrite_subgraph_fn) {
    // Present the argument source tensors in arg-index order.
    std::vector<OutputTensor> arg_source_tensors(args_by_src_.size());
    for (const auto& arg : args_by_src_) {
      arg_source_tensors.at(arg.second) = arg.first;
    }
    // Start from identity permutations; the rewrite may reorder them.
    std::vector<int> input_permutation(args_by_src_.size());
    std::iota(input_permutation.begin(), input_permutation.end(), 0);
    std::vector<int> output_permutation(results_.size());
    std::iota(output_permutation.begin(), output_permutation.end(), 0);
    TF_RETURN_IF_ERROR(
        rewrite_subgraph_fn(arg_source_tensors, &graph_, &input_permutation,
                            &output_permutation, &call_node_def_));
    if (input_permutation.size() != args_by_src_.size()) {
      return errors::InvalidArgument("Input permutation has incorrect size.");
    }
    if (output_permutation.size() != results_.size()) {
      return errors::InvalidArgument("Output permutation has incorrect size.");
    }
    // Remap the stored arg/result indices through the permutations.
    for (auto& arg : args_by_src_) {
      arg.second = input_permutation[arg.second];
    }
    for (auto& arg : args_by_dst_) {
      arg.second = input_permutation[arg.second];
    }
    for (auto& result : results_) {
      result.second = output_permutation[result.second];
    }
    // The rewrite may have renamed the function via the call node's op.
    name = call_node_def_.op();
  }
  function_def_name_ = name;
  FunctionDef fdef;
  // Nodes recorded as control results become control outputs of the function.
  auto lookup = [this](const Node* node) -> std::optional<string> {
    if (control_output_nodes_.contains(node->name())) {
      return std::make_optional(node->name());
    }
    return std::nullopt;
  };
  // Validates the subgraph's control-flow structure before conversion; the
  // resulting info itself is unused.
  std::vector<ControlFlowInfo> dummy;
  TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph_.get(), &dummy));
  TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph_, name, lookup, &fdef));
  if (VLOG_IS_ON(1)) {
    VLOG(2) << "Build function def " << name;
    DumpGraphToFile(absl::StrCat("encapsulate_fdef_graph_", name), *graph_,
                    library);
    DumpFunctionDefToFile(absl::StrCat("encapsulate_fdef_", name), fdef);
  }
  // Add the function, or replace it only if it actually changed.
  const FunctionDef* original_fdef = library->Find(name);
  if (!reuse_existing_functions || original_fdef == nullptr) {
    TF_RETURN_IF_ERROR(library->AddFunctionDef(fdef));
  } else if (!FunctionDefsEqual(*original_fdef, fdef)) {
    TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef));
  }
  return absl::OkStatus();
}
// Re-converts the (possibly modified) subgraph to a FunctionDef and replaces
// the previously registered function of the same name in `library`.
Status Encapsulator::Subgraph::ReplaceFunctionDef(
    FunctionLibraryDefinition* library) {
  const string& name = function_def_name_;
  FunctionDef fdef;
  TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph_, name, &fdef));
  if (VLOG_IS_ON(1)) {
    VLOG(2) << "Replace function def " << name;
    DumpGraphToFile(absl::StrCat("replace_encapsulate_fdef_graph_", name),
                    *graph_, library);
    DumpFunctionDefToFile(absl::StrCat("replace_encapsulate_fdef_", name),
                          fdef);
  }
  TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef));
  return absl::OkStatus();
}
// Adds the node that calls this subgraph's function to `graph_out` and pins
// it to the subgraph's device.
Status Encapsulator::Subgraph::AddFunctionCallNode(
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    Graph* graph_out) {
  TF_ASSIGN_OR_RETURN(call_node_, graph_out->AddNode(call_node_def_));
  // Copy the device picked up from the subgraph's nodes onto the call node.
  call_node_->set_assigned_device_name(device_);
  return absl::OkStatus();
}
// Looks up the group attribute (the cluster name) on `node`. Leaves `attr`
// empty when the node carries no such attribute; errors if the attribute is
// present but not a string.
Status Encapsulator::GetFunctionNameAttr(Node const* node, string* attr) const {
  attr->clear();
  AttrSlice attrs = node->attrs();
  for (const auto& name_and_value : attrs) {
    if (name_and_value.first != group_attribute_) continue;
    TF_RETURN_IF_ERROR(AttrValueHasType(name_and_value.second, "string"));
    *attr = name_and_value.second.s();
    break;
  }
  return absl::OkStatus();
}
// A node belongs to a subgraph iff its group attribute is non-empty.
bool IsInSubgraph(const string& func_id) { return func_id.size() > 0; }
// Copies every clustered op node into its cluster's subgraph, stripping the
// group attribute from the copy and recording the original->image mapping.
Status Encapsulator::CopySubgraphNodes(
    absl::flat_hash_map<const Node*, Node*>* node_images) {
  for (Node* node : graph_in_->op_nodes()) {
    string func_id;
    TF_RETURN_IF_ERROR(GetFunctionNameAttr(node, &func_id));
    if (!IsInSubgraph(func_id)) {
      continue;
    }
    // operator[] creates the Subgraph on first reference to a cluster.
    Subgraph& subgraph = subgraphs_[func_id];
    Node* const image = subgraph.MakeNodeImage(graph_in_, node);
    image->ClearAttr(group_attribute_);
    (*node_images)[node] = image;
  }
  return absl::OkStatus();
}
// Visits every edge of the input graph once and routes it:
//  - both endpoints in the same subgraph: copy the edge into that subgraph;
//  - source in a subgraph: record it as a subgraph result (control results
//    for control edges);
//  - destination in a subgraph: record data edges as subgraph arguments.
// Ref-typed tensors may not cross a subgraph boundary.
Status Encapsulator::CopySubgraphEdges(
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    std::vector<std::pair<const Node*, Node*>>* src_arg_pairs) {
  for (const Edge* edge : graph_in_->edges()) {
    string src_func_id;
    TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->src(), &src_func_id));
    string dst_func_id;
    TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->dst(), &dst_func_id));
    // Null when the endpoint is not inside any subgraph.
    Node* src_image = gtl::FindWithDefault(node_images, edge->src(), nullptr);
    Node* dst_image = gtl::FindWithDefault(node_images, edge->dst(), nullptr);
    // Case 1: the edge is entirely inside one subgraph.
    if (IsInSubgraph(src_func_id) && IsInSubgraph(dst_func_id) &&
        src_func_id == dst_func_id) {
      Graph* g = subgraphs_[src_func_id].GetGraph();
      if (edge->IsControlEdge()) {
        g->AddControlEdge(src_image, dst_image,
                          /*allow_duplicates=*/true);
      } else {
        g->AddEdge(src_image, edge->src_output(), dst_image, edge->dst_input());
      }
      continue;
    }
    // Case 2: the edge leaves a subgraph -> becomes a (control) result.
    if (IsInSubgraph(src_func_id)) {
      if (!edge->IsControlEdge()) {
        DataType dtype = edge->src()->output_type(edge->src_output());
        if (IsRefType(dtype)) {
          return errors::InvalidArgument(
              "Ref Tensors (e.g., Variables) are not supported as results: "
              "tensor ",
              edge->src()->name(), ":", edge->src_output());
        }
      }
      Subgraph& src_subgraph = subgraphs_[src_func_id];
      if (edge->IsControlEdge()) {
        TF_RETURN_IF_ERROR(src_subgraph.RecordControlResult(edge, node_images));
      } else {
        TF_RETURN_IF_ERROR(src_subgraph.RecordResult(edge, node_images));
      }
    }
    // Case 3: the edge enters a subgraph -> data edges become arguments.
    // Note incoming control edges are not recorded here.
    if (IsInSubgraph(dst_func_id)) {
      if (!edge->IsControlEdge()) {
        DataType dtype = edge->dst()->input_type(edge->dst_input());
        if (IsRefType(dtype)) {
          return errors::InvalidArgument(
              "Ref Tensors (e.g., Variables) are not supported as args: "
              "tensor ",
              edge->src()->name(), ":", edge->src_output());
        }
      }
      Subgraph& dst_subgraph = subgraphs_[dst_func_id];
      if (!edge->IsControlEdge()) {
        TF_RETURN_IF_ERROR(
            dst_subgraph.RecordArg(edge, node_images, src_arg_pairs));
      }
    }
  }
  return absl::OkStatus();
}
// Phase 1 driver: copies clustered nodes into per-cluster subgraphs, routes
// the crossing edges as args/results, then runs guaranteed-constant analysis
// on the arguments. (The original kept a dead `Status s` local that was
// never assigned; it is removed here.)
Status Encapsulator::SplitIntoSubgraphs(FunctionLibraryDefinition* library) {
  absl::flat_hash_map<const Node*, Node*> node_images;
  // (source node, arg node) pairs consumed by MarkGuaranteedConstants below.
  std::vector<std::pair<const Node*, Node*>> src_arg_pairs;
  TF_RETURN_IF_ERROR(CopySubgraphNodes(&node_images));
  TF_RETURN_IF_ERROR(CopySubgraphEdges(node_images, &src_arg_pairs));
  MarkGuaranteedConstants(*graph_in_, src_arg_pairs);
  for (auto& entry : subgraphs_) {
    Subgraph& subgraph = entry.second;
    FixupSourceAndSinkEdges(subgraph.GetGraph());
  }
  if (VLOG_IS_ON(1)) {
    for (auto& entry : subgraphs_) {
      DumpGraphToFile(
          absl::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
          *entry.second.GetGraph(), library);
    }
  }
  return absl::OkStatus();
}
// Phase 2 driver: emits one FunctionDef per subgraph into `library`.
Status Encapsulator::BuildFunctionDefs(
    const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions,
    FunctionLibraryDefinition* library) {
  for (auto& subgraph_entry : subgraphs_) {
    // Bind by reference: BuildFunctionDef takes the name by const ref, so
    // copying the map key (as the original did) is unnecessary.
    const string& name = subgraph_entry.first;
    Subgraph& subgraph = subgraph_entry.second;
    TF_RETURN_IF_ERROR(subgraph.BuildFunctionDef(
        name, rewrite_subgraph_fn, reuse_existing_functions, library));
  }
  return absl::OkStatus();
}
// Copies every node that is NOT in a subgraph into the output graph and maps
// the input graph's source/sink nodes onto the output graph's.
Status Encapsulator::CopyNodesToOutputGraph(
    Graph* graph_out, absl::flat_hash_map<const Node*, Node*>* node_images) {
  for (Node* node : graph_in_->op_nodes()) {
    string func_id;
    TF_RETURN_IF_ERROR(GetFunctionNameAttr(node, &func_id));
    if (IsInSubgraph(func_id)) {
      continue;  // Clustered nodes are represented by their call node.
    }
    (*node_images)[node] = graph_out->CopyNode(node);
  }
  (*node_images)[graph_in_->source_node()] = graph_out->source_node();
  (*node_images)[graph_in_->sink_node()] = graph_out->sink_node();
  return absl::OkStatus();
}
// Adds one function-call node per subgraph to the output graph.
Status Encapsulator::AddFunctionCallNodes(
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    Graph* graph_out) {
  for (auto& [cluster_name, subgraph] : subgraphs_) {
    TF_RETURN_IF_ERROR(subgraph.AddFunctionCallNode(node_images, graph_out));
  }
  return absl::OkStatus();
}
// Resolves the output-graph node standing in for an edge's source: the
// subgraph's call node when the source is clustered, otherwise its copy.
Status Encapsulator::FindOutputImageOfEdgeSrc(
    const string& src_func_id, const string& dst_func_id,
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    const Node* original_src_node, Node** src_image) {
  *src_image = IsInSubgraph(src_func_id)
                   ? subgraphs_.at(src_func_id).GetCallNode()
                   : node_images.at(original_src_node);
  return absl::OkStatus();
}
int Encapsulator::FindOutputSlotOfEdgeSrc(const string& src_func_id,
const string& dst_func_id,
const Edge* edge) {
if (IsInSubgraph(src_func_id)) {
const Subgraph& src_subgraph = subgraphs_.at(src_func_id);
return src_subgraph.GetResultIndexForEdge(edge);
} else {
return edge->src_output();
}
}
// Resolves the output-graph node standing in for an edge's destination: the
// subgraph's call node when the destination is clustered, otherwise its copy.
Status Encapsulator::FindOutputImageOfEdgeDst(
    const string& src_func_id, const string& dst_func_id,
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    const Node* original_dst_node, Node** dst_image) {
  *dst_image = IsInSubgraph(dst_func_id)
                   ? subgraphs_.at(dst_func_id).GetCallNode()
                   : node_images.at(original_dst_node);
  return absl::OkStatus();
}
int Encapsulator::FindOutputSlotOfEdgeDst(const string& src_func_id,
const string& dst_func_id,
const Edge* edge) {
if (IsInSubgraph(dst_func_id)) {
const Subgraph& dst_subgraph = subgraphs_.at(dst_func_id);
return dst_subgraph.GetArgIndexForEdge(edge);
} else {
return edge->dst_input();
}
}
// Copies `edge` into graph_out, remapping endpoints inside a subgraph onto
// that subgraph's call node. `edges_added` suppresses duplicates, which arise
// when several input-graph edges collapse onto the same call-node pair.
Status Encapsulator::CopyEdgeToOutputGraph(
    const Edge* edge, const string& src_func_id, const string& dst_func_id,
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    Graph* graph_out,
    absl::flat_hash_set<std::pair<OutputTensor, InputTensor>,
                        OutputInputTensorPairHasher>* edges_added) {
  Node* src_image;
  TF_RETURN_IF_ERROR(FindOutputImageOfEdgeSrc(
      src_func_id, dst_func_id, node_images, edge->src(), &src_image));
  Node* dst_image;
  TF_RETURN_IF_ERROR(FindOutputImageOfEdgeDst(
      src_func_id, dst_func_id, node_images, edge->dst(), &dst_image));
  if (edge->IsControlEdge()) {
    // Slot -1 marks a control edge in the dedup set.
    if (edges_added
            ->emplace(OutputTensor(src_image, -1), InputTensor(dst_image, -1))
            .second) {
      graph_out->AddControlEdge(src_image, dst_image,
                                /*allow_duplicates=*/true);
    }
    return absl::OkStatus();
  }
  int src_output = FindOutputSlotOfEdgeSrc(src_func_id, dst_func_id, edge);
  int dst_input = FindOutputSlotOfEdgeDst(src_func_id, dst_func_id, edge);
  if (edges_added
          ->emplace(OutputTensor(src_image, src_output),
                    InputTensor(dst_image, dst_input))
          .second) {
    graph_out->AddEdge(src_image, src_output, dst_image, dst_input);
  }
  return absl::OkStatus();
}
// Copies every edge that is not fully inside one subgraph into the output
// graph, then wires each subgraph's sequencer (if any) to its call node.
Status Encapsulator::AddEdgesToOutputGraph(
    const absl::flat_hash_map<const Node*, Node*>& node_images,
    Graph* graph_out) {
  // Tracks tensor pairs already wired so collapsing edges stay deduplicated.
  absl::flat_hash_set<std::pair<OutputTensor, InputTensor>,
                      OutputInputTensorPairHasher>
      edges_added;
  for (const Edge* edge : graph_in_->edges()) {
    string src_func_id;
    TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->src(), &src_func_id));
    string dst_func_id;
    TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->dst(), &dst_func_id));
    // Edges fully inside one subgraph were already copied into it.
    const bool intra_subgraph = IsInSubgraph(src_func_id) &&
                                IsInSubgraph(dst_func_id) &&
                                src_func_id == dst_func_id;
    if (intra_subgraph) {
      continue;
    }
    TF_RETURN_IF_ERROR(CopyEdgeToOutputGraph(
        edge, src_func_id, dst_func_id, node_images, graph_out, &edges_added));
  }
  for (auto& [cluster_name, subgraph] : subgraphs_) {
    subgraph.ConnectSequencerToCallNode(graph_out);
  }
  return absl::OkStatus();
}
namespace {
// Creates a Const node in graph_out whose tensor proto carries only the given
// dtype and shape (no values are set). The constant is then wrapped in one
// constant Enter node per control-flow frame enclosing src_node, so the
// result is usable at src_node's loop-nesting depth.
Node* AddDummyShapedNode(const Node* src_node, int src_port,
                         const std::vector<ControlFlowInfo>& control_flow_info,
                         const TensorShapeProto& shape, Graph* graph_out) {
  DataType data_type = src_node->output_type(src_port);
  TensorProto dummy_proto;
  dummy_proto.set_dtype(data_type);
  *dummy_proto.mutable_tensor_shape() = shape;
  // No value content is filled in; only dtype and shape matter here.
  GraphDefBuilder::Options options(graph_out, /*status=*/nullptr);
  NodeBuilder node_builder(options.GetNameForOp("KnownShape"), "Const",
                           options.op_registry());
  node_builder.Attr("dtype", data_type).Attr("value", dummy_proto);
  Node* node = options.FinalizeBuilder(&node_builder);
  // Walk outward through the enclosing frames, adding a constant Enter node
  // for each one until reaching the root (empty frame name).
  while (!control_flow_info[src_node->id()].frame_name.empty()) {
    NodeDebugInfo debug_info(*src_node);
    NodeBuilder enter_builder(options.GetNameForOp("Enter"), "Enter",
                              options.op_registry(), &debug_info);
    enter_builder.Attr("frame_name",
                       control_flow_info[src_node->id()].frame_name);
    enter_builder.Attr("is_constant", true);
    enter_builder.Input(node, 0);
    Node* enter_node = options.FinalizeBuilder(&enter_builder);
    node = enter_node;
    src_node = control_flow_info[src_node->id()].parent_frame;
  }
  return node;
}
}  // namespace
// Copies the portion of `graph` that is reverse-reachable from `sink_nodes`
// into *pruned_graph, then inlines every function-call node found in the
// copy. `node_images` receives the original->copy mapping.
Status Encapsulator::MakePrunedGraphCopyAndInline(
    const Graph& graph, const std::vector<Node*>& sink_nodes,
    std::unique_ptr<Graph>* pruned_graph,
    absl::flat_hash_map<const Node*, Node*>* node_images,
    FunctionLibraryDefinition* library) {
  pruned_graph->reset(new Graph(library));
  (*pruned_graph)->set_versions(graph.versions());
  // Copy only nodes that can reach one of the sink nodes; the source node is
  // excluded.
  ReverseDFSFrom(graph, sink_nodes,
                 /*enter=*/nullptr,
                 /*leave=*/[&](Node* n) {
                   if (!n->IsSource()) {
                     Node* copied = (*pruned_graph)->CopyNode(n);
                     node_images->emplace(n, copied);
                   }
                 });
  // Re-create the edges among the copied nodes; edges to pruned-away nodes
  // are dropped.
  for (auto entry : *node_images) {
    const Node* orig = entry.first;
    Node* image = entry.second;
    for (const Edge* out_edge : orig->out_edges()) {
      auto iter = node_images->find(out_edge->dst());
      if (iter != node_images->end()) {
        (*pruned_graph)
            ->AddEdge(image, out_edge->src_output(), iter->second,
                      out_edge->dst_input());
      }
    }
  }
  // Collect the nodes whose op is registered as a function in `library`.
  std::vector<Node*> function_nodes;
  for (auto node : (*pruned_graph)->nodes()) {
    const OpRegistrationData* op_reg_data;
    TF_RETURN_IF_ERROR(library->LookUp(node->type_string(), &op_reg_data));
    if (op_reg_data->is_function_op) {
      function_nodes.push_back(node);
    }
  }
  // Inline each function-call node in place.
  for (auto node : function_nodes) {
    VLOG(2) << "Inlining function " << node->name();
    const FunctionDef* fdef = library->Find(node->type_string());
    if (fdef == nullptr) {
      return errors::Internal("Failed to find function ", node->type_string(),
                              " in function library.");
    }
    std::unique_ptr<FunctionBody> fbody;
    TF_RETURN_IF_ERROR(
        FunctionDefToBodyHelper(*fdef, node->attrs(), library, &fbody));
    InlineFunctionBodyOptions inline_opts;
    TF_RETURN_IF_ERROR(InlineFunctionBody(*library, pruned_graph->get(), node,
                                          fbody.get(), inline_opts));
  }
  return absl::OkStatus();
}
// Phase 3 driver: copies the unclustered nodes, adds one call node per
// subgraph, then reconnects all crossing/outside edges.
Status Encapsulator::BuildOutputGraph(Graph* graph_out,
                                      FunctionLibraryDefinition* library) {
  absl::flat_hash_map<const Node*, Node*> node_images;
  TF_RETURN_IF_ERROR(CopyNodesToOutputGraph(graph_out, &node_images));
  TF_RETURN_IF_ERROR(AddFunctionCallNodes(node_images, graph_out));
  TF_RETURN_IF_ERROR(AddEdgesToOutputGraph(node_images, graph_out));
  return absl::OkStatus();
}
}
// Top-level entry: rewrites `graph_in` by extracting every cluster tagged
// with `group_attribute` into a function in `library` and emitting the
// rewritten graph into *graph_out.
Status EncapsulateSubgraphsInFunctions(
    string group_attribute, const Graph& graph_in,
    const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions,
    std::unique_ptr<Graph>* graph_out, FunctionLibraryDefinition* library) {
  Encapsulator encapsulator(std::move(group_attribute), &graph_in);
  TF_RETURN_IF_ERROR(encapsulator.SplitIntoSubgraphs(library));
  TF_RETURN_IF_ERROR(encapsulator.BuildFunctionDefs(
      rewrite_subgraph_fn, reuse_existing_functions, library));
  auto out = std::make_unique<Graph>(library);
  out->set_versions(graph_in.versions());
  TF_RETURN_IF_ERROR(encapsulator.BuildOutputGraph(out.get(), library));
  *graph_out = std::move(out);
  return absl::OkStatus();
}
// Collects the output dtype of every _Arg node in `graph`, stored at the
// position given by the node's "index" attribute. `types` must already be
// sized to the argument count.
static Status GetArgTypes(const Graph& graph, DataTypeVector* types) {
  for (Node* n : graph.op_nodes()) {
    if (n->type_string() != kArgOp) continue;
    int index;
    TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
    const int num_types = types->size();
    if (index < 0 || index >= num_types) {
      return errors::InvalidArgument("Invalid argument number");
    }
    (*types)[index] = n->output_type(0);
  }
  return absl::OkStatus();
}
// Overwrites each _Arg node's "index" attribute with its permuted position,
// i.e. index i becomes permutation[i].
static Status RenumberArguments(Graph* graph,
                                const std::vector<int>& permutation) {
  for (Node* n : graph->op_nodes()) {
    if (n->type_string() != kArgOp) continue;
    int index;
    TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
    const int permutation_size = permutation.size();
    if (index < 0 || index >= permutation_size) {
      return errors::InvalidArgument("Invalid argument number");
    }
    n->AddAttr("index", permutation[index]);
  }
  return absl::OkStatus();
}
// Pass entry point: every cluster of nodes tagged with kXlaClusterAttr is
// replaced by a call to a freshly built function. Each subgraph is optimized
// (including constant folding) and its arguments are permuted into
// (constants, non-constants, resources) order before the function is built.
Status EncapsulateSubgraphsPass::Run(
    const GraphOptimizationPassOptions& options) {
  VLOG(1) << "EncapsulateSubgraphsPass::Run";
  if (VLOG_IS_ON(1)) {
    DumpGraphToFile("encapsulate_subgraphs_before", **options.graph,
                    options.flib_def);
  }
  // Graphs that already contain TPU execute ops are left untouched.
  for (Node* n : (*options.graph)->nodes()) {
    if (n->type_string() == "TPUExecute" ||
        n->type_string() == "TPUExecuteAndUpdateVariables") {
      return absl::OkStatus();
    }
  }
  std::unique_ptr<Graph> graph_out;
  FunctionLibraryDefinition* const library = options.flib_def;
  // The per-subgraph optimization below needs a function library runtime
  // backed by a CPU device; build one here.
  SessionOptions session_options;
  auto* device_count = session_options.config.mutable_device_count();
  device_count->insert({"CPU", 1});
  std::vector<std::unique_ptr<Device>> devices;
  DeviceFactory* cpu_factory = DeviceFactory::GetFactory("CPU");
  if (!cpu_factory) {
    return errors::NotFound(
        "CPU Factory not registered. Can't run EncapsulateSubgraphsPass");
  }
  TF_RETURN_IF_ERROR(cpu_factory->CreateDevices(
      session_options, "/job:localhost/replica:0/task:0", &devices));
  if (devices.empty()) {
    return errors::NotFound(
        "Failed to create a CPU device for EncapsulateSubgraphsPass");
  }
  std::unique_ptr<DeviceMgr> device_mgr =
      std::make_unique<StaticDeviceMgr>(std::move(devices));
  const auto* config = &options.session_options->config;
  std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
      new ProcessFunctionLibraryRuntime(
          device_mgr.get(), options.session_options->env,
          config, TF_GRAPH_DEF_VERSION, library,
          config->graph_options().optimizer_options()));
  FunctionLibraryRuntime* flr =
      pflr->GetFLR("/job:localhost/replica:0/task:0/device:CPU:0");
  if (flr == nullptr) {
    return errors::Internal(
        "Failed to create and retrieve function library runtime to run "
        "constant folding");
  }
  // Per-subgraph rewrite: optimize the subgraph, then permute its arguments
  // into (constants, non-constants, resources) order and annotate the call
  // node with the const/resource argument counts.
  auto rewrite_subgraph =
      [flr](const std::vector<OutputTensor>& arg_source_tensors,
            std::unique_ptr<Graph>* subgraph,
            std::vector<int>* input_permutation,
            std::vector<int>* output_permutation, NodeDef* node) {
        bool disable_constant_folding =
            GetBuildXlaOpsPassFlags()->tf_xla_disable_constant_folding;
        // Never constant-fold ops that can produce DT_VARIANT outputs.
        auto cf_consider_fn = [disable_constant_folding](const Node* n) {
          if (disable_constant_folding) return false;
          for (const auto& output_arg : n->op_def().output_arg()) {
            if (output_arg.type() == DT_VARIANT) {
              return false;
            }
          }
          return true;
        };
        GraphOptimizer::Options graph_optimizer_options;
        graph_optimizer_options.cf_consider_fn = cf_consider_fn;
        OptimizeGraph(flr, subgraph, graph_optimizer_options);
        const int num_args = input_permutation->size();
        // const_args[i] is true iff argument i must be compile-time constant.
        std::vector<bool> const_args(num_args);
        TF_RETURN_IF_ERROR(
            BackwardsConstAnalysis(**subgraph, &const_args,
                                   /*compile_time_const_nodes=*/nullptr, flr));
        DataTypeVector arg_types(num_args);
        TF_RETURN_IF_ERROR(GetArgTypes(**subgraph, &arg_types));
        const int num_consts =
            std::count(const_args.begin(), const_args.end(), true);
        const int num_resources =
            std::count(arg_types.begin(), arg_types.end(), DT_RESOURCE);
        const int num_nonconsts = num_args - num_resources - num_consts;
        if (num_nonconsts < 0) {
          return errors::Internal("num_nonconsts should be >= 0, was ",
                                  num_nonconsts);
        }
        // Assign each argument its slot: constants first, then
        // non-constants, then resources.
        int const_pos = 0;
        int arg_pos = num_consts;
        int resource_pos = num_consts + num_nonconsts;
        for (int i = 0; i < num_args; ++i) {
          if (const_args[i]) {
            if (arg_types[i] == DT_RESOURCE) {
              return errors::Internal(
                  "Resource arguments cannot be constant (argument ", i, ")");
            }
            (*input_permutation)[i] = const_pos;
            ++const_pos;
          } else if (arg_types[i] == DT_RESOURCE) {
            (*input_permutation)[i] = resource_pos;
            ++resource_pos;
          } else {
            (*input_permutation)[i] = arg_pos;
            ++arg_pos;
          }
        }
        TF_RETURN_IF_ERROR(
            RenumberArguments(subgraph->get(), *input_permutation));
        // Record the split so later passes know how many leading const and
        // trailing resource args the compiled function takes.
        AddNodeAttr(kXlaCompiledKernelAttr, true, node);
        AddNodeAttr(kXlaNumConstantArgsAttr, num_consts, node);
        AddNodeAttr(kXlaNumResourceArgsAttr, num_resources, node);
        return absl::OkStatus();
      };
  TF_RETURN_WITH_CONTEXT_IF_ERROR(
      EncapsulateSubgraphsInFunctions(
          kXlaClusterAttr, **options.graph, rewrite_subgraph,
          /*reuse_existing_functions=*/false, &graph_out, library),
      "EncapsulateSubgraphsPass failed");
  if (VLOG_IS_ON(1)) {
    DumpGraphToFile("encapsulate_subgraphs_after", *graph_out,
                    options.flib_def);
  }
  *options.graph = std::move(graph_out);
  // Annotate every node with whether it is related to reference variables.
  TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Node*> ref_related_nodes,
                      GetNodesRelatedToRefVariables(**options.graph, flr));
  for (Node* node : (*options.graph)->nodes()) {
    bool has_ref_vars = ref_related_nodes.contains(node);
    node->AddAttr(kXlaHasReferenceVarsAttr, has_ref_vars);
    VLOG(3) << "Has ref vars = " << has_ref_vars
            << ", node: " << node->def().DebugString();
  }
  return absl::OkStatus();
}
// Returns true iff `node` carries kXlaCompiledKernelAttr == true, i.e. it is
// a function-call node produced by this pass. (The original computed
// `has_compilation_attr ? is_compiled : false`, which is redundant because
// `has_compilation_attr` already ANDs in `is_compiled`.)
bool IsXlaCompiledKernel(const Node& node) {
  bool is_compiled = false;
  // TryGetNodeAttr returns false when the attribute is absent; a missing
  // attribute is treated the same as `false`.
  return TryGetNodeAttr(node.attrs(), kXlaCompiledKernelAttr, &is_compiled) &&
         is_compiled;
}
} | #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include <memory>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
// Attribute attached by the encapsulation pass to per-subgraph "sequencer"
// NoOp nodes; mirrored here for graph comparisons in the tests.
const char* const kXlaHostTransferSequencerAttr =
    "_xla_host_transfer_sequencer";
// Converts the graph under construction in `graphdef_builder` into a
// FunctionDef named "_outside_compilation_shape_inference_<name_suffix>" and
// appends it to `library`.
Status AddGraphDefToFunctionLibrary(const GraphDefBuilder& graphdef_builder,
                                    const string& name_suffix,
                                    FunctionDefLibrary* library) {
  GraphDef graphdef;
  TF_RETURN_IF_ERROR(graphdef_builder.ToGraphDef(&graphdef));
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  GraphConstructorOptions opts;
  opts.allow_internal_ops = true;
  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, graphdef, graph.get()));
  FunctionDef* fdef = library->add_function();
  TF_RETURN_IF_ERROR(GraphToFunctionDef(
      *graph,
      absl::StrCat("_outside_compilation_shape_inference_", name_suffix),
      fdef));
  return absl::OkStatus();
}
// Compares two protobuf maps: every key of `a` must exist in `b` with a
// value accepted by `compare`, and `b` must contain no keys missing from
// `a`. On mismatch, writes a human-readable description into *diff (when
// non-null) and returns false. Note values of keys only present in `b` are
// reported as extra keys without being compared.
template <class Tkey, class Tvalue>
bool EqualProtoMap(const ::tensorflow::protobuf::Map<Tkey, Tvalue>& a,
                   const ::tensorflow::protobuf::Map<Tkey, Tvalue>& b,
                   const std::function<string(const Tkey&)>& key_to_string,
                   const std::function<string(const Tvalue&)>& value_to_string,
                   const std::function<bool(const Tkey&, const Tvalue&,
                                            const Tvalue&)>& compare,
                   const string& map_name, string* diff) {
  // First pass: each element of `a` must appear in `b` with an acceptable
  // value.
  for (const auto& elt_a : a) {
    const auto iter = b.find(elt_a.first);
    if (iter == b.end()) {
      if (diff) {
        *diff = absl::StrCat(map_name, " expected: contains element with key '",
                             key_to_string(elt_a.first),
                             "' got: map has no such element");
      }
      return false;
    }
    if (!compare(elt_a.first, elt_a.second, iter->second)) {
      if (diff) {
        *diff = absl::StrCat(map_name, " expected: element with key '",
                             key_to_string(elt_a.first), "' has value '",
                             value_to_string(elt_a.second), "' got: '",
                             value_to_string(iter->second), "'");
      }
      return false;
    }
  }
  // Second pass: `b` must not contain keys absent from `a`.
  for (const auto& elt_b : b) {
    const auto iter = a.find(elt_b.first);
    if (iter == a.end()) {
      if (diff) {
        *diff = absl::StrCat(map_name, " got: contains element with key '",
                             key_to_string(elt_b.first),
                             "' expected: map has no such element");
      }
      return false;
    }
  }
  return true;
}
// Compares two NodeDefs taken from FunctionDefs. Op, device and positional
// inputs must match exactly; control inputs ("^name") are compared as
// unordered sets, as is the "ancestors" attr; every other attr is compared
// by its DebugString. On mismatch, writes an explanation prefixed with
// `diff_preamble` into `*diff` (when non-null).
bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b,
                          const string& diff_preamble, string* diff) {
  if (a.op() != b.op()) {
    if (diff) {
      *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
                           ", expected op '", a.op(), "' got '", b.op());
    }
    return false;
  }
  if (a.device() != b.device()) {
    if (diff) {
      *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
                           ", expected device '", a.device(), "' got '",
                           b.device());
    }
    return false;
  }
  if (a.input_size() != b.input_size()) {
    if (diff) {
      *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
                           ", expected ", a.input_size(), " inputs got ",
                           b.input_size(), " expected:\n", a.DebugString(),
                           "\ngot:\n", b.DebugString());
    }
    return false;
  }
  // Control inputs may appear in any order, so they are collected into sets
  // during the positional scan and compared afterwards. A data input in one
  // node may not correspond to a control input in the other at the same
  // position.
  std::unordered_set<string> control_input_a;
  std::unordered_set<string> control_input_b;
  for (int i = 0; i < a.input_size(); ++i) {
    if (absl::StartsWith(a.input(i), "^")) {
      if (!absl::StartsWith(b.input(i), "^")) {
        if (diff) {
          *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
                               " input ", i, ", expected control input ",
                               a.input(i), " got ", b.input(i), " expected:\n",
                               a.DebugString(), "\ngot:\n", b.DebugString());
        }
        return false;
      }
      control_input_a.insert(a.input(i));
      control_input_b.insert(b.input(i));
    } else if (a.input(i) != b.input(i)) {
      if (diff) {
        *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
                             " input ", i, ", expected ", a.input(i), " got ",
                             b.input(i), " expected:\n", a.DebugString(),
                             "\ngot:\n", b.DebugString());
      }
      return false;
    }
  }
  if (control_input_a != control_input_b) {
    if (diff) {
      *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
                           " control inputs differ expected:\n",
                           a.DebugString(), "\ngot:\n", b.DebugString());
    }
    return false;
  }
  // Attrs: the "ancestors" list is order-insensitive; all other attrs must
  // match textually.
  return EqualProtoMap<string, AttrValue>(
      a.attr(), b.attr(), [](const string& s) { return s; },
      [](const AttrValue& v) { return v.DebugString(); },
      [](const string& key, const AttrValue& av, const AttrValue& bv) {
        if (key == "ancestors") {
          std::unordered_set<string> a_set(av.list().s().begin(),
                                           av.list().s().end());
          std::unordered_set<string> b_set(bv.list().s().begin(),
                                           bv.list().s().end());
          return a_set == b_set;
        } else {
          return av.DebugString() == bv.DebugString();
        }
      },
      absl::StrCat(diff_preamble, " attr mismatch for node ", a.name()), diff);
}
// Compares two FunctionDefs: the signature and function attrs textually,
// the ret map by value, and the node_defs as unordered sets keyed by node
// name (each matched pair is compared with EqualFunctionNodeDef). On
// mismatch, writes an explanation into `*diff` (when non-null).
bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
                      string* diff) {
  if (a.signature().DebugString() != b.signature().DebugString()) {
    if (diff) {
      *diff =
          absl::StrCat("Signature mismatch for function ", a.signature().name(),
                       ", expected:\n", a.signature().DebugString(), "\ngot:\n",
                       b.signature().DebugString());
    }
    return false;
  }
  if (!EqualProtoMap<string, AttrValue>(
          a.attr(), b.attr(), [](const string& s) { return s; },
          [](const AttrValue& v) { return v.DebugString(); },
          [](const string& key, const AttrValue& av, const AttrValue& bv) {
            return av.DebugString() == bv.DebugString();
          },
          absl::StrCat("attr mismatch for function ", a.signature().name()),
          diff)) {
    return false;
  }
  if (!EqualProtoMap<string, string>(
          a.ret(), b.ret(), [](const string& s) { return s; },
          [](const string& s) { return s; },
          [](const string& key, const string& av, const string& bv) {
            return av == bv;
          },
          absl::StrCat("ret mismatch for function ", a.signature().name()),
          diff)) {
    return false;
  }
  // Every node of `a` must have an equal counterpart (matched by name) in
  // `b`; node order is irrelevant.
  for (int i = 0; i < a.node_def_size(); ++i) {
    bool found = false;
    for (int j = 0; j < b.node_def_size(); ++j) {
      if (a.node_def(i).name() == b.node_def(j).name()) {
        if (!EqualFunctionNodeDef(
                a.node_def(i), b.node_def(j),
                absl::StrCat("Function ", a.signature().name()), diff)) {
          return false;
        }
        found = true;
        break;
      }
    }
    if (!found) {
      if (diff) {
        *diff = absl::StrCat("Function ", a.signature().name(),
                             ", expected: has node '", a.node_def(i).name(),
                             "' got: no node of that name");
      }
      return false;
    }
  }
  // Conversely, `b` must not contain nodes that `a` lacks.
  for (int i = 0; i < b.node_def_size(); ++i) {
    bool found = false;
    for (int j = 0; j < a.node_def_size(); ++j) {
      if (b.node_def(i).name() == a.node_def(j).name()) {
        found = true;
        break;
      }
    }
    if (!found) {
      if (diff) {
        *diff = absl::StrCat("Function ", a.signature().name(),
                             ", got: has node '", b.node_def(i).name(),
                             "' expected: no node of that name");
      }
      return false;
    }
  }
  return true;
}
bool EqualFunctionDefLibrary(const FunctionDefLibrary& expected,
const FunctionDefLibrary& actual, string* diff) {
std::unordered_map<string, const FunctionDef*> actual_index;
for (const FunctionDef& function : actual.function()) {
actual_index[function.signature().name()] = &function;
}
for (const FunctionDef& expected_function : expected.function()) {
auto it = actual_index.find(expected_function.signature().name());
if (it == actual_index.end()) {
if (diff) {
*diff = absl::StrCat("Did not find expected function '",
expected_function.signature().name(), "'");
}
return false;
}
if (!EqualFunctionDef(expected_function, *it->second, diff)) return false;
actual_index.erase(it);
}
if (!actual_index.empty()) {
if (diff != nullptr) {
*diff =
absl::StrCat("Found unexpected function '",
actual_index.begin()->second->signature().name(), "'");
}
return false;
}
return true;
}
// Expects the two FunctionDefLibrarys to be equal (unordered comparison via
// EqualFunctionDefLibrary); on failure prints the diff and the full actual
// library.
#define TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(expected, actual)         \
  do {                                                            \
    string diff;                                                  \
    EXPECT_TRUE(EqualFunctionDefLibrary(expected, actual, &diff)) \
        << diff << "\nActual: " << actual.DebugString();          \
  } while (false)
// Test source op producing a float of unknown shape.
REGISTER_OP("InputTest")
    .Output("o: float")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
      c->set_output(0, c->UnknownShape());
      return absl::OkStatus();
    });
// Test source op producing a float vector of statically-known length 2.
REGISTER_OP("InputTestShaped")
    .Output("o: float")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
      c->set_output(0, c->Vector(2));
      return absl::OkStatus();
    });
// Unary test op whose output shape is merged from its input.
REGISTER_OP("UnaryTest")
    .Input("a: float")
    .Output("o: float")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
      ::tensorflow::shape_inference::ShapeHandle o;
      TF_RETURN_IF_ERROR(c->Merge(c->UnknownShape(), c->input(0), &o));
      c->set_output(0, o);
      return absl::OkStatus();
    });
// Binary test op whose output shape is merged from input 0 only.
REGISTER_OP("BinaryTest")
    .Input("a: float")
    .Input("b: float")
    .Output("o: float")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
      ::tensorflow::shape_inference::ShapeHandle o;
      TF_RETURN_IF_ERROR(c->Merge(c->UnknownShape(), c->input(0), &o));
      c->set_output(0, o);
      return absl::OkStatus();
    });
// Binary test op with an always-unknown output shape.
REGISTER_OP("BinaryTest2")
    .Input("a: float")
    .Input("b: float")
    .Output("o: float")
    .SetShapeFn(::tensorflow::shape_inference::UnknownShape);
// Variadic commutative/aggregate op shaped like AddN.
REGISTER_OP("AddNLikeTest")
    .Input("inputs: N * T")
    .Output("sum: T")
    .Attr("N: int >= 1")
    .Attr("T: numbertype")
    .SetIsCommutative()
    .SetIsAggregate();
// Adds a "NoOp" sequencer node whose kXlaHostTransferSequencerAttr names
// the XLA call node (`call_node_name`) it orders host transfers against.
// Returns nullptr if `opts` already carries an error.
Node* Sequencer(const GraphDefBuilder::Options& opts,
                const string& call_node_name) {
  if (opts.HaveError()) return nullptr;
  NodeBuilder builder(opts.GetNameForOp("NoOp"), "NoOp", opts.op_registry());
  return opts.WithAttr(kXlaHostTransferSequencerAttr, call_node_name)
      .FinalizeBuilder(&builder);
}
// Adds an "InputTest" source node (unknown output shape).
Node* Input(const GraphDefBuilder::Options& opts) {
  return ops::SourceOp("InputTest", opts);
}
// Adds an "InputTestShaped" source node (statically-known output shape).
Node* InputShaped(const GraphDefBuilder::Options& opts) {
  return ops::SourceOp("InputTestShaped", opts);
}
// Adds a "Const" node of `dtype` whose TensorProto value carries only a
// shape (one dim entry per element of `shape`) and no tensor content.
// Returns nullptr if `opts` already carries an error.
Node* KnownShapeBase(DataType dtype, absl::Span<const int> shape,
                     const GraphDefBuilder::Options& opts) {
  if (opts.HaveError()) return nullptr;
  NodeBuilder builder(opts.GetNameForOp("Const"), "Const", opts.op_registry());
  TensorProto proto;
  proto.set_dtype(dtype);
  for (const int dim_size : shape) {
    proto.mutable_tensor_shape()->add_dim()->set_size(dim_size);
  }
  return opts.WithAttr("value", proto)
      .WithAttr("dtype", dtype)
      .FinalizeBuilder(&builder);
}
// Adds a float shape-only Const node (see KnownShapeBase).
Node* KnownShape(absl::Span<const int> shape,
                 const GraphDefBuilder::Options& opts) {
  return KnownShapeBase(DT_FLOAT, shape, opts);
}
// Adds the shape-only Const matching a key placeholder: a DT_STRING vector
// of length 2.
Node* KeyPlaceholderShape(const GraphDefBuilder::Options& opts) {
  return KnownShapeBase(DT_STRING, {2}, opts);
}
// Adds the "<call_node>_key_placeholder" Placeholder node: a length-2
// DT_STRING vector tagged with the call node it belongs to via the
// "_host_compute_call_node" attr. Returns nullptr on prior builder error.
Node* KeyPlaceholder(const string& call_node,
                     const GraphDefBuilder::Options& opts) {
  if (opts.HaveError()) return nullptr;
  NodeBuilder node_builder(absl::StrCat(call_node, "_key_placeholder"),
                           "Placeholder", opts.op_registry());
  TensorShapeProto shape;
  shape.add_dim()->set_size(2);
  return opts.WithAttr("shape", shape)
      .WithAttr("dtype", DT_STRING)
      .WithAttr("_host_compute_call_node", call_node)
      .FinalizeBuilder(&node_builder);
}
// Adds a "_XlaRecvAtHost" node receiving `dtypes`-typed tensors for the
// outside-compilation cluster `oc_cluster` of `cluster`/`new_func_name`.
// The node name and channel key follow the rewrite pass's naming scheme.
// Returns nullptr if `opts` already carries an error.
Node* RecvAtHost(ops::NodeOut key_input, const string& cluster,
                 const string& new_func_name, const string& oc_cluster,
                 absl::Span<const DataType> dtypes,
                 const GraphDefBuilder::Options& opts) {
  if (opts.HaveError()) return nullptr;
  const string channel_key = absl::StrCat("host_compute_channel_", cluster,
                                          "_", new_func_name, "_", oc_cluster);
  const string node_name = absl::StrCat("outside_compilation_", cluster, "_",
                                        new_func_name, "_", oc_cluster,
                                        "_recv");
  NodeBuilder builder(opts.WithName(node_name).GetNameForOp("_XlaRecvAtHost"),
                      "_XlaRecvAtHost", opts.op_registry());
  builder.Input(std::move(key_input));
  return opts.WithAttr("Toutputs", dtypes)
      .WithAttr("key", channel_key)
      .WithAttr("device_ordinal", 0)
      .WithAttr("_encapsulate", cluster)
      .WithAttr("_outside", oc_cluster)
      .FinalizeBuilder(&builder);
}
// Adds a "_XlaSendFromHost" node that sends `inputs` back to the XLA
// computation for outside-compilation cluster `oc_cluster` of
// `cluster`/`new_func_name`. The node name and channel key mirror
// RecvAtHost's scheme. Returns nullptr on prior builder error.
Node* SendFromHost(ops::NodeOut key_input, const string& cluster,
                   const string& new_func_name, const string& oc_cluster,
                   const std::vector<ops::NodeOut>& inputs,
                   const GraphDefBuilder::Options& opts) {
  if (opts.HaveError()) return nullptr;
  string key = absl::StrCat("host_compute_channel_", cluster, "_",
                            new_func_name, "_", oc_cluster);
  string name = absl::StrCat("outside_compilation_", cluster, "_",
                             new_func_name, "_", oc_cluster, "_send");
  NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaSendFromHost"),
                           "_XlaSendFromHost", opts.op_registry());
  node_builder.Input(inputs);
  node_builder.Input(std::move(key_input));
  // "Tinputs" records the dtype of each tensor being sent.
  std::vector<DataType> dtypes;
  for (const auto& node : inputs) {
    dtypes.push_back(node.dt);
  }
  return opts.WithAttr("Tinputs", dtypes)
      .WithAttr("key", key)
      .WithAttr("device_ordinal", 0)
      .WithAttr("_encapsulate", cluster)
      .WithAttr("_outside", oc_cluster)
      .FinalizeBuilder(&node_builder);
}
// Adds a "UnaryTest" node consuming `a`.
Node* Unary(ops::NodeOut a, const GraphDefBuilder::Options& opts) {
  return ops::UnaryOp("UnaryTest", std::move(a), opts);
}
// Adds a "BinaryTest" node consuming `a` and `b`.
Node* Binary(ops::NodeOut a, ops::NodeOut b,
             const GraphDefBuilder::Options& opts) {
  return ops::BinaryOp("BinaryTest", std::move(a), std::move(b), opts);
}
// Adds a "BinaryTest2" node (unknown output shape) consuming `a` and `b`.
Node* BinaryUnknownShape(ops::NodeOut a, ops::NodeOut b,
                         const GraphDefBuilder::Options& opts) {
  return ops::BinaryOp("BinaryTest2", std::move(a), std::move(b), opts);
}
// Adds an "AddNLikeTest" node (named like "AddN") consuming all of
// `inputs`. Returns nullptr if `opts` already carries an error.
Node* AddNLike(const std::vector<ops::NodeOut>& inputs,
               const GraphDefBuilder::Options& opts) {
  if (opts.HaveError()) return nullptr;
  NodeBuilder builder(opts.GetNameForOp("AddN"), "AddNLikeTest",
                      opts.op_registry());
  builder.Input(inputs);
  return opts.FinalizeBuilder(&builder);
}
// Adds an "_Arg" node for function argument `index` of type `type`.
Node* ArgOp(int index, DataType type, const GraphDefBuilder::Options& opts) {
  return ops::SourceOp("_Arg",
                       opts.WithAttr("T", type).WithAttr("index", index));
}
// Adds a "_Retval" node returning `a` as function result `index`. Returns
// nullptr on prior builder error.
Node* RetOp(int index, ops::NodeOut a, const GraphDefBuilder::Options& opts) {
  if (opts.HaveError()) return nullptr;
  NodeBuilder node_builder(opts.GetNameForOp("Retval"), "_Retval",
                           opts.op_registry());
  node_builder.Input(std::move(a)).Attr("index", index);
  return opts.FinalizeBuilder(&node_builder);
}
// Runs static shape inference, encapsulates "_encapsulate"-annotated
// subgraphs into library functions, then extracts "_outside"-annotated
// outside-compilation clusters for each function named in
// `encapsulated_functions`. `graphdef` and `library` are rewritten in
// place; the "_xla_inferred_shapes" node attr is stripped from the output
// library since these tests do not compare it.
Status Encapsulate(GraphDef* graphdef, FunctionDefLibrary* library,
                   const std::vector<string>& encapsulated_functions) {
  Status s;
  std::unique_ptr<FunctionLibraryDefinition> lib_def(
      new FunctionLibraryDefinition(OpRegistry::Global(), *library));
  GraphConstructorOptions options;
  options.allow_internal_ops = true;
  std::unique_ptr<Graph> graph(new Graph(lib_def.get()));
  s = ConvertGraphDefToGraph(options, *graphdef, graph.get());
  if (!s.ok()) return s;
  s = PerformStaticShapeInferenceBeforeEncapsulation(graph.get());
  if (!s.ok()) return s;
  // Build a ProcessFunctionLibraryRuntime over the local CPU device so
  // ExtractOutsideCompilation can instantiate functions.
  SessionOptions session_options;
  std::vector<std::unique_ptr<Device>> devices;
  TF_CHECK_OK(DeviceFactory::AddDevices(
      session_options, "/job:localhost/replica:0/task:0", &devices));
  OptimizerOptions opts;
  auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
  auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
      device_mgr.get(), Env::Default(), nullptr,
      TF_GRAPH_DEF_VERSION, lib_def.get(), opts,
      nullptr, nullptr);
  auto flr = pflr->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
  std::unique_ptr<Graph> graph_out;
  s = EncapsulateSubgraphsInFunctions("_encapsulate", *graph,
                                      {},
                                      false,
                                      &graph_out, lib_def.get());
  if (!s.ok()) return s;
  std::unordered_map<string, XlaClusterInfo> clusters;
  for (const auto& func : encapsulated_functions) {
    // Initialize to nullptr: previously this pointer was read uninitialized
    // (undefined behavior) when no node matched `func`.
    Node* xla_computation_node = nullptr;
    for (Node* n : graph_out->nodes()) {
      if (n->name() == func) {
        xla_computation_node = n;
        break;  // Node names are unique; stop at the first match.
      }
    }
    if (!xla_computation_node) {
      return errors::Internal("Cannot find node ", func);
    }
    NameAttrList func_name_attrs;
    func_name_attrs.set_name(func);
    clusters.emplace(func,
                     XlaClusterInfo{func, func_name_attrs, xla_computation_node,
                                    std::map<string, int>{}});
  }
  bool modified = false;
  s = ExtractOutsideCompilation("_encapsulate", "_outside", clusters,
                                graph_out.get(), flr, lib_def.get(), &modified);
  if (!s.ok()) return s;
  // Hand the rewritten graph and library back to the caller.
  GraphDef graphdef_out;
  graph_out->ToGraphDef(&graphdef_out);
  graphdef->Swap(&graphdef_out);
  *library = lib_def->ToProto();
  // Strip inferred-shape annotations; they are irrelevant to the
  // comparisons these tests perform.
  for (FunctionDef& fdef : *library->mutable_function()) {
    for (NodeDef& node_def : *fdef.mutable_node_def()) {
      node_def.mutable_attr()->erase("_xla_inferred_shapes");
    }
  }
  return s;
}
// Convenience overload: encapsulate with no outside-compilation extraction
// targets.
Status Encapsulate(GraphDef* graphdef, FunctionDefLibrary* library) {
  return Encapsulate(graphdef, library,
                     /*encapsulated_functions=*/std::vector<string>());
}
// With no "_encapsulate" annotations in the graph, Encapsulate must leave
// both the graph and the function library unchanged.
TEST(EncapsulateSubgraphsTest, NoFunctions) {
  GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
  Node* a = Input(builder.opts().WithName("A"));
  Node* b = Input(builder.opts().WithName("B"));
  Node* c = Unary(a, builder.opts().WithName("C"));
  Binary(b, c, builder.opts().WithName("D"));
  GraphDef graphdef_in;
  FunctionDefLibrary library_in;
  TF_EXPECT_OK(builder.ToGraphDef(&graphdef_in));
  *library_in.add_function() = test::function::XTimesTwo();
  GraphDef graphdef_out = graphdef_in;
  FunctionDefLibrary library_out = library_in;
  TF_EXPECT_OK(Encapsulate(&graphdef_out, &library_out));
  // Expect the outputs to equal the inputs exactly.
  TF_EXPECT_GRAPH_EQ(graphdef_in, graphdef_out);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_in, library_out);
}
// A single cluster "F1" (nodes "C" and "c", including the control edge
// between them) is encapsulated into one library function and replaced by a
// single call node; the surrounding graph is preserved.
TEST(EncapsulateSubgraphsTest, OneFunction) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    *library.add_function() = test::function::XTimesTwo();
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    // Note: this node is deliberately named "c", differing from "C" only by
    // case.
    Node* d = Binary(b, c,
                     b1.opts().WithName("c").WithControlInput(c).WithAttr(
                         "_encapsulate", "F1"));
    Binary(a, d, b1.opts().WithName("E"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  TF_EXPECT_OK(Encapsulate(&graphdef, &library));
  // Expected library: "F1" takes a/b, contains C and c (with C as a control
  // dependency of c), and returns c's output.
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  *library_expected.add_function() = test::function::XTimesTwo();
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"}, {"c_0_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"c"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}, {"C"}},
      },
      {{"c_0_retval", "c:o:0"}});
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    // The cluster collapses to a single call node "F1".
    NodeBuilder node_builder("F1", "F1", lib_def.get());
    node_builder.Input(a).Input(b);
    Node* call = b2.opts().FinalizeBuilder(&node_builder);
    Binary(a, call, b2.opts().WithName("E"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Two clusters "F1" and "F2" are each encapsulated into their own function;
// the data edge C->D becomes an edge between the two call nodes, and the
// control inputs from "Control" are attached to the calls.
TEST(EncapsulateSubgraphsTest, TwoFunctions) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    *library.add_function() = test::function::XTimesTwo();
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* control = Input(b1.opts().WithName("Control"));
    Node* c =
        Unary(a, b1.opts().WithName("C").WithControlInput(control).WithAttr(
                     "_encapsulate", "F1"));
    Node* d = Binary(b, c,
                     b1.opts().WithName("D").WithControlInput(control).WithAttr(
                         "_encapsulate", "F2"));
    Binary(a, d, b1.opts().WithName("E"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  TF_EXPECT_OK(Encapsulate(&graphdef, &library));
  // Expected library: F1 wraps C, F2 wraps D (taking F1's result as its
  // second argument).
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  *library_expected.add_function() = test::function::XTimesTwo();
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float"}, {"c_0_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
      },
      {{"c_0_retval", "C:o:0"}});
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F2", {"b_0_arg:float", "c_0_arg:float"}, {"d_0_retval:float"}, {},
      {
          {{"D"}, "BinaryTest", {"b_0_arg", "c_0_arg"}},
      },
      {{"d_0_retval", "D:o:0"}});
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* control = Input(b2.opts().WithName("Control"));
    NodeBuilder nb("F1", "F1", lib_def.get());
    nb.Input(a).ControlInput(control);
    Node* call1 = b2.opts().FinalizeBuilder(&nb);
    NodeBuilder nb2("F2", "F2", lib_def.get());
    nb2.Input(b).Input(call1).ControlInput(control);
    Node* call2 = b2.opts().FinalizeBuilder(&nb2);
    Binary(a, call2, b2.opts().WithName("E"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
std::vector<string> GraphNodes(const Graph& graph) {
std::vector<string> nodes;
for (const auto& node : graph.nodes()) {
if (!node->IsSource() && !node->IsSink()) {
nodes.push_back(node->name());
}
}
std::sort(nodes.begin(), nodes.end());
return nodes;
}
std::vector<std::pair<string, string>> GraphEdges(const Graph& graph) {
std::vector<std::pair<string, string>> edges;
for (const Edge* edge : graph.edges()) {
if (edge->src()->IsSource() || edge->dst()->IsSink()) continue;
edges.emplace_back(
absl::StrCat(edge->src()->name(), ":", edge->src_output()),
absl::StrCat(edge->dst()->name(), ":", edge->dst_input()));
}
std::sort(edges.begin(), edges.end());
return edges;
}
// "add1" feeds "add2" on both operands; after encapsulation the expected
// edge list contains only a single cluster1->cluster2 edge, i.e. the
// duplicated input is collapsed into one function argument.
TEST(EncapsulateSubgraphsTest, InputDeduplication) {
  Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
      "/job:localhost/replica:0/task:0/cpu:0");
  auto x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
  auto add1 = ops::Add(root.WithOpName("add1"), x, x);
  add1.node()->AddAttr("_cluster", "cluster1");
  auto add2 = ops::Add(root.WithOpName("add2"), add1, add1);
  add2.node()->AddAttr("_cluster", "cluster2");
  auto out = ops::Mul(root.WithOpName("mul"), add1, add2);
  Graph graph_before_encapsulation(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(&graph_before_encapsulation));
  FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
  std::unique_ptr<Graph> graph;
  TF_ASSERT_OK(EncapsulateSubgraphsInFunctions(
      "_cluster", graph_before_encapsulation,
      {},
      false, &graph, &library));
  std::vector<string> expected_nodes = {"cluster1", "cluster2", "mul", "x"};
  EXPECT_EQ(expected_nodes, GraphNodes(*graph));
  std::vector<std::pair<string, string>> expected_edges = {
      {"cluster1:0", "cluster2:0"},
      {"cluster1:0", "mul:0"},
      {"cluster2:0", "mul:1"},
      {"x:0", "cluster1:0"}};
  EXPECT_EQ(expected_edges, GraphEdges(*graph));
}
// Linear scan of `graph` for a node named `name`; returns nullptr if no
// such node exists.
const Node* FindNodeByName(const Graph& graph, const string& name) {
  for (const Node* candidate : graph.nodes()) {
    if (candidate->name() == name) {
      return candidate;
    }
  }
  return nullptr;
}
bool HasGuaranteeConstAttr(const Node& n) {
bool is_guaranteed_constant = false;
if (!GetNodeAttr(n.attrs(), "_is_guaranteed_constant",
&is_guaranteed_constant)
.ok()) {
return false;
}
return is_guaranteed_constant;
}
// Both operands of the clustered "add1" flow through GuaranteeConst, so the
// rewrite callback must observe exactly two _Arg nodes carrying the
// "_is_guaranteed_constant" attribute.
TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Simple) {
  Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
      "/job:localhost/replica:0/task:0/cpu:0");
  auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT);
  auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT);
  auto const_guarantee_x2 =
      ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2);
  auto const_guarantee_x1 =
      ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1);
  auto add1 =
      ops::Add(root.WithOpName("add1"), const_guarantee_x1, const_guarantee_x2);
  add1.node()->AddAttr("_encapsulate", "encapsulate1");
  Graph graph_before(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(&graph_before));
  std::unique_ptr<Graph> graph_after;
  FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
  int guaranteed_consts = 0;
  TF_ASSERT_OK(EncapsulateSubgraphsInFunctions(
      "_encapsulate", graph_before,
      // Rewrite callback: count _Arg nodes named "const*" and check the
      // guaranteed-constant attr is set on exactly those.
      [&guaranteed_consts](const std::vector<OutputTensor>& arg_source_tensors,
                           std::unique_ptr<Graph>* graph_ptr,
                           std::vector<int>* input_permutation,
                           std::vector<int>* output_permutation,
                           NodeDef* call_def) {
        Graph* graph = graph_ptr->get();
        for (const Node* n : graph->nodes()) {
          if (n->type_string() == "_Arg" &&
              absl::StartsWith(n->name(), "const")) {
            ++guaranteed_consts;
            EXPECT_TRUE(HasGuaranteeConstAttr(*n));
          } else {
            EXPECT_FALSE(HasGuaranteeConstAttr(*n));
          }
        }
        return absl::OkStatus();
      },
      false, &graph_after, &library));
  EXPECT_EQ(2, guaranteed_consts);
}
// Only one operand of the clustered "mul1" is fully guaranteed-const
// ("const_guarantee_add1"); "add2" mixes a GuaranteeConst with a plain
// placeholder, so exactly one guaranteed-const _Arg is expected.
TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Add) {
  Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
      "/job:localhost/replica:0/task:0/cpu:0");
  auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT);
  auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT);
  auto const_guarantee_x1 =
      ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1);
  auto const_guarantee_x2 =
      ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2);
  auto const_guarantee_add1 = ops::Add(root.WithOpName("const_guarantee_add1"),
                                       const_guarantee_x1, const_guarantee_x2);
  auto add2 = ops::Add(root.WithOpName("add2"), const_guarantee_x1, x2);
  auto mul1 = ops::Mul(root.WithOpName("mul1"), const_guarantee_add1, add2);
  mul1.node()->AddAttr("_encapsulate", "encapsulate1");
  Graph graph_before(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(&graph_before));
  std::unique_ptr<Graph> graph_after;
  FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
  int guaranteed_consts = 0;
  TF_ASSERT_OK(EncapsulateSubgraphsInFunctions(
      "_encapsulate", graph_before,
      // Rewrite callback: count _Arg nodes named "const*" and check the
      // guaranteed-constant attr is set on exactly those.
      [&guaranteed_consts](const std::vector<OutputTensor>& arg_source_tensors,
                           std::unique_ptr<Graph>* graph_ptr,
                           std::vector<int>* input_permutation,
                           std::vector<int>* output_permutation,
                           NodeDef* call_def) {
        Graph* graph = graph_ptr->get();
        for (const Node* n : graph->nodes()) {
          if (n->type_string() == "_Arg" &&
              absl::StartsWith(n->name(), "const")) {
            ++guaranteed_consts;
            EXPECT_TRUE(HasGuaranteeConstAttr(*n));
          } else {
            EXPECT_FALSE(HasGuaranteeConstAttr(*n));
          }
        }
        return absl::OkStatus();
      },
      false, &graph_after, &library));
  EXPECT_EQ(1, guaranteed_consts);
}
// One cluster "F1" with one outside-compilation cluster "O1" (node E).
// Expects: E replaced inside F1 by an XlaHostCompute node; a host-side
// _XlaRecvAtHost/E/_XlaSendFromHost subgraph plus a NoOp sequencer in the
// main graph; and a shape-inference function for O1 in the library.
TEST(EncapsulateSubgraphsTest, OneFunctionOneOutside) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    *library.add_function() = test::function::XTimesTwo();
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d = Binary(b, c,
                     b1.opts().WithName("c").WithControlInput(c).WithAttr(
                         "_encapsulate", "F1"));
    // E is the outside-compilation node: in cluster F1 but marked
    // "_outside" O1.
    Node* e = Binary(c, d,
                     b1.opts()
                         .WithName("E")
                         .WithControlInputs({b, d})
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    Node* f = Binary(c, e,
                     b1.opts().WithName("F").WithControlInput(e).WithAttr(
                         "_encapsulate", "F1"));
    Binary(a, f, b1.opts().WithName("G").WithControlInput(e));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Expected shape-inference function for O1: recv -> E -> send.
  {
    GraphDefBuilder shape(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape.opts());
    Node* recv = RecvAtHost(
        ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
        shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Binary(ops::NodeOut(recv, 0), ops::NodeOut(recv, 1),
                     shape.opts()
                         .WithName("E")
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape, "F1_F1_O1", &library_expected));
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected F1: E replaced by an XlaHostCompute node wired to C and c.
  *library_expected.add_function() = test::function::XTimesTwo();
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"c"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}, {"C"}},
          {{"F"},
           "BinaryTest",
           {"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
           {},
           {"outside_compilation_O1_host_compute"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"C:o:0", "c:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const DataType>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}},
           {"c"}},
      },
      {{"f_0_retval_retval", "F:o:0"}});
  // Expected main graph: key placeholder, recv, E, send, sequencer, and the
  // F1 call node.
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv = RecvAtHost(
        ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
        b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Binary(ops::NodeOut(recv, 0), ops::NodeOut(recv, 1),
                     b2.opts()
                         .WithName("E")
                         .WithControlInputs({recv})
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    Node* send =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithControlInput(e).WithAttr(
                         kXlaHasHostTransferAttrName, true));
    Node* s = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv, send}),
        "F1");
    NodeBuilder node_builder("F1", "F1", lib_def.get());
    node_builder.Input(a).Input(b);
    Node* call =
        b2.opts().WithControlInputs({s, b}).FinalizeBuilder(&node_builder);
    Binary(a, call, b2.opts().WithName("G").WithControlInputs({call}));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
TEST(EncapsulateSubgraphsTest, OneFunctionTwoOutside) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Binary(c, d,
b1.opts()
.WithName("E")
.WithControlInputs({b, d})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Binary(c, e,
b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Node* g = Binary(e, f,
b1.opts()
.WithName("G")
.WithControlInputs({e, f})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* h = Binary(d, e,
b1.opts()
.WithName("H")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* i = Unary(h, b1.opts().WithName("I").WithAttr("_encapsulate", "F1"));
Binary(g, i, b1.opts().WithName("J"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv, 0), ops::NodeOut(recv, 1),
shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
{
GraphDefBuilder shape2(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape2.opts());
Node* recv1 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
shape2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* recv2 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT, DT_FLOAT},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Binary(e, ops::NodeOut(recv2, 0),
shape2.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* h = Binary(ops::NodeOut(recv2, 1), e,
shape2.opts()
.WithName("H")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g, h},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape2, "F1_F1_O2", &library_expected));
}
NameAttrList shape_inference_graph1, shape_inference_graph2;
shape_inference_graph1.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
shape_inference_graph2.set_name(
"_outside_compilation_shape_inference_F1_F1_O2");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"g_0_retval_retval:float", "i_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}},
{{"I"},
"UnaryTest",
{"outside_compilation_O2_host_compute:outputs:1"}},
{{"F"},
"BinaryTest",
{"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"F:o:0", "D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O2"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph2},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O2"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node",
"outside_compilation_O1_host_compute"})},
{"_xla_original_oc_node_name",
"outside_compilation_O2_host_compute"}},
{"F", "outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph1},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"D"}},
},
{{"g_0_retval_retval", "outside_compilation_O2_host_compute:outputs:0"},
{"i_0_retval_retval", "I:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
b2.opts()
.WithName("E")
.WithControlInputs({recv1})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithControlInput(e).WithAttr(
kXlaHasHostTransferAttrName, true));
Node* recv2 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Binary(e, ops::NodeOut(recv2, 0),
b2.opts()
.WithName("G")
.WithControlInputs({recv2, e})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* h = Binary(ops::NodeOut(recv2, 1), e,
b2.opts()
.WithName("H")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* send2 =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g, h},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* s = Sequencer(b2.opts()
.WithName("F1_sequencer")
.WithControlInputs({recv1, send1, recv2, send2}),
"F1");
NodeBuilder node_builder("F1", "F1", lib_def.get());
node_builder.Input(a).Input(b);
Node* call =
b2.opts().WithControlInputs({s, b}).FinalizeBuilder(&node_builder);
Binary(ops::NodeOut(call, 0), ops::NodeOut(call, 1),
b2.opts().WithName("J"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Two encapsulated clusters F1 and F2, each with one outside_compilation
// cluster "O1", where F2 consumes data outputs produced by F1. Verifies both
// the rewritten function library and the resulting host graph.
TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutside) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: C/D/F belong to cluster F1 (E is its outside compilation
    // O1); G/I belong to cluster F2 (H is its outside compilation O1).
    // J consumes the results of both clusters.
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = InputShaped(b1.opts().WithName("A"));
    Node* b = InputShaped(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Binary(c, d,
                     b1.opts()
                         .WithName("E")
                         .WithControlInputs({b, d})
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    Node* f = Binary(c, e,
                     b1.opts().WithName("F").WithControlInput(e).WithAttr(
                         "_encapsulate", "F1"));
    Node* g = Binary(e, f,
                     b1.opts().WithName("G").WithControlInputs({e, f}).WithAttr(
                         "_encapsulate", "F2"));
    Node* h = Binary(d, g,
                     b1.opts()
                         .WithName("H")
                         .WithAttr("_encapsulate", "F2")
                         .WithAttr("_outside", "O1"));
    Node* i =
        Binary(f, h, b1.opts().WithName("I").WithAttr("_encapsulate", "F2"));
    Binary(g, i, b1.opts().WithName("J"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1", "F2"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Inputs are shaped, so the host-compute ops carry static "shapes" attrs
  // (a 1-D tensor of size 2) instead of a shape-inference graph.
  TensorShapeProto shape_proto_expected;
  shape_proto_expected.add_dim()->set_size(2);
  // Expected rewritten F1: E is replaced by an XlaHostCompute that sends
  // {C, D} to the host and receives E's output; D is also exported as a
  // retval because F2's outside compilation (H) needs it.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "f_0_retval_retval:float",
       "d_0_retval_retval:float"},
      {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"},
           "BinaryTest",
           {"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
           {},
           {"outside_compilation_O1_host_compute"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"C:o:0", "D:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes",
             absl::Span<const TensorShapeProto>({shape_proto_expected})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}},
           {"D"}},
      },
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"d_0_retval_retval", "D:o:0"},
       {"f_0_retval_retval", "F:o:0"}});
  // Expected rewritten F2: takes F1's retvals (e, f, d) as args; H is
  // replaced by an XlaHostCompute fed by {d, G}.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F2", {"e_0_arg:float", "f_0_arg:float", "d_0_arg:float"},
      {"g_0_retval_retval:float", "i_0_retval_retval:float"}, {},
      {
          {{"G"}, "BinaryTest", {"e_0_arg", "f_0_arg"}},
          {{"I"},
           "BinaryTest",
           {"f_0_arg", "outside_compilation_O1_host_compute:outputs:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"d_0_arg", "G:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F2_F2_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes",
             absl::Span<const TensorShapeProto>({shape_proto_expected})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"g_0_retval_retval", "G:o:0"}, {"i_0_retval_retval", "I:o:0"}});
  {
    // Expected host graph: one RecvAtHost/SendFromHost pair plus a sequencer
    // per cluster; the outside-compilation bodies (E, H) now run on the host.
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = InputShaped(b2.opts().WithName("A"));
    Node* b = InputShaped(b2.opts().WithName("B"));
    Node* key_constant1 =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 = RecvAtHost(
        ops::NodeOut(key_constant1, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
        b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
                     b2.opts()
                         .WithName("E")
                         .WithControlInputs({recv1})
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    Node* send1 =
        SendFromHost(ops::NodeOut(key_constant1, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithControlInput(e).WithAttr(
                         kXlaHasHostTransferAttrName, true));
    Node* s1 = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b);
    Node* call1 =
        b2.opts().WithControlInputs({s1, b}).FinalizeBuilder(&node_builder1);
    Node* key_constant2 =
        KeyPlaceholder("F2", b2.opts().WithName("F2_key_placeholder"));
    Node* recv2 = RecvAtHost(
        ops::NodeOut(key_constant2, 0), "F2", "F2", "O1", {DT_FLOAT, DT_FLOAT},
        b2.opts().WithAttr(kXlaHasHostTransferAttrName, true))
    Node* h = Binary(recv2, ops::NodeOut(recv2, 1),
                     b2.opts()
                         .WithName("H")
                         .WithAttr("_encapsulate", "F2")
                         .WithAttr("_outside", "O1"));
    Node* send2 =
        SendFromHost(ops::NodeOut(key_constant2, 0), "F2", "F2", "O1", {h},
                     b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* s2 = Sequencer(
        b2.opts().WithName("F2_sequencer").WithControlInputs({recv2, send2}),
        "F2");
    // F2's call consumes all three outputs of F1's call.
    NodeBuilder node_builder2("F2", "F2", lib_def.get());
    node_builder2.Input(call1)
        .Input(ops::NodeOut(call1, 1))
        .Input(ops::NodeOut(call1, 2));
    Node* call2 = b2.opts()
                      .WithControlInputs({s2, call1})
                      .FinalizeBuilder(&node_builder2);
    Binary(call2, ops::NodeOut(call2, 1), b2.opts().WithName("J"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Two encapsulated clusters F1 and F2, each with an outside_compilation
// cluster, where the only cross-cluster dependency goes through the
// outside-compilation (host-side) nodes rather than the compiled bodies.
TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutsideDependencyFromOutside) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: C/D/F in cluster F1 (E outside, O1); G/I in cluster F2
    // (H outside, O1). J consumes F and I.
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = InputShaped(b1.opts().WithName("A"));
    Node* b = InputShaped(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Binary(c, d,
                     b1.opts()
                         .WithName("E")
                         .WithControlInputs({b, d})
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    Node* f = Binary(c, e,
                     b1.opts().WithName("F").WithControlInput(e).WithAttr(
                         "_encapsulate", "F1"));
    Node* g =
        Binary(a, b, b1.opts().WithName("G").WithAttr("_encapsulate", "F2"));
    Node* h = Unary(g, b1.opts()
                           .WithName("H")
                           .WithAttr("_encapsulate", "F2")
                           .WithAttr("_outside", "O1"));
    Node* i = Unary(h, b1.opts().WithName("I").WithAttr("_encapsulate", "F2"));
    Binary(f, i, b1.opts().WithName("J"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1", "F2"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Shaped inputs => static "shapes" attr on the host-compute ops.
  TensorShapeProto shape_proto_expected;
  shape_proto_expected.add_dim()->set_size(2);
  // Expected rewritten F1: E replaced by an XlaHostCompute over {C, D}.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"},
           "BinaryTest",
           {"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
           {},
           {"outside_compilation_O1_host_compute"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"C:o:0", "D:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes",
             absl::Span<const TensorShapeProto>({shape_proto_expected})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}},
           {"D"}},
      },
      {{"f_0_retval_retval", "F:o:0"}});
  // Expected rewritten F2: H replaced by an XlaHostCompute over {G}.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F2", {"a_0_arg:float", "b_0_arg:float"}, {"i_0_retval_retval:float"}, {},
      {
          {{"G"}, "BinaryTest", {"a_0_arg", "b_0_arg"}},
          {{"I"},
           "UnaryTest",
           {"outside_compilation_O1_host_compute:outputs:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"G:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F2_F2_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes",
             absl::Span<const TensorShapeProto>({shape_proto_expected})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"i_0_retval_retval", "I:o:0"}});
  {
    // Expected host graph: independent recv/compute/send/sequencer chains for
    // F1 and F2; both calls take the original inputs {A, B}.
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = InputShaped(b2.opts().WithName("A"));
    Node* b = InputShaped(b2.opts().WithName("B"));
    Node* key_constant1 =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 = RecvAtHost(ops::NodeOut(key_constant1, 0), "F1", "F1", "O1",
                             {DT_FLOAT, DT_FLOAT}, b2.opts());
    Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
                     b2.opts()
                         .WithName("E")
                         .WithControlInputs({recv1})
                         .WithAttr("_encapsulate", "F1")
                         .WithAttr("_outside", "O1"));
    Node* send1 = SendFromHost(ops::NodeOut(key_constant1, 0), "F1", "F1", "O1",
                               {e}, b2.opts().WithControlInput(e));
    Node* s1 = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b);
    Node* call1 =
        b2.opts().WithControlInputs({s1, b}).FinalizeBuilder(&node_builder1);
    Node* key_constant2 =
        KeyPlaceholder("F2", b2.opts().WithName("F2_key_placeholder"));
    Node* recv2 = RecvAtHost(ops::NodeOut(key_constant2, 0), "F2", "F2", "O1",
                             {DT_FLOAT}, b2.opts());
    Node* h = Unary(recv2, b2.opts()
                               .WithName("H")
                               .WithAttr("_encapsulate", "F2")
                               .WithAttr("_outside", "O1"));
    Node* send2 = SendFromHost(ops::NodeOut(key_constant2, 0), "F2", "F2", "O1",
                               {h}, b2.opts());
    Node* s2 = Sequencer(
        b2.opts().WithName("F2_sequencer").WithControlInputs({recv2, send2}),
        "F2");
    NodeBuilder node_builder2("F2", "F2", lib_def.get());
    node_builder2.Input(a).Input(b);
    Node* call2 =
        b2.opts().WithControlInputs({s2}).FinalizeBuilder(&node_builder2);
    Binary(call1, call2, b2.opts().WithName("J"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Outside-compilation cluster whose only input (A) comes from outside the
// encapsulated cluster: the host-compute op still forwards A to the host via
// the recv/send channel.
TEST(EncapsulateSubgraphsTest, OutsideCompilationNoInputs) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: C/D/F in cluster F1; E (outside, O1) reads the shaped
    // input A directly rather than a value produced inside F1.
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = InputShaped(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(a, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f =
        Binary(d, e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Unary(f, b1.opts().WithName("G"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // A is shaped, so the host-compute op gets a static "shapes" attr.
  TensorShapeProto shape_proto_expected;
  shape_proto_expected.add_dim()->set_size(2);
  // Expected rewritten F1: E replaced by an XlaHostCompute fed by arg a.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"},
           "BinaryTest",
           {"D:o:0", "outside_compilation_O1_host_compute:outputs:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"a_0_arg"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes",
             absl::Span<const TensorShapeProto>({shape_proto_expected})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"f_0_retval_retval", "F:o:0"}});
  {
    // Expected host graph: recv -> E -> send, sequenced before the F1 call.
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = InputShaped(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                             {DT_FLOAT}, b2.opts());
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                               {e}, b2.opts());
    Node* s1 = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({send1, recv1}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b);
    Node* call1 =
        b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
    Unary(call1, b2.opts().WithName("G"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Outside-compilation cluster with a control-edge input from inside the
// compiled cluster (D -> E): the control dependency is carried by the
// host-compute op (control input "D") and, on the host, by the recv node.
TEST(EncapsulateSubgraphsTest, OutsideCompilationControlInput) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: E (outside, O1) reads A and has a control input from D,
    // which lives inside cluster F1.
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = InputShaped(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(a, b1.opts()
                           .WithName("E")
                           .WithControlInput(d)
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f =
        Binary(d, e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Unary(f, b1.opts().WithName("G"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // A is shaped, so the host-compute op gets a static "shapes" attr.
  TensorShapeProto shape_proto_expected;
  shape_proto_expected.add_dim()->set_size(2);
  // Expected rewritten F1: the host-compute op carries D as a control input,
  // preserving the original D -> E ordering constraint.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"},
           "BinaryTest",
           {"D:o:0", "outside_compilation_O1_host_compute:outputs:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"a_0_arg"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes",
             absl::Span<const TensorShapeProto>({shape_proto_expected})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}},
           {"D"}},
      },
      {{"f_0_retval_retval", "F:o:0"}});
  {
    // Expected host graph: E's control input from D becomes a control input
    // from the recv node (the host-side proxy for the cluster boundary).
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = InputShaped(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                             {DT_FLOAT}, b2.opts());
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithControlInput(recv1)
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                               {e}, b2.opts());
    Node* s1 = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b);
    Node* call1 =
        b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
    Unary(call1, b2.opts().WithName("G"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Outside-compilation cluster whose output (E) is consumed only outside the
// compiled cluster: inputs are unshaped, so the expected library contains a
// shape-inference graph for the host-compute op instead of static shapes.
TEST(EncapsulateSubgraphsTest, OutsideCompilationNoOutputs) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: C/D/F in cluster F1; E (outside, O1) feeds only G, which
    // is outside the cluster.
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(d, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f = Unary(d, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Binary(e, f, b1.opts().WithName("G"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  {
    // Build the expected shape-inference graph for cluster F1/O1 and add it
    // to the expected library.
    GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape1.opts());
    Node* recv1 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts()
                                                .WithName("E")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected rewritten F1: E's value is returned through the host-compute
  // op's output; "shapes" is empty and shape_inference_graph is set instead.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"}, "UnaryTest", {"D:o:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"D:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"f_0_retval_retval", "F:o:0"}});
  {
    // Expected host graph: recv -> E -> send, sequenced before the F1 call;
    // G consumes both outputs of the call.
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                             {DT_FLOAT}, b2.opts());
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                               {e}, b2.opts());
    Node* s1 = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b);
    Node* call1 =
        b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
    Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("G"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Outside-compilation cluster with a control-edge output into the compiled
// cluster (E -> F): inside the rewritten function, F gains a control input
// from the host-compute op; on the host, the send node gains a control input
// from E.
TEST(EncapsulateSubgraphsTest, OutsideCompilationControlOutput) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: F (inside F1) has a control input from E (outside, O1).
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(d, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f = Unary(d, b1.opts().WithName("F").WithControlInput(e).WithAttr(
                           "_encapsulate", "F1"));
    Binary(e, f, b1.opts().WithName("G"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  {
    // Build the expected shape-inference graph for cluster F1/O1 (inputs are
    // unshaped, so static shapes are unavailable).
    GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape1.opts());
    Node* recv1 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts()
                                                .WithName("E")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected rewritten F1: F carries a control dependency on the
  // host-compute op, preserving the original E -> F control edge.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"},
           "UnaryTest",
           {"D:o:0"},
           {},
           {"outside_compilation_O1_host_compute"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"D:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"f_0_retval_retval", "F:o:0"}});
  {
    // Expected host graph: send carries a control input from E so the send
    // cannot fire before E runs.
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                             {DT_FLOAT}, b2.opts());
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
                               {e}, b2.opts().WithControlInput(e));
    Node* s1 = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b);
    Node* call1 =
        b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
    Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("G"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Two outside-compilation clusters O1 and O2 inside one compiled cluster F1,
// with a control edge O1(E) -> O2(G). Verifies the dependency is recorded in
// O2's "_xla_token_input_nodes" attr and as a control edge between the
// host-compute ops, even though no compiled node sits between the clusters.
TEST(EncapsulateSubgraphsTest,
     OutsideCompilationClusterDependencyNoSrcCluster) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  {
    // Input graph: E (outside, O1) reads A; G (outside, O2) reads F and has a
    // control input from E; H (inside F1) consumes G.
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(a, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f = Unary(d, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Node* g = Unary(f, b1.opts()
                           .WithName("G")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O2")
                           .WithControlInput(e));
    Node* h = Unary(g, b1.opts().WithName("H").WithAttr("_encapsulate", "F1"));
    Binary(e, h, b1.opts().WithName("I"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass under test.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  {
    // Expected shape-inference graph for O1.
    GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape1.opts());
    Node* recv1 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts()
                                                .WithName("E")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
  }
  {
    // Expected shape-inference graph for O2.
    GraphDefBuilder shape2(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape2.opts());
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
                   shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* g = Unary(ops::NodeOut(recv2, 0), shape2.opts()
                                                .WithName("G")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O2"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g},
                 shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape2, "F1_F1_O2", &library_expected));
  }
  NameAttrList shape_inference_graph1;
  shape_inference_graph1.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  NameAttrList shape_inference_graph2;
  shape_inference_graph2.set_name(
      "_outside_compilation_shape_inference_F1_F1_O2");
  // Expected rewritten F1: O2's host compute lists O1's host compute in
  // "_xla_token_input_nodes" and takes a control input from it, encoding the
  // E -> G dependency.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "h_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"}, "UnaryTest", {"D:o:0"}},
          {{"H"},
           "UnaryTest",
           {"outside_compilation_O2_host_compute:outputs:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"a_0_arg"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph1},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
          {{"outside_compilation_O2_host_compute"},
           "XlaHostCompute",
           {"F:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O2"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph2},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O2"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node",
                                       "outside_compilation_O1_host_compute"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O2_host_compute"}},
           {"outside_compilation_O1_host_compute"}},
      },
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"h_0_retval_retval", "H:o:0"}});
  {
    // Expected host graph: separate recv/send pairs per outside cluster; G
    // keeps its control input from E; one sequencer gates the F1 call.
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send1 =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* g = Unary(recv2, b2.opts()
                               .WithName("G")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O2")
                               .WithControlInput(e));
    Node* send2 =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g},
                     b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* s1 = Sequencer(b2.opts()
                             .WithName("F1_sequencer")
                             .WithControlInputs({recv1, send1, recv2, send2}),
                         "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b).ControlInput(s1);
    Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
    Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("I"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Tests a dependency between two outside-compilation clusters, E (O1) -> G
// (O2), where cluster O2's result is never consumed back inside the compiled
// function: G's XlaHostCompute must have empty Toutputs and no shape
// inference graph, but still record O1 in `_xla_token_input_nodes` (and as a
// control input) so the host-side ordering between the clusters is preserved.
TEST(EncapsulateSubgraphsTest,
     OutsideCompilationClusterDependencyNoDstCluster) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  // Build the input graph. E is in cluster O1; G is in cluster O2 and
  // depends on E only through a control edge.
  {
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(d, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f = Unary(e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Unary(a, b1.opts()
                 .WithName("G")
                 .WithAttr("_encapsulate", "F1")
                 .WithAttr("_outside", "O2")
                 .WithControlInput(e));
    Node* h = Unary(f, b1.opts().WithName("H").WithAttr("_encapsulate", "F1"));
    Binary(e, h, b1.opts().WithName("I"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass on cluster F1.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Expected shape-inference graph for cluster O1. O2 has no outputs, so no
  // shape-inference graph is generated for it.
  {
    GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape1.opts());
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(ops::NodeOut(recv2, 0), shape1.opts()
                                                .WithName("E")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected rewritten function F1. Note that O2's host compute lists O1 in
  // `_xla_token_input_nodes` and takes it as a control input, while its
  // Toutputs list is empty and its shape_inference_graph is unset.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "h_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"},
           "UnaryTest",
           {"outside_compilation_O1_host_compute:outputs:0"}},
          {{"H"}, "UnaryTest", {"F:o:0"}},
          {{"outside_compilation_O2_host_compute"},
           "XlaHostCompute",
           {"a_0_arg"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O2"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", NameAttrList()},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O2"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node",
                                       "outside_compilation_O1_host_compute"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O2_host_compute"}},
           {"outside_compilation_O1_host_compute"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"D:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"h_0_retval_retval", "H:o:0"}});
  // Expected host-side graph: RecvAtHost/SendFromHost pairs for O1, a
  // receive-only pair for O2 (no SendFromHost for O2 since it has no
  // outputs), and the F1_sequencer gating the call to F1.
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Unary(recv2, b2.opts()
                     .WithName("G")
                     .WithAttr("_encapsulate", "F1")
                     .WithAttr("_outside", "O2")
                     .WithControlInput(e));
    Node* s1 = Sequencer(b2.opts()
                             .WithName("F1_sequencer")
                             .WithControlInputs({recv1, recv2, send}),
                         "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b).ControlInput(s1);
    Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
    Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("I"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  // Compare both the rewritten host graph and the generated function library
  // against the expected versions built above.
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Tests a chain of dependencies across three outside-compilation clusters:
// E (O1) -> G (O2) -> I (O3), where only O1 sends a value back into the
// compiled function. O2 and O3 therefore get host computes with empty
// Toutputs, and each cluster's `_xla_token_input_nodes` accumulates all of
// its transitive predecessors (O3 lists both O1 and O2).
TEST(EncapsulateSubgraphsTest, OutsideCompilationClusterDependency) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  // Build the input graph: three outside-compilation clusters chained via
  // control edges (E -> G -> I).
  {
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(d, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f = Unary(e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Node* g = Unary(d, b1.opts()
                           .WithName("G")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O2")
                           .WithControlInput(e));
    Node* h = Unary(f, b1.opts().WithName("H").WithAttr("_encapsulate", "F1"));
    Binary(d, e,
           b1.opts()
               .WithName("I")
               .WithAttr("_encapsulate", "F1")
               .WithAttr("_outside", "O3")
               .WithControlInput(g));
    Binary(e, h, b1.opts().WithName("J"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass on cluster F1.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Expected shape-inference graph for cluster O1 (the only cluster that
  // produces an output consumed by the compiled computation).
  {
    GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape1.opts());
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(ops::NodeOut(recv2, 0), shape1.opts()
                                                .WithName("E")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected rewritten function F1. O3's `_xla_token_input_nodes` lists both
  // O1 and O2, and its host compute takes both as control inputs.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "h_0_retval_retval:float"}, {},
      {{{"C"}, "UnaryTest", {"a_0_arg"}},
       {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
       {{"F"}, "UnaryTest", {"outside_compilation_O1_host_compute:outputs:0"}},
       {{"H"}, "UnaryTest", {"F:o:0"}},
       {{"outside_compilation_O1_host_compute"},
        "XlaHostCompute",
        {"D:o:0"},
        {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
         {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
         {"ancestors", absl::Span<const string>({})},
         {"key", "host_compute_channel_F1_F1_O1"},
         {"send_key", ""},
         {"recv_key", ""},
         {"shape_inference_graph", shape_inference_graph},
         {"tpu_core", 0},
         {"cost_estimate_ns", 1000000},
         {"shapes", absl::Span<const TensorShapeProto>({})},
         {"_outside_compilation_subgraph", "O1"},
         {"_xla_token_input_nodes",
          absl::Span<const string>({"_xla_token_arg_node"})},
         {"_xla_original_oc_node_name",
          "outside_compilation_O1_host_compute"}}},
       {{"outside_compilation_O2_host_compute"},
        "XlaHostCompute",
        {"D:o:0"},
        {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
         {"Toutputs", absl::Span<const DataType>({})},
         {"ancestors", absl::Span<const string>({})},
         {"key", "host_compute_channel_F1_F1_O2"},
         {"send_key", ""},
         {"recv_key", ""},
         {"shape_inference_graph", NameAttrList()},
         {"tpu_core", 0},
         {"cost_estimate_ns", 1000000},
         {"shapes", absl::Span<const TensorShapeProto>({})},
         {"_outside_compilation_subgraph", "O2"},
         {"_xla_token_input_nodes",
          absl::Span<const string>(
              {"_xla_token_arg_node", "outside_compilation_O1_host_compute"})},
         {"_xla_original_oc_node_name", "outside_compilation_O2_host_compute"}},
        {"outside_compilation_O1_host_compute"}},
       {{"outside_compilation_O3_host_compute"},
        "XlaHostCompute",
        {"D:o:0"},
        {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
         {"Toutputs", absl::Span<const DataType>({})},
         {"ancestors", absl::Span<const string>({})},
         {"key", "host_compute_channel_F1_F1_O3"},
         {"send_key", ""},
         {"recv_key", ""},
         {"shape_inference_graph", NameAttrList()},
         {"tpu_core", 0},
         {"cost_estimate_ns", 1000000},
         {"shapes", absl::Span<const TensorShapeProto>({})},
         {"_outside_compilation_subgraph", "O3"},
         {"_xla_token_input_nodes",
          absl::Span<const string>({"_xla_token_arg_node",
                                    "outside_compilation_O1_host_compute",
                                    "outside_compilation_O2_host_compute"})},
         {"_xla_original_oc_node_name", "outside_compilation_O3_host_compute"}},
        {"outside_compilation_O1_host_compute",
         "outside_compilation_O2_host_compute"}}},
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"h_0_retval_retval", "H:o:0"}});
  // Expected host-side graph: only O1 gets a SendFromHost; O2 and O3 are
  // receive-only since they produce no values for the compiled computation.
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv1 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(recv1, b2.opts()
                               .WithName("E")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O1"));
    Node* send =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* g = Unary(recv2, b2.opts()
                               .WithName("G")
                               .WithAttr("_encapsulate", "F1")
                               .WithAttr("_outside", "O2")
                               .WithControlInput(e));
    Node* recv3 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O3", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Binary(recv3, e,
           b2.opts()
               .WithName("I")
               .WithAttr("_encapsulate", "F1")
               .WithAttr("_outside", "O3")
               .WithControlInput(g));
    Node* s1 = Sequencer(b2.opts()
                             .WithName("F1_sequencer")
                             .WithControlInputs({recv1, send, recv2, recv3}),
                         "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b).ControlInput(s1);
    Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
    Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("J"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  // Compare the rewritten host graph and function library with expectations.
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Tests a single outside-compilation cluster (O1, containing only node E)
// whose output leaves the compiled function as a function return value
// rather than feeding another compiled node. The pass still emits the full
// RecvAtHost/XlaHostCompute/SendFromHost plumbing plus a shape-inference
// graph for O1.
TEST(EncapsulateSubgraphsTest, OutsideCompilationNoInputsOrOutputs) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  // Build the input graph; E (cluster O1) reads input A and its result is
  // consumed only by G, which lives outside the encapsulated function.
  {
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = Input(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
    Node* d =
        Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
    Node* e = Unary(a, b1.opts()
                           .WithName("E")
                           .WithAttr("_encapsulate", "F1")
                           .WithAttr("_outside", "O1"));
    Node* f = Unary(d, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
    Binary(e, f, b1.opts().WithName("G"));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass on cluster F1.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Expected shape-inference graph for cluster O1.
  {
    GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape1.opts());
    Node* recv2 =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(ops::NodeOut(recv2, 0), shape1.opts()
                                                .WithName("E")
                                                .WithAttr("_encapsulate", "F1")
                                                .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected rewritten function F1; E's value becomes the e_0_retval_retval
  // return value via the O1 host compute's output.
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"a_0_arg:float", "b_0_arg:float"},
      {"e_0_retval_retval:float", "f_0_retval_retval:float"}, {},
      {
          {{"C"}, "UnaryTest", {"a_0_arg"}},
          {{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
          {{"F"}, "UnaryTest", {"D:o:0"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"a_0_arg"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}}},
      },
      {{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
       {"f_0_retval_retval", "F:o:0"}});
  // Expected host-side graph: a single Recv/Send pair around E, sequenced
  // before the call to F1.
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = Input(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv =
        RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
                   b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = Unary(recv, b2.opts()
                              .WithName("E")
                              .WithAttr("_encapsulate", "F1")
                              .WithAttr("_outside", "O1"));
    Node* send =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* s = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv, send}),
        "F1");
    NodeBuilder node_builder1("F1", "F1", lib_def.get());
    node_builder1.Input(a).Input(b).ControlInput(s);
    Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
    Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("G"));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  // Compare the rewritten host graph and function library with expectations.
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
// Tests that an outside-compilation cluster containing an op with an unknown
// output shape (BinaryUnknownShape) causes the pass to emit a
// shape-inference graph, referenced from the host compute's
// `shape_inference_graph` attribute, instead of static shapes.
//
// Fix: the expected "shapes" attr below was previously built from
// `absl::Span<const DataType>({})`; both spellings serialize to the same
// empty-list AttrValue, but the attr holds TensorShapeProtos, and every
// other test in this file uses `absl::Span<const TensorShapeProto>({})` —
// made consistent here.
TEST(EncapsulateSubgraphsTest, OutsideCompilationShapeInference) {
  FunctionDefLibrary library;
  GraphDef graphdef;
  // Build the input graph. Node "c" (lower case) is distinct from node "C";
  // E (cluster O1) is a BinaryUnknownShape op, so its output shape cannot be
  // statically determined.
  {
    *library.add_function() = test::function::XTimesTwo();
    GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
    Node* a = InputShaped(b1.opts().WithName("A"));
    Node* b = Input(b1.opts().WithName("B"));
    Node* c = Unary(a, b1.opts().WithName("C"));
    Node* d = Unary(b, b1.opts().WithName("c").WithControlInput(c).WithAttr(
                           "_encapsulate", "F1"));
    Node* e = BinaryUnknownShape(c, d,
                                 b1.opts()
                                     .WithName("E")
                                     .WithControlInputs({b, d})
                                     .WithAttr("_encapsulate", "F1")
                                     .WithAttr("_outside", "O1"));
    Node* f = Binary(c, e,
                     b1.opts().WithName("F").WithControlInput(e).WithAttr(
                         "_encapsulate", "F1"));
    Binary(a, f, b1.opts().WithName("G").WithControlInput(e));
    TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
  }
  // Run the encapsulation pass on cluster F1.
  std::vector<string> encapsulated_functions{"F1"};
  TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
  FunctionDefLibrary library_expected;
  GraphDef graphdef_expected;
  // Expected shape-inference graph for cluster O1 (two float inputs).
  {
    GraphDefBuilder shape(GraphDefBuilder::kFailImmediately);
    Node* key_constant = KeyPlaceholder("F1", shape.opts());
    Node* recv = RecvAtHost(
        ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
        shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = BinaryUnknownShape(recv, ops::NodeOut(recv, 1),
                                 shape.opts()
                                     .WithName("E")
                                     .WithAttr("_encapsulate", "F1")
                                     .WithAttr("_outside", "O1"));
    SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                 shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    TF_EXPECT_OK(
        AddGraphDefToFunctionLibrary(shape, "F1_F1_O1", &library_expected))
;
  }
  NameAttrList shape_inference_graph;
  shape_inference_graph.set_name(
      "_outside_compilation_shape_inference_F1_F1_O1");
  // Expected rewritten function F1: the host compute carries an empty
  // "shapes" list and points at the shape-inference graph instead.
  *library_expected.add_function() = test::function::XTimesTwo();
  *library_expected.add_function() = FunctionDefHelper::Create(
      "F1", {"b_0_arg:float", "c_0_arg:float"}, {"f_0_retval_retval:float"}, {},
      {
          {{"c"}, "UnaryTest", {"b_0_arg"}, {}, {}},
          {{"F"},
           "BinaryTest",
           {"c_0_arg", "outside_compilation_O1_host_compute:outputs:0"},
           {},
           {"outside_compilation_O1_host_compute"}},
          {{"outside_compilation_O1_host_compute"},
           "XlaHostCompute",
           {"c_0_arg", "c:o:0"},
           {{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
            {"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
            {"ancestors", absl::Span<const string>({})},
            {"key", "host_compute_channel_F1_F1_O1"},
            {"send_key", ""},
            {"recv_key", ""},
            {"shape_inference_graph", shape_inference_graph},
            {"tpu_core", 0},
            {"cost_estimate_ns", 1000000},
            // Empty shapes list; element type matches the attr (was
            // Span<const DataType> — equivalent proto, wrong element type).
            {"shapes", absl::Span<const TensorShapeProto>({})},
            {"_outside_compilation_subgraph", "O1"},
            {"_xla_token_input_nodes",
             absl::Span<const string>({"_xla_token_arg_node"})},
            {"_xla_original_oc_node_name",
             "outside_compilation_O1_host_compute"}},
           {"c"}},
      },
      {{"f_0_retval_retval", "F:o:0"}});
  // Expected host-side graph.
  {
    std::unique_ptr<FunctionLibraryDefinition> lib_def(
        new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
    GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
    Node* a = InputShaped(b2.opts().WithName("A"));
    Node* b = Input(b2.opts().WithName("B"));
    Node* c = Unary(a, b2.opts().WithName("C"));
    Node* key_constant =
        KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
    Node* recv = RecvAtHost(
        ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
        b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
    Node* e = BinaryUnknownShape(recv, ops::NodeOut(recv, 1),
                                 b2.opts()
                                     .WithName("E")
                                     .WithControlInputs({recv})
                                     .WithAttr("_encapsulate", "F1")
                                     .WithAttr("_outside", "O1"));
    Node* send =
        SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
                     b2.opts().WithControlInput(e).WithAttr(
                         kXlaHasHostTransferAttrName, true));
    Node* s = Sequencer(
        b2.opts().WithName("F1_sequencer").WithControlInputs({recv, send}),
        "F1");
    NodeBuilder node_builder("F1", "F1", lib_def.get());
    node_builder.Input(b).Input(c);
    Node* call =
        b2.opts().WithControlInputs({s, b, c}).FinalizeBuilder(&node_builder);
    Binary(a, call, b2.opts().WithName("G").WithControlInputs({call}));
    TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
  }
  // Compare the rewritten host graph and function library with expectations.
  TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
  TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
void CreateSubgraphTouchingRefVar(const Scope& s) {
Output variable =
ops::Variable(s.WithOpName("variable"), PartialTensorShape{}, DT_FLOAT);
Output read = ops::Identity(s.WithOpName("read_ref_var"), variable);
Output neg = ops::Negate(s.WithOpName("negate_ref"), read);
Output add = ops::Add(s.WithOpName("add_ref"), neg, neg);
Output constant =
ops::Const(s.WithOpName("constant_ref"), Input::Initializer(0.0));
s.graph()->AddControlEdge(constant.node(), variable.node());
}
// Verifies that after running EncapsulateSubgraphsPass on a graph that
// touches a reference variable, every node (except _SOURCE/_SINK) carries
// the kXlaHasReferenceVarsAttr attribute set to true.
TEST(EncapsulateSubgraphsTest, RefVariablesMarked) {
  Scope root = Scope::NewRootScope().ExitOnError();
  CreateSubgraphTouchingRefVar(root);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(graph.get()));
  GraphOptimizationPassWrapper wrapper;
  GraphOptimizationPassOptions options =
      wrapper.CreateGraphOptimizationPassOptions(&graph);
  EncapsulateSubgraphsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  // The attribute must be present on every node; its value must be true for
  // all nodes other than source and sink.
  for (const Node* node : graph->nodes()) {
    bool has_ref_var;
    TF_ASSERT_OK(
        GetNodeAttr(node->attrs(), kXlaHasReferenceVarsAttr, &has_ref_var));
    EXPECT_TRUE(node->IsSink() || node->IsSource() || has_ref_var)
        << "All nodes apart from source and sink can access reference variable";
  }
}
void CreateSubgraphNotTouchingRefVar(const Scope& s) {
Output constant =
ops::Const(s.WithOpName("constant_normal"), Input::Initializer(0.0));
Output neg = ops::Negate(s.WithOpName("negate_normal"), constant);
Output add = ops::Add(s.WithOpName("add_normal"), neg, neg);
}
// Verifies that when the graph contains no reference variables, the pass
// still writes kXlaHasReferenceVarsAttr on every node, but with value false.
TEST(EncapsulateSubgraphsTest, NoRefVarsNoAttr) {
  Scope root = Scope::NewRootScope().ExitOnError();
  CreateSubgraphNotTouchingRefVar(root);
  auto graph = std::make_unique<Graph>(OpRegistry::Global());
  TF_ASSERT_OK(root.ToGraph(graph.get()));
  GraphOptimizationPassWrapper wrapper;
  GraphOptimizationPassOptions options =
      wrapper.CreateGraphOptimizationPassOptions(&graph);
  EncapsulateSubgraphsPass pass;
  TF_ASSERT_OK(pass.Run(options));
  // Attribute must exist on every node and be false everywhere.
  for (const Node* node : graph->nodes()) {
    bool has_ref_var;
    TF_ASSERT_OK(
        GetNodeAttr(node->attrs(), kXlaHasReferenceVarsAttr, &has_ref_var));
    EXPECT_FALSE(has_ref_var) << "The graph does not have reference variables";
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
877ed332-87ba-4632-88e8-c712cc99cafa | cpp | tensorflow/tensorflow | unary_ops_composition | tensorflow/compiler/tf2xla/kernels/unary_ops_composition.cc | tensorflow/compiler/tests/unary_ops_composition_test.cc | #include <functional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/kernels/elu_op.h"
#include "tensorflow/compiler/tf2xla/kernels/relu_op.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/math.h"
#include "xla/hlo/builder/xla_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
using XlaUnaryOpGenerator = std::function<xla::XlaOp(xla::XlaOp)>;
using XlaOpGeneratorMap = absl::flat_hash_map<string, XlaUnaryOpGenerator>;
// Fills `op_generator_map` with one entry per supported TF unary op name,
// mapping it to the XLA builder function that implements it. CHECK-fails on
// a duplicate registration.
void PopulateXlaOpGeneratorMap(XlaOpGeneratorMap* op_generator_map) {
  auto add_xla_op_generator = [&](std::string name,
                                  XlaUnaryOpGenerator xla_op_generator) {
    CHECK(op_generator_map->insert({name, xla_op_generator}).second);
  };
// Registers an op whose XLA generator has the same name as the TF op.
#define ADD_XLA_OP_GENERATOR(Name) add_xla_op_generator(#Name, xla::Name);
  ADD_XLA_OP_GENERATOR(Abs);
  ADD_XLA_OP_GENERATOR(Acos);
  ADD_XLA_OP_GENERATOR(Acosh);
  ADD_XLA_OP_GENERATOR(Asin);
  ADD_XLA_OP_GENERATOR(Asinh);
  ADD_XLA_OP_GENERATOR(Atan);
  ADD_XLA_OP_GENERATOR(Atanh);
  ADD_XLA_OP_GENERATOR(Ceil);
  ADD_XLA_OP_GENERATOR(Cos);
  ADD_XLA_OP_GENERATOR(Cosh);
  ADD_XLA_OP_GENERATOR(Expm1);
  ADD_XLA_OP_GENERATOR(Exp);
  ADD_XLA_OP_GENERATOR(Floor);
  // Ops whose XLA spelling differs from the TF op name are registered
  // explicitly below.
  add_xla_op_generator(
      "Inv", [](xla::XlaOp x) { return xla::ScalarLike(x, 1.0) / x; });
  ADD_XLA_OP_GENERATOR(Log);
  ADD_XLA_OP_GENERATOR(Log1p);
  ADD_XLA_OP_GENERATOR(Neg);
  ADD_XLA_OP_GENERATOR(Reciprocal);
  add_xla_op_generator("Rint", xla::RoundToEven);
  ADD_XLA_OP_GENERATOR(Round);
  ADD_XLA_OP_GENERATOR(Rsqrt);
  add_xla_op_generator("Sigmoid", xla::Logistic);
  ADD_XLA_OP_GENERATOR(Sin);
  ADD_XLA_OP_GENERATOR(Sinh);
  ADD_XLA_OP_GENERATOR(Sqrt);
  ADD_XLA_OP_GENERATOR(Square);
  ADD_XLA_OP_GENERATOR(Tan);
  ADD_XLA_OP_GENERATOR(Tanh);
  ADD_XLA_OP_GENERATOR(Elu);
  ADD_XLA_OP_GENERATOR(Relu);
  ADD_XLA_OP_GENERATOR(Relu6);
  ADD_XLA_OP_GENERATOR(Selu);
#undef ADD_XLA_OP_GENERATOR
}
// Returns the lazily-constructed singleton map from TF op name to XLA
// generator. The map is built exactly once (thread-safe function-local
// static) and intentionally never freed so it outlives all kernel instances.
const XlaOpGeneratorMap& GetXlaOpGeneratorMap() {
  static const XlaOpGeneratorMap* const kGeneratorMap = [] {
    auto* generator_map = new XlaOpGeneratorMap;
    PopulateXlaOpGeneratorMap(generator_map);
    return generator_map;
  }();
  return *kGeneratorMap;
}
class UnaryOpsCompositionOp : public XlaOpKernel {
public:
explicit UnaryOpsCompositionOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr("op_names", &op_names_));
const XlaOpGeneratorMap& op_generator_map = GetXlaOpGeneratorMap();
for (absl::string_view op_name : op_names_) {
OP_REQUIRES(ctx, op_generator_map.contains(op_name),
errors::Unimplemented(
op_name, " not supported in _UnaryOpsComposition"));
}
}
void Compile(XlaOpKernelContext* ctx) override {
xla::XlaOp x = ctx->Input(0);
const XlaOpGeneratorMap& op_generator_map = GetXlaOpGeneratorMap();
for (absl::string_view op_name : op_names_) {
x = op_generator_map.find(op_name)->second(x);
}
ctx->SetOutput(0, x);
}
private:
std::vector<string> op_names_;
};
REGISTER_XLA_OP(Name("_UnaryOpsComposition"), UnaryOpsCompositionOp);
}
} | #include <algorithm>
#include <cmath>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/port.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
// Enables XLA devices via the global flags before any test runs; the lambda
// executes during static initialization of this translation unit.
static bool Initialized = [] {
  tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
  return true;
}();
// Test fixture that runs `_UnaryOpsComposition` on an XLA device and checks
// the scalar result on the host.
class UnaryOpsCompositionTest : public OpsTestBase {
 protected:
  // Builds a `_UnaryOpsComposition` node with the given `op_names` pipeline,
  // feeds it a scalar `input_scalar_value`, runs the kernel on XLA_GPU (when
  // CUDA is enabled) or XLA_CPU, and expects the output to be close to
  // `expected_scalar_value` within 1e-5 absolute/relative tolerance.
  template <typename T>
  void RunComposedOp(const std::vector<string> op_names, T input_scalar_value,
                     T expected_scalar_value) {
    string xla_device_name =
        tensorflow::IsGoogleCudaEnabled() ? DEVICE_XLA_GPU : DEVICE_XLA_CPU;
    SetDevice(DeviceType(xla_device_name),
              std::unique_ptr<tensorflow::Device>(DeviceFactory::NewDevice(
                  xla_device_name, {}, "/job:a/replica:0/task:0")));
    TF_ASSERT_OK(NodeDefBuilder("unary_op_composition", "_UnaryOpsComposition")
                     .Input(FakeInput(DataTypeToEnum<T>::v()))
                     .Attr("T", DataTypeToEnum<T>::v())
                     .Attr("op_names", op_names)
                     .Finalize(node_def()));
    TF_ASSERT_OK(InitOp());
    TensorShape shape({});
    // Host-visible allocator so we can stage the input/output on the CPU
    // and copy across the device boundary explicitly.
    AllocatorAttributes host_alloc_attrs;
    host_alloc_attrs.set_gpu_compatible(true);
    host_alloc_attrs.set_on_host(true);
    Allocator* cpu_allocator = device_->GetAllocator(host_alloc_attrs);
    DataType dtype = DataTypeToEnum<T>::value;
    // Stage the scalar input on the host, then copy it into the op's
    // device-resident input tensor.
    Tensor input_on_host(cpu_allocator, dtype, shape);
    test::FillValues<T>(&input_on_host, {input_scalar_value});
    Tensor* input = AddInput(dtype, shape);
    DeviceContext* device_context =
        device_->tensorflow_accelerator_device_info()->default_context;
    TF_CHECK_OK(device_context->CopyCPUTensorToDeviceSync(&input_on_host,
                                                          device_, input));
    TF_ASSERT_OK(RunOpKernel());
    // Copy the device output back to the host before comparing.
    Tensor expected_tensor(cpu_allocator, dtype, shape);
    test::FillValues<T>(&expected_tensor, {expected_scalar_value});
    Tensor* output = GetOutput(0);
    Tensor output_on_host(cpu_allocator, output->dtype(), output->shape());
    TF_CHECK_OK(device_context->CopyDeviceTensorToCPUSync(
        output, "output 0", device_, &output_on_host));
    test::ExpectClose(expected_tensor, output_on_host, 1e-5,
                      1e-5);
  }
};
// Each test composes a short pipeline of unary ops and checks the scalar
// result against the equivalent std::* computation.
TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sqrt_F) {
  RunComposedOp<float>({"Sqrt", "Sqrt"}, 81.0, 3.0);
}
TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sqrt_D) {
  RunComposedOp<double>({"Sqrt", "Sqrt"}, 81.0, 3.0);
}
TEST_F(UnaryOpsCompositionTest, Compose_Sqrt_Sin_F) {
  RunComposedOp<float>({"Sqrt", "Sin"}, 81.0, std::sin(9.0f));
}
TEST_F(UnaryOpsCompositionTest, Compose_Cos_Acos_F) {
  RunComposedOp<float>({"Cos", "Acos"}, 0.5, std::acos(std::cos(0.5f)));
}
TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu_F) {
  RunComposedOp<float>({"Tanh", "Relu"}, 0.5, std::max(0.0f, std::tanh(0.5f)));
}
TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu_D) {
  RunComposedOp<double>({"Tanh", "Relu"}, 0.5, std::max(0.0, std::tanh(0.5)));
}
// NOTE(review): despite the name, this composes only {"Relu6"} — it checks
// that Relu6 clamps 11 down to 6. Tanh is never applied; consider renaming
// to Compose_Relu6_F or adding "Tanh" to the pipeline — TODO confirm intent.
TEST_F(UnaryOpsCompositionTest, Compose_Tanh_Relu6_F) {
  RunComposedOp<float>({"Relu6"}, 11.0f, 6.0f);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/unary_ops_composition.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tests/unary_ops_composition_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ea1e268f-3784-4aec-8ec6-1cd57ff2d4cd | cpp | tensorflow/tensorflow | ragged_fill_empty_rows_op | tensorflow/core/kernels/ragged_fill_empty_rows_op.cc | tensorflow/core/kernels/ragged_fill_empty_rows_op_test.cc | #define EIGEN_USE_THREADS
#include <algorithm>
#include <numeric>
#include <unordered_map>
#include <utility>
#include <vector>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/fill_empty_rows_functor.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
namespace {
// Shared implementation for the sync (CPU) and async (GPU) kernels of
// RaggedFillEmptyRows. Validates the four inputs (value_rowids, values,
// nrows, default_value) and delegates the actual fill to the FillEmptyRows
// functor. `done` defaults to a no-op for the synchronous caller; the async
// macros invoke it on every early exit.
template <typename Device, typename T, typename Tindex>
void RaggedFillEmptyRowsOpImpl(OpKernelContext* context,
                               AsyncOpKernel::DoneCallback done = nullptr) {
  // Synchronous callers pass no callback; substitute a no-op so the
  // OP_REQUIRES_*_ASYNC macros below always have something to invoke.
  if (!done) {
    done = [] {};
  }
  // Fixed input slots of the RaggedFillEmptyRows op.
  const int kValueRowidsInput = 0;
  const int kValuesInput = 1;
  const int kNRowsInput = 2;
  const int kDefaultValueInput = 3;
  const Tensor& value_rowids_t = context->input(kValueRowidsInput);
  const Tensor& values_t = context->input(kValuesInput);
  const Tensor& nrows_t = context->input(kNRowsInput);
  const Tensor& default_value_t = context->input(kDefaultValueInput);
  // Shape validation: nrows and default_value must be scalars; value_rowids
  // and values must be equal-length vectors.
  OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsScalar(nrows_t.shape()),
                    errors::InvalidArgument("nrows must be a scalar, saw: ",
                                            nrows_t.shape().DebugString()),
                    done);
  OP_REQUIRES_ASYNC(
      context, TensorShapeUtils::IsVector(value_rowids_t.shape()),
      errors::InvalidArgument("value_rowids must be a vector, saw: ",
                              value_rowids_t.shape().DebugString()),
      done);
  OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()),
                    errors::InvalidArgument("values must be a vector, saw: ",
                                            values_t.shape().DebugString()),
                    done);
  OP_REQUIRES_ASYNC(context, value_rowids_t.dim_size(0) == values_t.dim_size(0),
                    errors::InvalidArgument(
                        "The length of `values` (", values_t.dim_size(0),
                        ") must match the first dimension of `value_rowids` (",
                        value_rowids_t.dim_size(0), ")."),
                    done);
  OP_REQUIRES_ASYNC(
      context, TensorShapeUtils::IsScalar(default_value_t.shape()),
      errors::InvalidArgument("default_value must be a scalar, saw: ",
                              default_value_t.shape().DebugString()),
      done);
  // Final template argument selects the ragged-operand variant of the
  // shared FillEmptyRows functor.
  using FunctorType =
      functor::FillEmptyRows<Device, T, Tindex, true>;
  OP_REQUIRES_OK_ASYNC(context,
                       FunctorType()(context, default_value_t, value_rowids_t,
                                     values_t, nrows_t, done),
                       done);
}
}
// Synchronous (CPU) kernel for RaggedFillEmptyRows. All validation and work
// happens in RaggedFillEmptyRowsOpImpl; this class only adapts it to the
// OpKernel::Compute interface.
template <typename Device, typename T, typename Tindex>
class RaggedFillEmptyRowsOp : public OpKernel {
 public:
  explicit RaggedFillEmptyRowsOp(OpKernelConstruction* context)
      : OpKernel(context) {}
  void Compute(OpKernelContext* context) override {
    // No `done` callback: the shared impl substitutes a no-op.
    RaggedFillEmptyRowsOpImpl<Device, T, Tindex>(context);
  }
};
#define REGISTER_KERNELS(D, T, Tindex) \
REGISTER_KERNEL_BUILDER(Name("RaggedFillEmptyRows") \
.Device(DEVICE_##D) \
.HostMemory("nrows") \
.TypeConstraint<T>("T"), \
RaggedFillEmptyRowsOp<D##Device, T, Tindex>)
#define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64)
TF_CALL_ALL_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#undef REGISTER_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
// Asynchronous (GPU) kernel for RaggedFillEmptyRows. Forwards the
// completion callback `done` to the shared implementation, which invokes it
// on both success and failure paths.
template <typename T, typename Tindex>
class RaggedFillEmptyRowsGPUOp : public AsyncOpKernel {
 public:
  explicit RaggedFillEmptyRowsGPUOp(OpKernelConstruction* context)
      : AsyncOpKernel(context) {}
  void ComputeAsync(OpKernelContext* context, DoneCallback done) override {
    RaggedFillEmptyRowsOpImpl<GPUDevice, T, Tindex>(context, done);
  }
};
#define REGISTER_KERNELS(T, Tindex) \
REGISTER_KERNEL_BUILDER(Name("RaggedFillEmptyRows") \
.Device(DEVICE_GPU) \
.HostMemory("nrows") \
.TypeConstraint<T>("T"), \
RaggedFillEmptyRowsGPUOp<T, Tindex>)
#define REGISTER_KERNELS_TINDEX(T) REGISTER_KERNELS(T, int64)
TF_CALL_POD_TYPES(REGISTER_KERNELS_TINDEX)
#undef REGISTER_KERNELS_TINDEX
#undef REGISTER_KERNELS
#endif
// Gradient kernel for RaggedFillEmptyRows. Takes `reverse_index_map`
// (presumably mapping each original value index to its position in the
// filled output — confirm against FillEmptyRowsGrad) and `grad_values`
// (gradient w.r.t. the filled values) and produces `d_values` (gradient for
// the original values) and scalar `d_default_value` (gradient for the fill
// value). The actual scatter/reduction is done by functor::FillEmptyRowsGrad.
template <typename Device, typename T, typename Tindex>
class RaggedFillEmptyRowsGradOp : public OpKernel {
 public:
  explicit RaggedFillEmptyRowsGradOp(OpKernelConstruction* context)
      : OpKernel(context) {}
  void Compute(OpKernelContext* context) override {
    const Tensor* reverse_index_map_t;
    const Tensor* grad_values_t;
    OP_REQUIRES_OK(context,
                   context->input("reverse_index_map", &reverse_index_map_t));
    OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t));
    // Both inputs must be rank-1.
    OP_REQUIRES(
        context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()),
        errors::InvalidArgument("reverse_index_map must be a vector, saw: ",
                                reverse_index_map_t->shape().DebugString()));
    OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()),
                errors::InvalidArgument("grad_values must be a vector, saw: ",
                                        grad_values_t->shape().DebugString()));
    const auto reverse_index_map = reverse_index_map_t->vec<Tindex>();
    const auto grad_values = grad_values_t->vec<T>();
    // One d_values entry per original (pre-fill) value.
    const Tindex N = reverse_index_map_t->shape().dim_size(0);
    Tensor* d_values_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "d_values", TensorShape({N}), &d_values_t));
    auto d_values = d_values_t->vec<T>();
    // d_default_value is a scalar output.
    Tensor* d_default_value_t;
    OP_REQUIRES_OK(context,
                   context->allocate_output("d_default_value", TensorShape({}),
                                            &d_default_value_t));
    auto d_default_value = d_default_value_t->scalar<T>();
    OP_REQUIRES_OK(context, functor::FillEmptyRowsGrad<Device, T, Tindex>()(
                                context, reverse_index_map, grad_values,
                                d_values, d_default_value));
  }
};
#define REGISTER_KERNELS(D, T, Tindex) \
REGISTER_KERNEL_BUILDER(Name("RaggedFillEmptyRowsGrad") \
.Device(DEVICE_##D) \
.TypeConstraint<T>("T"), \
RaggedFillEmptyRowsGradOp<D##Device, T, Tindex>)
#define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64)
TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS);
#undef REGISTER_CPU_KERNELS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T, int64)
TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNELS);
#undef REGISTER_GPU_KERNELS
#endif
#undef REGISTER_KERNELS
} | #include <gtest/gtest.h>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class RaggedFillEmptyRowsOpTest : public ::tensorflow::OpsTestBase {
protected:
const int kValueRowidsOutput = 0;
const int kValuesOutput = 1;
const int kEmptyRowIndicatorOutput = 2;
const int kReverseIndexMapOutput = 3;
template <typename T>
void BuildFillEmptyRowsGraph() {
const auto& dtype = DataTypeToEnum<T>::v();
const auto& dtype_int64 = DataTypeToEnum<int64_t>::v();
TF_ASSERT_OK(NodeDefBuilder("tested_op", "RaggedFillEmptyRows")
.Input(FakeInput(dtype_int64))
.Input(FakeInput(dtype))
.Input(FakeInput(dtype_int64))
.Input(FakeInput(dtype))
.Attr("T", dtype)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(RaggedFillEmptyRowsOpTest, IntValues) {
BuildFillEmptyRowsGraph<int>();
AddInputFromArray<int64_t>(TensorShape({4}), {1, 2, 2, 5});
AddInputFromArray<int>(TensorShape({4}), {2, 4, 6, 8});
AddInputFromArray<int64_t>(TensorShape({}), {7});
AddInputFromArray<int>(TensorShape({}), {-1});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*GetOutput(kValueRowidsOutput),
test::AsTensor<int64_t>({0, 1, 2, 2, 3, 4, 5, 6}));
test::ExpectTensorEqual<int>(
*GetOutput(kValuesOutput),
test::AsTensor<int>({-1, 2, 4, 6, -1, -1, 8, -1}));
}
TEST_F(RaggedFillEmptyRowsOpTest, FloatValues) {
BuildFillEmptyRowsGraph<float>();
AddInputFromArray<int64_t>(TensorShape({4}), {1, 2, 2, 5});
AddInputFromArray<float>(TensorShape({4}), {2., 4., 6., 8.});
AddInputFromArray<int64_t>(TensorShape({}), {7});
AddInputFromArray<float>(TensorShape({}), {-1.});
TF_ASSERT_OK(RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*GetOutput(kValueRowidsOutput),
test::AsTensor<int64_t>({0, 1, 2, 2, 3, 4, 5, 6}));
test::ExpectTensorEqual<float>(
*GetOutput(kValuesOutput),
test::AsTensor<float>({-1., 2., 4., 6., -1., -1., 8., -1.}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_fill_empty_rows_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/ragged_fill_empty_rows_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d8467ab7-3264-4340-a4dd-5ef9bc2415d2 | cpp | google/quiche | connect_udp_datagram_payload | quiche/common/masque/connect_udp_datagram_payload.cc | quiche/common/masque/connect_udp_datagram_payload_test.cc | #include "quiche/common/masque/connect_udp_datagram_payload.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_data_reader.h"
#include "quiche/common/quiche_data_writer.h"
namespace quiche {
// Parses a UDP-proxying HTTP datagram payload: a varint-62 context ID
// followed by context-specific bytes. Returns nullptr if the context ID
// cannot be read. Context ID 0 (ConnectUdpDatagramUdpPacketPayload::
// kContextId) yields a UDP-packet payload; any other ID yields an "unknown"
// payload that preserves the ID and the remaining bytes.
std::unique_ptr<ConnectUdpDatagramPayload> ConnectUdpDatagramPayload::Parse(
    absl::string_view datagram_payload) {
  QuicheDataReader data_reader(datagram_payload);
  uint64_t context_id;
  if (!data_reader.ReadVarInt62(&context_id)) {
    QUICHE_DVLOG(1) << "Could not parse malformed UDP proxy payload";
    return nullptr;
  }
  if (ContextId{context_id} == ConnectUdpDatagramUdpPacketPayload::kContextId) {
    return std::make_unique<ConnectUdpDatagramUdpPacketPayload>(
        data_reader.ReadRemainingPayload());
  } else {
    return std::make_unique<ConnectUdpDatagramUnknownPayload>(
        ContextId{context_id}, data_reader.ReadRemainingPayload());
  }
}
// Serializes this payload into a freshly allocated string. The buffer is
// sized exactly by SerializedLength(), so a successful SerializeTo() must
// consume every byte of the writer (checked in debug builds).
std::string ConnectUdpDatagramPayload::Serialize() const {
  std::string serialized(SerializedLength(), '\0');
  QuicheDataWriter writer(serialized.size(), serialized.data());
  const bool ok = SerializeTo(writer);
  QUICHE_DCHECK(ok);
  QUICHE_DCHECK_EQ(writer.remaining(), 0u);
  return serialized;
}
// Wraps `udp_packet` without copying it — NOTE(review): udp_packet_ appears
// to be a string_view member (declared in the header), so the caller must
// keep the underlying bytes alive for this object's lifetime; verify.
ConnectUdpDatagramUdpPacketPayload::ConnectUdpDatagramUdpPacketPayload(
    absl::string_view udp_packet)
    : udp_packet_(udp_packet) {}
ConnectUdpDatagramPayload::ContextId
ConnectUdpDatagramUdpPacketPayload::GetContextId() const {
return kContextId;
}
ConnectUdpDatagramPayload::Type ConnectUdpDatagramUdpPacketPayload::GetType()
const {
return Type::kUdpPacket;
}
absl::string_view ConnectUdpDatagramUdpPacketPayload::GetUdpProxyingPayload()
const {
return udp_packet_;
}
// Total wire size: varint-encoded context ID plus the raw packet bytes.
size_t ConnectUdpDatagramUdpPacketPayload::SerializedLength() const {
  return udp_packet_.size() +
         QuicheDataWriter::GetVarInt62Len(uint64_t{kContextId});
}
// Writes the context ID (varint) followed by the raw UDP packet bytes.
// The short-circuit && preserves the original write order and aborts on the
// first failed write, returning false.
bool ConnectUdpDatagramUdpPacketPayload::SerializeTo(
    QuicheDataWriter& writer) const {
  return writer.WriteVarInt62(uint64_t{kContextId}) &&
         writer.WriteStringPiece(udp_packet_);
}
// Wraps a datagram whose context ID is not understood. Context ID 0 is
// reserved for UDP packets; constructing an "unknown" payload with it is a
// programming error reported via QUICHE_BUG (the object is still
// constructed).
ConnectUdpDatagramUnknownPayload::ConnectUdpDatagramUnknownPayload(
    ContextId context_id, absl::string_view udp_proxying_payload)
    : context_id_(context_id), udp_proxying_payload_(udp_proxying_payload) {
  if (context_id == ConnectUdpDatagramUdpPacketPayload::kContextId) {
    QUICHE_BUG(udp_proxy_unknown_payload_udp_context)
        << "ConnectUdpDatagramUnknownPayload created with UDP packet context "
           "type (0). Should instead create a "
           "ConnectUdpDatagramUdpPacketPayload.";
  }
}
ConnectUdpDatagramPayload::ContextId
ConnectUdpDatagramUnknownPayload::GetContextId() const {
return context_id_;
}
ConnectUdpDatagramPayload::Type ConnectUdpDatagramUnknownPayload::GetType()
const {
return Type::kUnknown;
}
absl::string_view ConnectUdpDatagramUnknownPayload::GetUdpProxyingPayload()
const {
return udp_proxying_payload_;
}
// Total wire size: varint-encoded context ID plus the opaque payload bytes.
size_t ConnectUdpDatagramUnknownPayload::SerializedLength() const {
  return udp_proxying_payload_.size() +
         QuicheDataWriter::GetVarInt62Len(uint64_t{context_id_});
}
// Writes the stored context ID (varint) followed by the opaque payload
// bytes; && keeps the original write order and early-exits on failure.
bool ConnectUdpDatagramUnknownPayload::SerializeTo(
    QuicheDataWriter& writer) const {
  return writer.WriteVarInt62(uint64_t{context_id_}) &&
         writer.WriteStringPiece(udp_proxying_payload_);
}
} | #include "quiche/common/masque/connect_udp_datagram_payload.h"
#include <memory>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche::test {
namespace {
TEST(ConnectUdpDatagramPayloadTest, ParseUdpPacket) {
static constexpr char kDatagramPayload[] = "\x00packet";
std::unique_ptr<ConnectUdpDatagramPayload> parsed =
ConnectUdpDatagramPayload::Parse(
absl::string_view(kDatagramPayload, sizeof(kDatagramPayload) - 1));
ASSERT_TRUE(parsed);
EXPECT_EQ(parsed->GetContextId(),
ConnectUdpDatagramUdpPacketPayload::kContextId);
EXPECT_EQ(parsed->GetType(), ConnectUdpDatagramPayload::Type::kUdpPacket);
EXPECT_EQ(parsed->GetUdpProxyingPayload(), "packet");
}
TEST(ConnectUdpDatagramPayloadTest, SerializeUdpPacket) {
static constexpr absl::string_view kUdpPacket = "packet";
ConnectUdpDatagramUdpPacketPayload payload(kUdpPacket);
EXPECT_EQ(payload.GetUdpProxyingPayload(), kUdpPacket);
EXPECT_EQ(payload.Serialize(), std::string("\x00packet", 7));
}
TEST(ConnectUdpDatagramPayloadTest, ParseUnknownPacket) {
static constexpr char kDatagramPayload[] = "\x05packet";
std::unique_ptr<ConnectUdpDatagramPayload> parsed =
ConnectUdpDatagramPayload::Parse(
absl::string_view(kDatagramPayload, sizeof(kDatagramPayload) - 1));
ASSERT_TRUE(parsed);
EXPECT_EQ(parsed->GetContextId(), 5);
EXPECT_EQ(parsed->GetType(), ConnectUdpDatagramPayload::Type::kUnknown);
EXPECT_EQ(parsed->GetUdpProxyingPayload(), "packet");
}
TEST(ConnectUdpDatagramPayloadTest, SerializeUnknownPacket) {
static constexpr absl::string_view kInnerUdpProxyingPayload = "packet";
ConnectUdpDatagramUnknownPayload payload(4u, kInnerUdpProxyingPayload);
EXPECT_EQ(payload.GetUdpProxyingPayload(), kInnerUdpProxyingPayload);
EXPECT_EQ(payload.Serialize(), std::string("\x04packet", 7));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/masque/connect_udp_datagram_payload.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/masque/connect_udp_datagram_payload_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d8c52ca2-a890-4b74-982d-b4eca24afbf8 | cpp | tensorflow/tensorflow | mutable_op_resolver | tensorflow/lite/mutable_op_resolver.cc | tensorflow/lite/mutable_op_resolver_test.cc | #include "tensorflow/lite/mutable_op_resolver.h"
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/api/op_resolver_internal.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Looks up a builtin op registration by (op, version). Registrations added
// directly to this resolver take precedence; otherwise chained resolvers are
// consulted in the order they were added. Returns nullptr if not found.
const TfLiteRegistration* MutableOpResolver::FindOp(tflite::BuiltinOperator op,
                                                    int version) const {
  const auto key = std::make_pair(op, version);
  if (const auto it = builtins_.find(key); it != builtins_.end()) {
    return &it->second;
  }
  for (const OpResolver* chained : other_op_resolvers_) {
    if (const TfLiteRegistration* reg = chained->FindOp(op, version)) {
      return reg;
    }
  }
  return nullptr;
}
// Looks up a custom op registration by (name, version). Directly registered
// custom ops take precedence over chained resolvers, which are consulted in
// chaining order. Returns nullptr if not found.
const TfLiteRegistration* MutableOpResolver::FindOp(const char* op,
                                                    int version) const {
  const auto key = std::make_pair(op, version);
  if (const auto it = custom_ops_.find(key); it != custom_ops_.end()) {
    return &it->second;
  }
  for (const OpResolver* chained : other_op_resolvers_) {
    if (const TfLiteRegistration* reg = chained->FindOp(op, version)) {
      return reg;
    }
  }
  return nullptr;
}
// Registers `registration` for builtin `op` at `version`, overwriting any
// existing entry with the same key. A null registration is silently
// ignored. The registration is copied, with builtin_code/version stamped in
// and custom_name cleared.
void MutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
                                   const TfLiteRegistration* registration,
                                   int version) {
  if (registration == nullptr) {
    // Allows passing get-registration helpers that may return null.
    return;
  }
  TfLiteRegistration copy = *registration;
  copy.custom_name = nullptr;
  copy.builtin_code = op;
  copy.version = version;
  builtins_[std::make_pair(op, version)] = copy;
  may_directly_contain_user_defined_ops_ = true;
}
// Registers `registration` for every version in [min_version, max_version],
// inclusive, by delegating to the single-version overload.
void MutableOpResolver::AddBuiltin(tflite::BuiltinOperator op,
                                   const TfLiteRegistration* registration,
                                   int min_version, int max_version) {
  int version = min_version;
  while (version <= max_version) {
    AddBuiltin(op, registration, version);
    ++version;
  }
}
// Registers `registration` for the custom op `name` at `version`,
// overwriting any existing entry with the same key. The registration is
// copied, with builtin_code forced to CUSTOM and name/version stamped in.
// A null registration is silently ignored, matching AddBuiltin; previously
// it was dereferenced unconditionally (undefined behavior).
void MutableOpResolver::AddCustom(const char* name,
                                  const TfLiteRegistration* registration,
                                  int version) {
  if (registration == nullptr) {
    return;
  }
  TfLiteRegistration new_registration = *registration;
  new_registration.builtin_code = BuiltinOperator_CUSTOM;
  new_registration.custom_name = name;
  new_registration.version = version;
  auto op_key = std::make_pair(name, version);
  custom_ops_[op_key] = new_registration;
  may_directly_contain_user_defined_ops_ = true;
}
// Registers the custom op for every version in [min_version, max_version],
// inclusive, by delegating to the single-version overload.
void MutableOpResolver::AddCustom(const char* name,
                                  const TfLiteRegistration* registration,
                                  int min_version, int max_version) {
  int version = min_version;
  while (version <= max_version) {
    AddCustom(name, registration, version);
    ++version;
  }
}
// Copies all registrations from `other` into this resolver. Copied entries
// overwrite existing entries with the same key. `other`'s chained resolvers
// are prepended so FindOp consults them before resolvers already chained
// here.
void MutableOpResolver::AddAll(const MutableOpResolver& other) {
  for (const auto& [key, registration] : other.builtins_) {
    builtins_[key] = registration;
  }
  for (const auto& [key, registration] : other.custom_ops_) {
    custom_ops_[key] = registration;
  }
  other_op_resolvers_.insert(other_op_resolvers_.begin(),
                             other.other_op_resolvers_.begin(),
                             other.other_op_resolvers_.end());
}
// Appends `other` as a fallback resolver, consulted by FindOp after this
// resolver's own registrations and any previously chained resolvers.
// `other` is stored as a raw pointer (not owned); the caller must keep it
// alive as long as this resolver is used.
void MutableOpResolver::ChainOpResolver(const OpResolver* other) {
  other_op_resolvers_.push_back(other);
}
// Returns true if ops were ever added directly to this resolver (AddBuiltin
// or AddCustom sets the flag) or if any chained resolver may contain
// user-defined ops.
bool MutableOpResolver::MayContainUserDefinedOps() const {
  if (may_directly_contain_user_defined_ops_) {
    return true;
  }
  // Chained resolvers are queried through the internal accessor, since this
  // query is routed via OpResolverInternal rather than a public virtual.
  for (const OpResolver* other : other_op_resolvers_) {
    if (OpResolverInternal::MayContainUserDefinedOps(*other)) {
      return true;
    }
  }
  return false;
}
} | #include "tensorflow/lite/mutable_op_resolver.h"
#include <stddef.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
// No-op invoke stub; tests identify the first registration by this pointer.
TfLiteStatus DummyInvoke(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}
// Registration with only `invoke` populated.
TfLiteRegistration* GetDummyRegistration() {
  static TfLiteRegistration registration = {
      .init = nullptr,
      .free = nullptr,
      .prepare = nullptr,
      .invoke = DummyInvoke,
  };
  return &registration;
}
// Second set of no-op callbacks, distinct from the first so tests can tell
// which registration a lookup returned.
TfLiteStatus Dummy2Invoke(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}
TfLiteStatus Dummy2Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}
void* Dummy2Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;
}
void Dummy2free(TfLiteContext* context, void* buffer) {}
// Registration with all four callbacks populated.
TfLiteRegistration* GetDummy2Registration() {
  static TfLiteRegistration registration = {
      .init = Dummy2Init,
      .free = Dummy2free,
      .prepare = Dummy2Prepare,
      .invoke = Dummy2Invoke,
  };
  return &registration;
}
TEST(MutableOpResolverTest, FindOp) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_ADD);
EXPECT_EQ(found_registration->version, 1);
}
TEST(MutableOpResolverTest, FindMissingOp) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp(BuiltinOperator_CONV_2D, 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, RegisterOpWithSingleVersion) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
ASSERT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, RegisterOpWithMultipleVersions) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 2);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
ASSERT_NE(found_registration, nullptr);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 3);
}
TEST(MutableOpResolverTest, FindOpWithUnsupportedVersions) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration(), 2, 3);
const TfLiteRegistration* found_registration;
found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
EXPECT_EQ(found_registration, nullptr);
found_registration = resolver.FindOp(BuiltinOperator_ADD, 4);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, FindCustomOp) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("AWESOME", 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_TRUE(found_registration->invoke == DummyInvoke);
EXPECT_EQ(found_registration->version, 1);
}
TEST(MutableOpResolverTest, FindCustomName) {
MutableOpResolver resolver;
TfLiteRegistration* reg = GetDummyRegistration();
reg->custom_name = "UPDATED";
resolver.AddCustom(reg->custom_name, reg);
const TfLiteRegistration* found_registration =
resolver.FindOp(reg->custom_name, 1);
ASSERT_NE(found_registration, nullptr);
EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
EXPECT_EQ(found_registration->invoke, GetDummyRegistration()->invoke);
EXPECT_EQ(found_registration->version, 1);
EXPECT_EQ(found_registration->custom_name, "UPDATED");
}
TEST(MutableOpResolverTest, FindBuiltinName) {
MutableOpResolver resolver1;
TfLiteRegistration* reg = GetDummy2Registration();
reg->custom_name = "UPDATED";
resolver1.AddBuiltin(BuiltinOperator_ADD, reg);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->prepare,
GetDummy2Registration()->prepare);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->init,
GetDummy2Registration()->init);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->free,
GetDummy2Registration()->free);
EXPECT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->custom_name, nullptr);
}
TEST(MutableOpResolverTest, FindMissingCustomOp) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration =
resolver.FindOp("EXCELLENT", 1);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, FindCustomOpWithUnsupportedVersion) {
MutableOpResolver resolver;
resolver.AddCustom("AWESOME", GetDummyRegistration());
const TfLiteRegistration* found_registration = resolver.FindOp("AWESOME", 2);
EXPECT_EQ(found_registration, nullptr);
}
TEST(MutableOpResolverTest, AddAll) {
MutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver1.AddBuiltin(BuiltinOperator_MUL, GetDummy2Registration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_SUB, GetDummyRegistration());
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
resolver1.AddAll(resolver2);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_MUL, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummyRegistration()->invoke);
}
class ChainingMutableOpResolver : public MutableOpResolver {
public:
using MutableOpResolver::ChainOpResolver;
};
TEST(MutableOpResolverTest, ChainOpResolver) {
ChainingMutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver1.AddBuiltin(BuiltinOperator_MUL, GetDummy2Registration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_SUB, GetDummyRegistration());
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
resolver1.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_MUL, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver1.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, CopyConstructChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3(resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, AssignChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3;
resolver3 = resolver2;
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, AddAllChainedOpResolver) {
MutableOpResolver resolver;
resolver.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
resolver.AddBuiltin(BuiltinOperator_SUB, GetDummy2Registration());
resolver.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver);
MutableOpResolver resolver3;
resolver3.AddAll(resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_SUB, 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_MUL, 1), nullptr);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
ASSERT_EQ(resolver3.FindOp("NotMyCustom", 1), nullptr);
}
TEST(MutableOpResolverTest, ChainOpResolverCustomOpPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
MutableOpResolver resolver2;
resolver2.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver3;
resolver3.ChainOpResolver(&resolver1);
resolver3.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver3.FindOp("MyCustom", 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, ChainOpResolverBuiltinOpPrecedence) {
MutableOpResolver resolver1;
resolver1.AddBuiltin(BuiltinOperator_ADD, GetDummyRegistration());
MutableOpResolver resolver2;
resolver2.AddBuiltin(BuiltinOperator_ADD, GetDummy2Registration());
ChainingMutableOpResolver resolver3;
resolver3.ChainOpResolver(&resolver1);
resolver3.ChainOpResolver(&resolver2);
ASSERT_EQ(resolver3.FindOp(BuiltinOperator_ADD, 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, ChainOpResolverAddVersusChainPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver1);
MutableOpResolver resolver3;
resolver3.AddCustom("MyCustom", GetDummy2Registration());
ChainingMutableOpResolver resolver4;
resolver4.ChainOpResolver(&resolver2);
resolver4.ChainOpResolver(&resolver3);
ASSERT_EQ(resolver4.FindOp("MyCustom", 1)->invoke,
GetDummyRegistration()->invoke);
}
TEST(MutableOpResolverTest, AddAllAddVersusChainPrecedence) {
MutableOpResolver resolver1;
resolver1.AddCustom("MyCustom", GetDummyRegistration());
ChainingMutableOpResolver resolver2;
resolver2.ChainOpResolver(&resolver1);
MutableOpResolver resolver3;
resolver3.AddCustom("MyCustom", GetDummy2Registration());
MutableOpResolver resolver4;
resolver4.AddAll(resolver2);
resolver4.AddAll(resolver3);
ASSERT_EQ(resolver4.FindOp("MyCustom", 1)->invoke,
GetDummy2Registration()->invoke);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
590fbe25-44c9-4631-8316-7cac1d6d4be9 | cpp | google/arolla | dynamic_lifting | arolla/expr/operators/dynamic_lifting.cc | arolla/expr/operators/dynamic_lifting_test.cc | #include "arolla/expr/operators/dynamic_lifting.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/operators/restricted_operator.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/expr/overloaded_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Literal;
using ::arolla::expr::MakeOverloadedOperator;
using ::arolla::expr::Placeholder;
using ::arolla::expr_operators::type_meta::QTypes;
// Type-meta strategy: fails with InvalidArgumentError if any argument type
// is array-like; otherwise returns the argument types unchanged. Used to
// restrict the scalar overload of a dynamically lifted operator.
absl::StatusOr<QTypes> NoArrayArgs(absl::Span<const QTypePtr> types) {
  for (size_t i = 0; i < types.size(); ++i) {
    if (IsArrayLikeQType(types[i])) {
      return absl::InvalidArgumentError("array argument found");
    }
  }
  return QTypes{types.begin(), types.end()};
}
}
// Lifts a scalar operator to also accept array arguments, at binding time:
// returns an overloaded operator whose first overload is `op` restricted to
// non-array arguments (NoArrayArgs), and whose second overload is a lambda
// that applies `op` pointwise over the arguments via
// core.apply_varargs(core.map, op, args). The result keeps `op`'s display
// name.
absl::StatusOr<ExprOperatorPtr> LiftDynamically(
    const absl::StatusOr<ExprOperatorPtr>& op_or) {
  ASSIGN_OR_RETURN(const ExprOperatorPtr& op, op_or);
  ASSIGN_OR_RETURN(ExprOperatorPtr map_op, expr::LookupOperator("core.map"));
  return MakeOverloadedOperator(
      op->display_name(), RestrictOperator(op, NoArrayArgs),
      MakeLambdaOperator(
          ExprOperatorSignature::Make("*args"),
          ::arolla::expr::CallOp(
              "core.apply_varargs",
              // map_op is moved into the literal; `op` is still needed above,
              // so it is copied here.
              {Literal(std::move(map_op)), Literal(op), Placeholder("args")})));
}
} | #include "arolla/expr/operators/dynamic_lifting.h"
#include <memory>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/backend_wrapping_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::expr::BackendWrappingOperator;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::expr_operators::type_meta::CallableStrategy;
using ::arolla::expr_operators::type_meta::Chain;
using ::arolla::expr_operators::type_meta::CommonType;
using ::arolla::expr_operators::type_meta::Ternary;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Field;
// Verifies that LiftDynamically wraps a scalar backend operator so that it
// (a) keeps the scalar operator's display name and signature, (b) lowers to
// the plain scalar operator when all arguments are scalar/optional, and
// (c) lowers through core.apply_varargs + core.map when any argument is a
// DenseArray.
TEST(DynamicLiftingTest, LiftDynamically) {
  ASSERT_OK_AND_ASSIGN(auto scalar_signature,
                       ExprOperatorSignature::Make("a, b, c"));
  auto scalar_operator = std::make_shared<BackendWrappingOperator>(
      "test.scalar_operator", scalar_signature,
      CallableStrategy(Chain(Ternary, CommonType)));
  ASSERT_OK_AND_ASSIGN(auto lifted_operator, LiftDynamically(scalar_operator));
  EXPECT_THAT(lifted_operator->display_name(), Eq("test.scalar_operator"));
  // The lifted operator must expose the same parameter names a, b, c.
  EXPECT_THAT(
      lifted_operator->GetSignature(),
      IsOkAndHolds(Field(
          &ExprOperatorSignature::parameters,
          ElementsAre(Field(&ExprOperatorSignature::Parameter::name, "a"),
                      Field(&ExprOperatorSignature::Parameter::name, "b"),
                      Field(&ExprOperatorSignature::Parameter::name, "c")))));
  {
    // Scalar/optional inputs: the lifted operator infers the optional common
    // type and lowers to the same expression as the scalar operator.
    auto scalar_args = {
        WithQTypeAnnotation(Leaf("a"), GetQType<float>()),
        WithQTypeAnnotation(Leaf("b"), GetOptionalQType<float>()),
        WithQTypeAnnotation(Leaf("c"), GetQType<double>())};
    ASSERT_OK_AND_ASSIGN(auto scalar_expr,
                         CallOp(lifted_operator, scalar_args));
    EXPECT_THAT(scalar_expr->qtype(), Eq(GetOptionalQType<double>()));
    ASSERT_OK_AND_ASSIGN(auto expected_expr,
                         CallOp(scalar_operator, scalar_args));
    EXPECT_THAT(ToLowest(scalar_expr),
                IsOkAndHolds(EqualsExpr(ToLowest(expected_expr))));
  }
  {
    // At least one DenseArray input: the result qtype is a DenseArray and the
    // lowering routes through core.apply_varargs(core.map, scalar_op, args).
    std::vector<absl::StatusOr<ExprNodePtr>> array_args = {
        WithQTypeAnnotation(Leaf("a"), GetQType<float>()),
        WithQTypeAnnotation(Leaf("b"), GetDenseArrayQType<float>()),
        WithQTypeAnnotation(Leaf("c"), GetOptionalQType<double>())};
    ASSERT_OK_AND_ASSIGN(auto array_expr, CallOp(lifted_operator, array_args));
    EXPECT_THAT(array_expr->qtype(), Eq(GetDenseArrayQType<double>()));
    ASSERT_OK_AND_ASSIGN(ExprOperatorPtr map_op,
                         expr::LookupOperator("core.map"));
    ASSERT_OK_AND_ASSIGN(
        auto expected_expr,
        CallOp("core.apply_varargs",
               {Literal(map_op), Literal<ExprOperatorPtr>(scalar_operator),
                CallOp(expr::MakeTupleOperator::Make(), array_args)}));
    EXPECT_THAT(ToLowest(array_expr),
                IsOkAndHolds(EqualsExpr(ToLowest(expected_expr))));
  }
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/dynamic_lifting.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/dynamic_lifting_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
5df53855-dc84-4c12-a642-74b1e73fc48e | cpp | tensorflow/tensorflow | transfer_manager | third_party/xla/xla/service/transfer_manager.cc | third_party/xla/xla/tests/transfer_manager_test.cc | #include "xla/service/transfer_manager.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/const_init.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/literal.h"
#include "xla/service/compiler.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/stream.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/notification.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Guards lazy creation and lookup in the per-platform TransferManager
// registry below.
absl::Mutex TransferManager::platform_transfer_manager_mutex_(
    absl::kConstInit);

// Returns the process-wide platform-id -> TransferManager::State registry.
// The map is heap-allocated and never destroyed, presumably to sidestep
// static destruction ordering issues.
absl::flat_hash_map<se::Platform::Id, TransferManager::State>*
TransferManager::GetPlatformTransferManagers() {
  static auto* r =
      new absl::flat_hash_map<se::Platform::Id, TransferManager::State>;
  return r;
}
// Synchronously copies the contents of `device_buffer` into a freshly
// allocated Literal shaped like the buffer's on-host shape.
absl::StatusOr<Literal> TransferManager::TransferLiteralFromDevice(
    se::Stream* stream, const ShapedBuffer& device_buffer,
    const TransferMetadata* transfer_metadata) {
  Literal result(device_buffer.on_host_shape());
  absl::Status status = TransferLiteralFromDevice(stream, device_buffer,
                                                  &result, transfer_metadata);
  if (!status.ok()) {
    return status;
  }
  return std::move(result);
}
// Synchronous wrapper around the callback-based TransferLiteralFromDevice:
// stages the transfer on a borrowed substream and blocks the caller until the
// done-callback fires.
absl::Status TransferManager::TransferLiteralFromDevice(
    se::Stream* stream, const ShapedBuffer& device_buffer,
    const MutableBorrowingLiteral& literal,
    const TransferMetadata* transfer_metadata) {
  // Use a substream so this blocking transfer does not serialize against
  // unrelated work later enqueued on `stream`; the substream first waits for
  // everything already queued on `stream`.
  TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
  TF_RETURN_IF_ERROR(substream->WaitFor(stream));
  // Hand the substream back to the pool on every exit path.
  absl::Cleanup cleanup = [&]() { stream->ReturnSubStream(substream); };
  absl::Status ret;
  tsl::Notification n;
  TransferLiteralFromDevice(
      substream, device_buffer, literal,
      [&](absl::Status status) {
        ret = status;
        n.Notify();
      },
      transfer_metadata);
  // `ret` is only read after the notification, so the callback's write is
  // safely ordered before the read below.
  n.WaitForNotification();
  return ret;
}
// Synchronously copies `literal` into `device_buffer`: stage the async
// transfer on a borrowed substream, then block until it drains.
absl::Status TransferManager::TransferLiteralToDevice(
    se::Stream* stream, const LiteralSlice& literal,
    const ShapedBuffer& device_buffer,
    const TransferMetadata* transfer_metadata) {
  TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
  TF_RETURN_IF_ERROR(substream->WaitFor(stream));
  // Always return the substream to the pool, on success and on every error.
  absl::Cleanup return_substream = [&] { stream->ReturnSubStream(substream); };
  absl::Status status = TransferLiteralToDeviceAsync(
      substream, literal, device_buffer, transfer_metadata);
  if (!status.ok()) {
    return status;
  }
  return substream->BlockHostUntilDone();
}
// Copies a single array (non-tuple) device allocation into a new Literal of
// `shape`.
absl::StatusOr<Literal> TransferManager::TransferArrayFromDevice(
    se::Stream* stream, const Shape& shape, const se::DeviceMemoryBase& source,
    const TransferMetadata* transfer_metadata) {
  TF_RET_CHECK(shape.IsArray());
  // Only shapes whose device representation equals the host shape (up to
  // minor-to-major layout) can be read from a bare DeviceMemoryBase.
  TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
      HostShapeToDeviceShape(shape), shape));
  Literal literal(shape);
  // Wrap the raw device memory in a single-buffer ShapedBuffer so the
  // generic literal transfer path can be reused.
  ShapedBuffer shaped_buffer(shape, stream->parent()->device_ordinal());
  shaped_buffer.set_buffer(source, /*index=*/{});
  TF_RETURN_IF_ERROR(TransferLiteralFromDevice(stream, shaped_buffer, &literal,
                                               transfer_metadata));
  return std::move(literal);
}
// Synchronously copies the array `literal` into the raw device allocation
// `dest`: stage the async transfer on a borrowed substream, then block.
absl::Status TransferManager::TransferArrayToDevice(
    se::Stream* stream, const LiteralSlice& literal,
    const se::DeviceMemoryBase& dest,
    const TransferMetadata* transfer_metadata) {
  TF_ASSIGN_OR_RETURN(se::Stream * substream, stream->GetOrCreateSubStream());
  TF_RETURN_IF_ERROR(substream->WaitFor(stream));
  // Always return the substream to the pool, on success and on every error.
  absl::Cleanup return_substream = [&] { stream->ReturnSubStream(substream); };
  absl::Status status =
      TransferArrayToDeviceAsync(substream, literal, dest, transfer_metadata);
  if (!status.ok()) {
    return status;
  }
  return substream->BlockHostUntilDone();
}
// Asynchronously copies the array `literal` into the raw device allocation
// `dest` by viewing `dest` as a single-buffer ShapedBuffer of the literal's
// device shape.
absl::Status TransferManager::TransferArrayToDeviceAsync(
    se::Stream* stream, const LiteralSlice& literal,
    const se::DeviceMemoryBase& dest,
    const TransferMetadata* transfer_metadata) {
  TF_RET_CHECK(literal.shape().IsArray());
  ShapedBuffer shaped_buffer(HostShapeToDeviceShape(literal.shape()),
                             stream->parent()->device_ordinal());
  shaped_buffer.set_buffer(dest, /*index=*/{});
  return TransferLiteralToDeviceAsync(stream, literal, shaped_buffer,
                                      transfer_metadata);
}
// Reads back the dynamic-dimension metadata for every dynamic array subshape
// of `device_shape` and patches the actual extents into `*device_shape`,
// which is then marked fully static.
absl::Status TransferManager::ReadDynamicShapes(
    se::Stream* stream, const ShapedBuffer* device_buffer,
    Shape* device_shape) {
  DCHECK(device_shape->is_dynamic());
  Shape original_device_shape = *device_shape;
  // Make sure whatever produced the buffer contents has finished before the
  // metadata reads below.
  TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
  TF_ASSIGN_OR_RETURN(
      auto compiler, Compiler::GetForPlatform(stream->parent()->GetPlatform()));
  TF_RETURN_IF_ERROR(device_buffer->buffers().ForEachElementWithStatus(
      [&](const ShapeIndex& index,
          const se::DeviceMemoryBase& buffer) -> absl::Status {
        const Shape& buffer_shape =
            ShapeUtil::GetSubshape(*device_shape, index);
        if (buffer_shape.IsTuple()) {
          // Tuple index tables carry no dynamic metadata of their own.
          return absl::OkStatus();
        }
        Shape& device_sub_shape =
            *ShapeUtil::GetMutableSubshape(device_shape, index);
        if (device_sub_shape.is_static()) {
          return absl::OkStatus();
        }
        // The dynamic-size metadata lives directly after the statically-sized
        // payload: `offset` is the static payload size and `metadata_size`
        // the trailing metadata region.
        auto shape_size_fn = compiler->ShapeSizeBytesFunction();
        Shape buffer_shape_static = ShapeUtil::MakeStaticShape(buffer_shape);
        const int64_t offset = shape_size_fn(buffer_shape_static);
        int64_t metadata_size = shape_size_fn(buffer_shape) - offset;
        if (metadata_size == 0) {
          return InvalidArgument("Dynamic shape metadata size should not be 0");
        }
        auto buffer_8 = se::DeviceMemory<uint8_t>(buffer);
        auto metadata_buffer = buffer_8.GetSlice(offset, metadata_size);
        // The metadata is read as one int32 per dimension holding the true
        // extent of that dimension.
        TF_ASSIGN_OR_RETURN(
            auto metadata,
            TransferArrayFromDevice(
                stream,
                ShapeUtil::MakeShape(S32, {buffer_shape.dimensions_size()}),
                metadata_buffer));
        for (int64_t i = 0; i < metadata.element_count(); ++i) {
          device_sub_shape.mutable_dimensions()[i] = metadata.Get<int32_t>({i});
        }
        return absl::OkStatus();
      }));
  // Every dynamic dimension now holds a concrete size; drop the dynamic
  // markers and verify the result fits within the original dynamic bounds.
  device_shape->clear_dynamic_dimensions();
  TF_RET_CHECK(ShapeUtil::DynamicShapeIsCompatible(*device_shape,
                                                   original_device_shape));
  return absl::OkStatus();
}
// Registers the factory for `platform_id`'s TransferManager. Must be called
// at most once per platform id; the manager itself is created lazily by
// GetForPlatform().
void TransferManager::RegisterTransferManager(
    se::Platform::Id platform_id,
    TransferManagerCreationFunction creation_function) {
  absl::MutexLock lock(&TransferManager::platform_transfer_manager_mutex_);
  auto* registry = GetPlatformTransferManagers();
  // Double registration is a programming error.
  CHECK(!registry->contains(platform_id));
  (*registry)[platform_id].creation_function = creation_function;
}
// Returns the TransferManager registered for `platform`, instantiating it on
// first use. The returned pointer is owned by the registry and remains valid
// for the process lifetime.
absl::StatusOr<TransferManager*> TransferManager::GetForPlatform(
    const se::Platform* platform) {
  absl::MutexLock lock(&TransferManager::platform_transfer_manager_mutex_);
  auto* managers = GetPlatformTransferManagers();
  auto it = managers->find(platform->id());
  if (it == managers->end()) {
    return NotFound(
        "could not find registered transfer manager for platform %s -- check "
        "target linkage",
        platform->Name());
  }
  if (it->second.manager == nullptr) {
    // Lazily create the manager the first time this platform is requested.
    it->second.manager = (*it->second.creation_function)();
  }
  return it->second.manager.get();
}
// Synchronously writes all tuple index tables for `device_buffer`: kick off
// the async writes, then wait for the stream to drain.
absl::Status TransferManager::WriteTupleIndexTables(
    se::Stream* stream, const ShapedBuffer& device_buffer) {
  absl::Status status = WriteTupleIndexTablesAsync(stream, device_buffer);
  if (!status.ok()) {
    return status;
  }
  return stream->BlockHostUntilDone();
}
// For every (possibly nested) non-empty tuple subshape of `device_buffer`,
// asynchronously writes that tuple's index table (the per-element buffer
// table) via WriteSingleTupleIndexTable.
absl::Status TransferManager::WriteTupleIndexTablesAsync(
    se::Stream* stream, const ShapedBuffer& device_buffer) {
  VLOG(2) << "Writing tuple index tables for " << device_buffer;
  return ShapeUtil::ForEachSubshapeWithStatus(
      device_buffer.on_device_shape(),
      [&](const Shape& device_subshape,
          const ShapeIndex& index) -> absl::Status {
        if (device_subshape.IsTuple() &&
            ShapeUtil::TupleElementCount(device_subshape) > 0) {
          se::DeviceMemoryBase device_memory = device_buffer.buffer(index);
          TF_RET_CHECK(GetByteSizeRequirement(device_subshape) ==
                       device_memory.size());
          // Gather this tuple's element buffers in element order.
          std::vector<se::DeviceMemoryBase> elements;
          ShapeIndex element_index = index;
          for (int64_t i = 0; i < ShapeUtil::TupleElementCount(device_subshape);
               ++i) {
            element_index.push_back(i);
            elements.push_back(device_buffer.buffer(element_index));
            element_index.pop_back();
          }
          return WriteSingleTupleIndexTable(stream, elements, device_subshape,
                                            &device_memory);
        }
        return absl::OkStatus();
      });
}
// Writes only the root-level tuple index table of `device_buffer` (no
// recursion into nested tuples). No-op for an empty tuple.
absl::Status TransferManager::WriteRootTupleIndexTable(
    se::Stream* stream, const ShapedBuffer& device_buffer) {
  const Shape& root_shape = device_buffer.on_device_shape();
  TF_RET_CHECK(root_shape.IsTuple());
  const int64_t num_elements = ShapeUtil::TupleElementCount(root_shape);
  if (num_elements == 0) {
    return absl::OkStatus();
  }
  se::DeviceMemoryBase device_memory = device_buffer.buffer({});
  TF_RET_CHECK(GetByteSizeRequirement(root_shape) == device_memory.size());
  // Gather the root tuple's element buffers in element order.
  std::vector<se::DeviceMemoryBase> element_buffers;
  element_buffers.reserve(num_elements);
  for (int64_t i = 0; i < num_elements; ++i) {
    element_buffers.push_back(device_buffer.buffer({i}));
  }
  return WriteSingleTupleIndexTable(stream, element_buffers, root_shape,
                                    &device_memory);
}
// Overload of WriteRootTupleIndexTable for buffers held in a
// ShapeTree<MaybeOwningDeviceMemory>. No-op for an empty tuple.
absl::Status TransferManager::WriteRootTupleIndexTable(
    se::Stream* stream, const ShapeTree<MaybeOwningDeviceMemory>& buffer_tree) {
  const Shape& root_shape = buffer_tree.shape();
  TF_RET_CHECK(root_shape.IsTuple());
  const int64_t num_elements = ShapeUtil::TupleElementCount(root_shape);
  if (num_elements == 0) {
    return absl::OkStatus();
  }
  se::DeviceMemoryBase device_memory =
      buffer_tree.element({}).AsDeviceMemoryBase();
  TF_RET_CHECK(GetByteSizeRequirement(root_shape) == device_memory.size());
  // Gather the root tuple's element buffers in element order.
  std::vector<se::DeviceMemoryBase> element_buffers;
  element_buffers.reserve(num_elements);
  for (int64_t i = 0; i < num_elements; ++i) {
    element_buffers.push_back(buffer_tree.element({i}).AsDeviceMemoryBase());
  }
  return WriteSingleTupleIndexTable(stream, element_buffers, root_shape,
                                    &device_memory);
}
// Allocates an uninitialized ScopedShapedBuffer whose on-device shape is
// derived from `on_host_shape` (via `shape_representation_fn` when provided,
// otherwise HostShapeToDeviceShape). One device allocation is made per node
// of the shape tree, in the memory space recorded in each subshape's layout.
absl::StatusOr<ScopedShapedBuffer> TransferManager::AllocateScopedShapedBuffer(
    const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
    int device_ordinal, int physical_device_ordinal,
    DeviceShapeRepresentationFn shape_representation_fn) {
  if (!LayoutUtil::HasLayout(on_host_shape)) {
    return InvalidArgument("Shape must have a layout: %s",
                           ShapeUtil::HumanStringWithLayout(on_host_shape));
  }
  TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));
  Shape on_device_shape = (shape_representation_fn == nullptr)
                              ? HostShapeToDeviceShape(on_host_shape)
                              : shape_representation_fn(on_host_shape);
  TF_RET_CHECK(LayoutUtil::HasLayout(on_device_shape));
  ScopedShapedBuffer shaped_buffer(std::move(on_device_shape), allocator,
                                   device_ordinal, physical_device_ordinal);
  // Allocate a buffer for every node of the shape tree (leaves and tuple
  // index tables alike).
  for (auto& pair : shaped_buffer.buffers()) {
    const ShapeIndex& index = pair.first;
    se::DeviceMemoryBase& memory_base = pair.second;
    const Shape& subshape =
        ShapeUtil::GetSubshape(shaped_buffer.on_device_shape(), index);
    TF_ASSIGN_OR_RETURN(auto memory,
                        allocator->Allocate(shaped_buffer.device_ordinal(),
                                            GetByteSizeRequirement(subshape),
                                            /*retry_on_failure=*/true,
                                            LayoutUtil::MemorySpace(subshape)));
    // Hand ownership to the ScopedShapedBuffer, which deallocates all
    // buffers on destruction.
    memory_base = memory.Release();
  }
  return std::move(shaped_buffer);
}
// Default policy: the compact layout for a host shape is the default
// (descending minor-to-major) layout.
absl::StatusOr<Shape> TransferManager::ChooseCompactLayoutForShape(
    const Shape& host_shape) const {
  return LayoutUtil::GetWithDefaultLayout(host_shape);
}
// Default policy: infeed uses the default (descending minor-to-major) layout.
xla::Shape TransferManager::ChooseGoodInfeedLayout(const Shape& shape) const {
  return LayoutUtil::GetWithDefaultLayout(shape);
}
} | #include <memory>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/service/generic_transfer_manager.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/shaped_buffer.h"
#include "xla/service/stream_pool.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/local_client_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
// Fixture for TransferManager round-trip tests: borrows a stream from the
// local client's backend and exposes a device-buffer allocation helper.
class TransferManagerTest : public LocalClientTestBase {
 protected:
  TransferManagerTest()
      : shape_size_fn_([this](const Shape& shape) {
          return transfer_manager_->GetByteSizeRequirement(shape);
        }) {
    stream_ptr_ = local_client_->mutable_backend()
                      ->BorrowStream(stream_executor_)
                      .value();
    stream_ = stream_ptr_.get();
  }
  ~TransferManagerTest() override = default;
  // Allocates an on-device ScopedShapedBuffer for `shape` on device 0.
  ScopedShapedBuffer AllocateDeviceBuffer(const Shape& shape) {
    return transfer_manager_
        ->AllocateScopedShapedBuffer(
            shape, GetOrCreateAllocator(local_client_->platform()),
            /*device_ordinal=*/0)
        .value();
  }
 protected:
  StreamPool::Ptr stream_ptr_;
  se::Stream* stream_;
 private:
  // NOTE(review): not referenced anywhere in this file — confirm whether it
  // is still needed before removing.
  std::function<int64_t(const Shape&)> shape_size_fn_;
};
// Round-trips a scalar uint32 literal through the device.
XLA_TEST_F(TransferManagerTest, TransferR0U32) {
  Literal literal = LiteralUtil::CreateR0<uint32_t>(42);
  const Shape& shape = literal.shape();
  auto device_buffer = AllocateDeviceBuffer(shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  LiteralTestUtil::ExpectR0Equal<uint32_t>(42, result);
}
// Round-trips a small rank-1 F32 literal through the device.
XLA_TEST_F(TransferManagerTest, TransferR1F32) {
  Literal literal =
      LiteralUtil::CreateR1<float>({1.25f, 2.5f, -17.0f, -20.125f});
  const Shape& shape = literal.shape();
  auto device_buffer = AllocateDeviceBuffer(shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  LiteralTestUtil::ExpectR1Equal<float>({1.25f, 2.5f, -17.0f, -20.125f},
                                        result);
}
// Round-trips every rank-1 length in [0, 2^11) to cover awkward sizes around
// alignment/padding boundaries, including the empty array.
XLA_TEST_F(TransferManagerTest, TransferR1F32AwkwardSizes) {
  constexpr int kMaxR1Size = (1 << 11);
  for (int i = 0; i < kMaxR1Size; ++i) {
    std::vector<float> inputs(i);
    std::iota(inputs.begin(), inputs.end(), 0);
    Literal literal = LiteralUtil::CreateR1<float>(inputs);
    const Shape& shape = literal.shape();
    auto device_buffer = AllocateDeviceBuffer(shape);
    ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                            device_buffer));
    TF_ASSERT_OK_AND_ASSIGN(
        Literal result,
        transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
    LiteralTestUtil::ExpectR1Equal<float>(inputs, result);
  }
}
// Round-trips a 1M-element rank-1 F32 literal through the device.
XLA_TEST_F(TransferManagerTest, TransferR1LargeF32) {
  std::vector<float> test_vector(1024 * 1024);
  std::iota(test_vector.begin(), test_vector.end(), 0);
  Literal literal = LiteralUtil::CreateR1<float>(test_vector);
  const Shape& shape = literal.shape();
  auto device_buffer = AllocateDeviceBuffer(shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  LiteralTestUtil::ExpectR1Equal<float>(test_vector, result);
}
// Transfers from a host pointer deliberately offset by one float
// (&test_vector[1]) so the source buffer is not naturally aligned.
XLA_TEST_F(TransferManagerTest, TransferR1LargeUnalignedF32) {
  std::vector<float> test_vector(1025);
  std::iota(test_vector.begin(), test_vector.end(), 0);
  Shape shape = ShapeUtil::MakeShape(F32, {1024});
  // Borrow 1024 floats starting at element 1, so the expected values are
  // 1..1024.
  BorrowingLiteral literal(reinterpret_cast<const char*>(&test_vector[1]),
                           shape);
  auto device_buffer = AllocateDeviceBuffer(shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  std::vector<float> expected_output(1024);
  std::iota(expected_output.begin(), expected_output.end(), 1);
  LiteralTestUtil::ExpectR1Equal<float>(expected_output, result);
}
// Round-trips a rank-1 U8 (string payload) literal through the device.
XLA_TEST_F(TransferManagerTest, TransferR1U8) {
  const char* test_string = "0123456789abcdef";
  Literal literal = LiteralUtil::CreateR1U8(test_string);
  const Shape& shape = literal.shape();
  auto device_buffer = AllocateDeviceBuffer(shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_EQ(result.GetR1U8AsString(), test_string);
}
// Round-trips a 2x3 F32 matrix through the device.
XLA_TEST_F(TransferManagerTest, TransferR2F32) {
  Literal literal =
      LiteralUtil::CreateR2<float>({{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}});
  const Shape& shape = literal.shape();
  auto device_buffer = AllocateDeviceBuffer(shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  LiteralTestUtil::ExpectR2Equal<float>(
      {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, result);
}
// Transfers a column-major ({0, 1}) host literal into a row-major ({1, 0})
// device buffer: the logical values must round-trip intact while the result
// comes back in the device buffer's layout.
XLA_TEST_F(TransferManagerTest,
           TransferR2F32AndChangeLayoutTransferringToDevice) {
  Literal literal = LiteralUtil::CreateR2WithLayout<float>(
      {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, LayoutUtil::MakeLayout({0, 1}));
  const Shape ondevice_shape =
      ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 3}, {1, 0});
  auto device_buffer = AllocateDeviceBuffer(ondevice_shape);
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  // Layouts differ, but the logical element values are preserved.
  EXPECT_FALSE(
      LayoutUtil::Equal(result.shape().layout(), literal.shape().layout()));
  LiteralTestUtil::ExpectR2Equal<float>(
      {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, result);
}
// Round-trips a flat tuple of mixed-rank F32 elements through the device.
XLA_TEST_F(TransferManagerTest, TransferTuple) {
  Literal literal = LiteralUtil::MakeTupleFromSlices(
      {LiteralUtil::CreateR0<float>(123.0f),
       LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
       LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})});
  auto device_buffer = AllocateDeviceBuffer(literal.shape());
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
// An empty tuple has no element buffers; the round trip must still succeed.
XLA_TEST_F(TransferManagerTest, TransferEmptyTuple) {
  Literal literal = LiteralUtil::MakeTuple({});
  auto device_buffer = AllocateDeviceBuffer(literal.shape());
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
// Round-trips a tuple containing a nested tuple through the device.
XLA_TEST_F(TransferManagerTest, TransferNestedTuple) {
  Literal literal = LiteralUtil::MakeTupleFromSlices(
      {LiteralUtil::CreateR0<float>(123.0f),
       LiteralUtil::MakeTupleFromSlices(
           {LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
            LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})}),
       LiteralUtil::CreateR1<float>({-10.0f, 123.0f})});
  auto device_buffer = AllocateDeviceBuffer(literal.shape());
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
// Round-trips a rank-1 complex64 literal through the device.
XLA_TEST_F(TransferManagerTest, TransferComplexValue) {
  Literal literal = LiteralUtil::CreateR1<complex64>(
      {complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)});
  auto device_buffer = AllocateDeviceBuffer(literal.shape());
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
// Round-trips complex64 values mixed with other element types in a tuple.
XLA_TEST_F(TransferManagerTest, TransferComplexValueInTuple) {
  Literal literal = LiteralUtil::MakeTupleFromSlices(
      {LiteralUtil::CreateR1<complex64>(
           {complex64(1.0f, 2.0f), complex64(42.0f, -123.4f)}),
       LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6}),
       LiteralUtil::CreateR0<complex64>(complex64(0.3f, -0.4f))});
  auto device_buffer = AllocateDeviceBuffer(literal.shape());
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
// A token carries no data payload; reading one back from the device must
// still yield the token literal.
XLA_TEST_F(TransferManagerTest, TransferTokenFromDevice) {
  auto device_buffer = AllocateDeviceBuffer(ShapeUtil::MakeTokenShape());
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateToken(), result));
}
// Soak test: repeatedly round-trips two different tuples on the main stream
// and a substream in parallel, checking that the final results are not
// corrupted by cross-stream interference.
XLA_TEST_F(TransferManagerTest, OVERSIZE_ON_GRM(MultiStreamRoundTripSoak)) {
  const int64_t kIterationCount = 5000;
  Literal literal1 = LiteralUtil::MakeTupleFromSlices(
      {LiteralUtil::CreateR0<float>(123.0f),
       LiteralUtil::MakeTupleFromSlices(
           {LiteralUtil::CreateR2<float>({{1.0f, 2.0f}, {4.0f, 5.0f}}),
            LiteralUtil::CreateR1<float>({44.0f, -10.0f, 3333333.3f})}),
       LiteralUtil::CreateR1<float>({-10.0f, 123.0f})});
  Literal literal2 = LiteralUtil::MakeTupleFromSlices(
      {LiteralUtil::CreateR0<float>(456.0f),
       LiteralUtil::MakeTupleFromSlices(
           {LiteralUtil::CreateR2<float>({{5.0f, 7.0f}, {9.0f, 4.0f}}),
            LiteralUtil::CreateR1<float>({44.0f, -11.0f, 3333333.3f})}),
       LiteralUtil::CreateR1<float>({-98.0f, 153.0f})});
  auto device_buffer1 = AllocateDeviceBuffer(literal1.shape());
  auto device_buffer2 = AllocateDeviceBuffer(literal2.shape());
  auto stream1 = stream_;
  // NOTE(review): the substream is never returned to the parent stream;
  // acceptable for a test binary, but confirm this is intentional.
  auto stream2 = stream_->GetOrCreateSubStream().value();
  Literal result1, result2;
  for (int i = 0; i < kIterationCount; ++i) {
    ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream1, literal1,
                                                            device_buffer1));
    ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream2, literal2,
                                                            device_buffer2));
    TF_ASSERT_OK_AND_ASSIGN(
        Literal this_result1,
        transfer_manager_->TransferLiteralFromDevice(stream1, device_buffer1));
    TF_ASSERT_OK_AND_ASSIGN(
        Literal this_result2,
        transfer_manager_->TransferLiteralFromDevice(stream2, device_buffer2));
    result1 = std::move(this_result1);
    result2 = std::move(this_result2);
  }
  // Only the last iteration's results are checked.
  EXPECT_TRUE(LiteralTestUtil::Equal(literal1, result1));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal2, result2));
}
// Round-trips a tuple whose array elements have dynamic leading dimensions
// and verifies both the data and the dynamic sizes survive the transfer.
XLA_TEST_F(TransferManagerTest, DISABLED_ON_TPU(TransferDynamicShape)) {
  TF_ASSERT_OK_AND_ASSIGN(
      Shape s, ParseShape("(s64[], s32[<=1048576,3], f32[<=1048576,48])"));
  Literal literal(s);
  // Shrink dimension 0 of tuple elements {1} and {2} just below their
  // static bounds.
  literal.SetDynamicSize(0, {1},
                         1048574);
  literal.SetDynamicSize(0, {2},
                         1048575);
  ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {0})
                   .Populate<int64_t>(
                       [](absl::Span<const int64_t> indices) { return 42; }));
  ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {1})
                   .Populate<int32_t>([](absl::Span<const int64_t> indices) {
                     return indices[0] + indices[1];
                   }));
  ASSERT_IS_OK(MutableBorrowingLiteral(&literal, {2})
                   .Populate<float>([](absl::Span<const int64_t> indices) {
                     return indices[0] + indices[1];
                   }));
  ScopedShapedBuffer device_buffer = AllocateDeviceBuffer(literal.shape());
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
  // The dynamic sizes must round-trip, not just the element data.
  EXPECT_EQ(literal.GetDynamicSize(0, {1}),
            result.GetDynamicSize(0, {1}));
  EXPECT_EQ(literal.GetDynamicSize(0, {2}),
            result.GetDynamicSize(0, {2}));
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
// Benchmarks device->host literal transfer of a tuple of
// `num_tuple_elements` square F32 arrays of side `array_size`. The fixture's
// SetUp/TearDown are driven manually since this is not run via gtest.
class TransferDeviceToHostBenchmark : public TransferManagerTest {
 public:
  using TransferManagerTest::TransferManagerTest;
  ~TransferDeviceToHostBenchmark() override {}
  void Run(::testing::benchmark::State& state, int num_tuple_elements,
           int array_size) {
    SetUp();
    std::vector<Literal> tuple_elements;
    for (int i = 0; i < num_tuple_elements; ++i) {
      tuple_elements.push_back(
          LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
    }
    Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
    auto device_buffer = AllocateDeviceBuffer(literal.shape());
    // Upload once outside the timed loop; only the download is measured.
    TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                           device_buffer));
    for (auto s : state) {
      TF_ASSERT_OK_AND_ASSIGN(
          Literal result,
          transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));
    }
    TearDown();
  }
  // Required by the gtest fixture interface; benchmarks call Run() directly.
  void TestBody() override {}
};
// Benchmarks host->device literal transfer of a tuple of
// `num_tuple_elements` square F32 arrays of side `array_size`.
class TransferHostToDeviceBenchmark : public TransferManagerTest {
 public:
  using TransferManagerTest::TransferManagerTest;
  ~TransferHostToDeviceBenchmark() override {}
  void Run(::testing::benchmark::State& state, int num_tuple_elements,
           int array_size) {
    SetUp();
    std::vector<Literal> tuple_elements;
    for (int i = 0; i < num_tuple_elements; ++i) {
      tuple_elements.push_back(
          LiteralUtil::CreateR2F32Linspace(0.0f, 1.0f, array_size, array_size));
    }
    Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
    auto device_buffer = AllocateDeviceBuffer(literal.shape());
    // Only the upload is inside the timed loop.
    for (auto s : state) {
      TF_CHECK_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                             device_buffer));
    }
    TearDown();
  }
  // Required by the gtest fixture interface; benchmarks call Run() directly.
  void TestBody() override {}
};
// Benchmark entry point; args are (num_tuple_elements, array_size).
void BM_TransferDeviceToHost(::testing::benchmark::State& state) {
  const int num_tuple_elements = state.range(0);
  const int array_size = state.range(1);
  TransferDeviceToHostBenchmark bm;
  bm.Run(state, num_tuple_elements, array_size);
}
// Benchmark entry point; args are (num_tuple_elements, array_size).
void BM_TransferHostToDevice(::testing::benchmark::State& state) {
  const int num_tuple_elements = state.range(0);
  const int array_size = state.range(1);
  TransferHostToDeviceBenchmark bm;
  bm.Run(state, num_tuple_elements, array_size);
}
// Benchmark arguments are (num_tuple_elements, array_size).
BENCHMARK(BM_TransferHostToDevice)
    ->ArgPair(1, 256)
    ->ArgPair(1, 257)
    ->ArgPair(100, 256)
    ->ArgPair(100, 257);
BENCHMARK(BM_TransferDeviceToHost)
    ->ArgPair(1, 256)
    ->ArgPair(1, 257)
    ->ArgPair(100, 256)
    ->ArgPair(100, 257);
// Custom main so this binary can run both the registered benchmarks and the
// gtest suite. NOTE(review): RunBenchmarks() presumably only executes when
// benchmark flags are passed — confirm against tsl::testing.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  tsl::testing::RunBenchmarks();
  return RUN_ALL_TESTS();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/transfer_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/transfer_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
27ddbda0-b0b4-4bd1-becc-a1d64dcc555f | cpp | tensorflow/tensorflow | assert_prev_dataset_op | tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.cc | tensorflow/core/kernels/data/experimental/assert_prev_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.h"
#include <map>
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/rewrite_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace data {
namespace experimental {
// Out-of-line definitions of the static constexpr name constants declared in
// the header (needed for ODR-use under pre-C++17 rules).
constexpr char AssertPrevDatasetOp::kInputDataset[];
constexpr char AssertPrevDatasetOp::kDatasetType[];
constexpr char AssertPrevDatasetOp::kTransformations[];
constexpr char AssertPrevDatasetOp::kOutputTypes[];
constexpr char AssertPrevDatasetOp::kOutputShapes[];
namespace {
// Parses `transformation` (a text-format NameAttrList proto) into the
// asserted op name + attributes. Fails on lite/portable proto runtimes,
// which cannot do text-format parsing.
absl::StatusOr<NameAttrList> GetAssertions(const tstring& transformation) {
  if (!std::is_base_of<protobuf::Message, NameAttrList>()) {
    return errors::InvalidArgument(
        "Portable proto implementations are not supported.");
  }
  NameAttrList parsed;
  auto* message = reinterpret_cast<protobuf::Message*>(&parsed);
  if (!protobuf::TextFormat::ParseFromString(transformation, message)) {
    return errors::InvalidArgument("Couldn't parse transformation '",
                                   transformation, "'.");
  }
  return parsed;
}
// Returns the transformation applied immediately before `dataset`, i.e. its
// direct input. Fails when the dataset has no inputs (a source dataset).
absl::StatusOr<const DatasetBase*> GetPreviousDataset(
    const DatasetBase& dataset) {
  std::vector<const DatasetBase*> inputs;
  Status status = dataset.InputDatasets(&inputs);
  if (!status.ok()) {
    return status;
  }
  if (inputs.empty()) {
    return errors::InvalidArgument("No previous transformation found.");
  }
  // For multi-input datasets, the last input is the one inspected.
  return inputs.back();
}
// Verifies that `dataset`'s op type matches the asserted op name (any
// registered version of it).
Status CheckOpName(const DatasetBase& dataset, const NameAttrList& assertions) {
  if (MatchesAnyVersion(assertions.name(), dataset.type_string())) {
    return absl::OkStatus();
  }
  return errors::InvalidArgument("Asserted transformation matching '",
                                 assertions.name(), "', but found '",
                                 dataset.type_string(), "'.");
}
// Serializes `dataset` to a GraphDef and returns the NodeDef of the
// dataset's own node. `op_name` is currently unused but kept so callers'
// signatures stay stable.
absl::StatusOr<NodeDef> GetDatasetNode(const DatasetBase& dataset,
                                       absl::string_view op_name) {
  SerializationContext serialization_ctx((SerializationContext::Params()));
  GraphDef graph_def;
  TF_RETURN_IF_ERROR(
      AsGraphDef(&dataset, std::move(serialization_ctx), &graph_def));
  TF_ASSIGN_OR_RETURN(NodeDef node, GetDatasetNodeDef(graph_def));
  return node;
}
// Verifies that every attribute asserted in `assertions` is present on
// `dataset`'s serialized NodeDef with an equivalent value.
Status CheckAttributes(const DatasetBase& dataset,
                       const NameAttrList& assertions) {
  if (assertions.attr().empty()) return absl::OkStatus();
  TF_ASSIGN_OR_RETURN(NodeDef node, GetDatasetNode(dataset, assertions.name()));
  for (const auto& attr : assertions.attr()) {
    auto it = node.attr().find(attr.first);
    if (it == node.attr().end()) {
      return errors::InvalidArgument(
          "Asserted attribute '", attr.first, "' having a value of '",
          attr.second.DebugString(), "', but found no such attribute defined.");
    }
    // AttrValue comparison requires a full (non-lite) protobuf runtime.
    if (!std::is_base_of<protobuf::Message, AttrValue>()) {
      return errors::InvalidArgument(
          "Portable proto implementations are not supported.");
    }
    if (!protobuf::util::MessageDifferencer::Equivalent(
            *reinterpret_cast<const protobuf::Message*>(&it->second),
            *reinterpret_cast<const protobuf::Message*>(&attr.second))) {
      return errors::InvalidArgument(
          "Asserted attribute '", attr.first, "' having a value of '",
          attr.second.DebugString(), "', but found value of '",
          it->second.DebugString(), "'.");
    }
  }
  return absl::OkStatus();
}
// Parses one asserted transformation and checks both its op name and its
// attributes against `dataset`.
Status CheckTransformation(const DatasetBase& dataset,
                           const tstring& transformation) {
  TF_ASSIGN_OR_RETURN(NameAttrList assertions, GetAssertions(transformation));
  TF_RETURN_IF_ERROR(CheckOpName(dataset, assertions));
  return CheckAttributes(dataset, assertions);
}
}
// Dataset that, at iterator-initialization time, asserts which
// transformations appear immediately upstream in the input pipeline, then
// forwards its input's elements unchanged.
class AssertPrevDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, const DatasetBase* input,
          const std::vector<tstring>& transformations,
          const DataTypeVector& output_types,
          const std::vector<PartialTensorShape>& output_shapes)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        transformations_(transformations),
        output_types_(output_types),
        output_shapes_(output_shapes) {
    input_->Ref();  // Hold a reference on the upstream dataset for our lifetime.
  }
  ~Dataset() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  const DataTypeVector& output_dtypes() const override { return output_types_; }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return output_shapes_;
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }
  // Pass-through: this op does not add or drop elements.
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    return input_->Cardinality(options);
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }
 protected:
  // Serializes as: AssertPrevDataset(input, transformations_vector).
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* transformations_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddVector(transformations_, &transformations_node));
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_graph_node, transformations_node}, output));
    return absl::OkStatus();
  }
 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params) {}
    // Walks upstream one dataset per asserted transformation, checking each
    // in turn, then delegates to the input dataset's iterator. Fails with
    // InvalidArgument if the pipeline is shorter than the assertion list or
    // any transformation check fails.
    Status Initialize(IteratorContext* ctx) override {
      const DatasetBase* current_dataset = dataset();
      for (int i = 0; i < dataset()->transformations_.size(); ++i) {
        absl::StatusOr<const DatasetBase*> previous_dataset =
            GetPreviousDataset(*current_dataset);
        if (!previous_dataset.ok()) {
          return errors::InvalidArgument(
              "Asserted previous ", dataset()->transformations_.size(),
              " transformations but encountered only ", i, ".");
        }
        Status s = CheckTransformation(**previous_dataset,
                                       dataset()->transformations_[i]);
        if (!s.ok()) {
          return errors::InvalidArgument(
              "Failure checking transformations at offset ", i, ": ",
              s.message());
        }
        current_dataset = *previous_dataset;
      }
      return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
    }
    // Elements come straight from the input iterator; no transformation.
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       /*ratio=*/1);
    }
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
      return absl::OkStatus();
    }
   private:
    std::unique_ptr<IteratorBase> input_impl_;  // Iterator over input_.
  };
  const DatasetBase* input_;  // Ref-held upstream dataset.
  // Serialized NameAttrList assertions, outermost transformation first.
  const std::vector<tstring> transformations_;
  const DataTypeVector output_types_;
  const std::vector<PartialTensorShape> output_shapes_;
};
// Reads the static output type/shape attrs at kernel-construction time.
AssertPrevDatasetOp::AssertPrevDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
// Builds the Dataset from the input dataset and the `transformations`
// vector argument (a 1-D string tensor of serialized assertions).
void AssertPrevDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                      DatasetBase** output) {
  std::vector<tstring> transformations;
  OP_REQUIRES_OK(ctx, ParseVectorArgument<tstring>(ctx, kTransformations,
                                                   &transformations));
  *output =
      new Dataset(ctx, input, transformations, output_types_, output_shapes_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("AssertPrevDataset").Device(DEVICE_CPU),
                        AssertPrevDatasetOp);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
#include "tensorflow/core/kernels/data/take_dataset_op.h"
#include "tensorflow/core/kernels/data/tensor_slice_dataset_op.h"
namespace tensorflow {
namespace data {
namespace experimental {
namespace {
constexpr char kNodeName[] = "assert_prev_dataset";
// Builds a text-format `NameAttrList` assertion for the dataset op named
// "<name>Dataset", optionally carrying boolean attribute assertions.
std::string GetTransformation(
    absl::string_view name,
    std::initializer_list<std::pair<std::string, bool>> attrs = {}) {
  NameAttrList assertion;
  assertion.set_name(absl::StrCat(name, "Dataset"));
  // Attach each (attribute name, bool) pair as an asserted AttrValue.
  for (const auto& attr : attrs) {
    AttrValue bool_value;
    bool_value.set_b(attr.second);
    assertion.mutable_attr()->insert({attr.first, bool_value});
  }
  std::string serialized;
  protobuf::TextFormat::PrintToString(assertion, &serialized);
  return serialized;
}
// Test parameters for AssertPrevDataset: wraps an arbitrary input dataset's
// params plus the list of serialized transformation assertions fed to the op.
class AssertPrevDatasetParams : public DatasetParams {
 public:
  template <typename T>
  AssertPrevDatasetParams(T input_dataset_params,
                          const std::vector<tstring>& transformations,
                          DataTypeVector output_dtypes,
                          std::vector<PartialTensorShape> output_shapes,
                          string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        transformations_(transformations) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The op's second input: a 1-D string tensor of the assertions.
  std::vector<Tensor> GetInputTensors() const override {
    int num_transformations = transformations_.size();
    return {CreateTensor<tstring>(TensorShape({num_transformations}),
                                  transformations_)};
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->reserve(input_dataset_params_.size() + 1);
    input_names->emplace_back(AssertPrevDatasetOp::kInputDataset);
    input_names->emplace_back(AssertPrevDatasetOp::kTransformations);
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{AssertPrevDatasetOp::kOutputShapes, output_shapes_},
                    {AssertPrevDatasetOp::kOutputTypes, output_dtypes_}};
    return absl::OkStatus();
  }
  string dataset_type() const override {
    return AssertPrevDatasetOp::kDatasetType;
  }
 private:
  std::vector<tstring> transformations_;  // Serialized NameAttrList strings.
};
class AssertPrevDatasetOpTest : public DatasetOpsTestBase {};
// range(0, 10, 1).take(3) with a single, correct assertion on TakeDataset.
AssertPrevDatasetParams AssertPrevDatasetParams1() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        /*count=*/3,
                        /*output_dtypes=*/{DT_INT64},
                        /*output_shapes=*/{PartialTensorShape({})},
                        /*node_name=*/"take_dataset");
  return AssertPrevDatasetParams(
      std::move(take_dataset_params),
      /*transformations=*/{GetTransformation(TakeDatasetOp::kDatasetType)},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*node_name=*/kNodeName);
}
// Same pipeline as Params1, but asserts two transformations deep
// (Take then Range) — both correct.
AssertPrevDatasetParams AssertPrevDatasetParams2() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        /*count=*/3,
                        /*output_dtypes=*/{DT_INT64},
                        /*output_shapes=*/{PartialTensorShape({})},
                        /*node_name=*/"take_dataset");
  return AssertPrevDatasetParams(
      std::move(take_dataset_params),
      /*transformations=*/
      {GetTransformation(TakeDatasetOp::kDatasetType),
       GetTransformation(RangeDatasetOp::kDatasetType)},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*node_name=*/kNodeName);
}
// Like Params2 but over a TensorSliceDataset input, asserting an attribute
// value ("is_files" == false) in addition to the op names.
AssertPrevDatasetParams AssertPrevDatasetParams2WithAttrs() {
  TakeDatasetParams take_dataset_params = TakeDatasetParams(
      TensorSliceDatasetParams(
          {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                                 {0, 1, 2, 3, 4, 5, 6, 7, 8})},
          /*node_name=*/"tensor_slice_dataset"),
      /*count=*/3,
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*node_name=*/"take_dataset");
  return AssertPrevDatasetParams(
      std::move(take_dataset_params),
      /*transformations=*/
      {GetTransformation(TakeDatasetOp::kDatasetType),
       GetTransformation(TensorSliceDatasetOp::kDatasetType,
                         {{"is_files", false}})},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*node_name=*/kNodeName);
}
// Asserts a transformation ("Whoops") that does not exist in the pipeline;
// iterator initialization should fail with InvalidArgument.
AssertPrevDatasetParams InvalidAssertPrevDatasetParams() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        /*count=*/3,
                        /*output_dtypes=*/{DT_INT64},
                        /*output_shapes=*/{PartialTensorShape({})},
                        /*node_name=*/"take_dataset");
  return AssertPrevDatasetParams(
      std::move(take_dataset_params),
      /*transformations=*/{GetTransformation("Whoops")},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*node_name=*/kNodeName);
}
// Asserts more transformations than the pipeline contains (the third,
// "Whoops", is past the start of the pipeline).
AssertPrevDatasetParams ShortAssertPrevDatasetParams() {
  TakeDatasetParams take_dataset_params =
      TakeDatasetParams(RangeDatasetParams(0, 10, 1),
                        /*count=*/3,
                        /*output_dtypes=*/{DT_INT64},
                        /*output_shapes=*/{PartialTensorShape({})},
                        /*node_name=*/"take_dataset");
  return AssertPrevDatasetParams(
      std::move(take_dataset_params),
      /*transformations=*/
      {GetTransformation(TakeDatasetOp::kDatasetType),
       GetTransformation(RangeDatasetOp::kDatasetType),
       GetTransformation("Whoops")},
      /*output_dtypes=*/{DT_INT64},
      /*output_shapes=*/{PartialTensorShape({})},
      /*node_name=*/kNodeName);
}
// Both valid param sets produce the first three range elements {0, 1, 2}.
std::vector<GetNextTestCase<AssertPrevDatasetParams>> GetNextTestCases() {
  return {{AssertPrevDatasetParams1(),
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
          {AssertPrevDatasetParams2(),
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_GET_NEXT_TEST_P(AssertPrevDatasetOpTest, AssertPrevDatasetParams,
                         GetNextTestCases())
// Basic structural checks on the constructed dataset and its iterator.
TEST_F(AssertPrevDatasetOpTest, DatasetNodeName) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
// Attribute assertions (is_files=false on TensorSliceDataset) should pass.
TEST_F(AssertPrevDatasetOpTest, DatasetAttrs) {
  auto dataset_params = AssertPrevDatasetParams2WithAttrs();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(AssertPrevDatasetOpTest, DatasetTypeString) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(AssertPrevDatasetOp::kDatasetType)));
}
TEST_F(AssertPrevDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(AssertPrevDatasetOpTest, DatasetOutputShapes) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
// Cardinality equals take(3)'s count, since assert_prev is a pass-through.
TEST_F(AssertPrevDatasetOpTest, Cardinality) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetCardinality(3));
}
TEST_F(AssertPrevDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(AssertPrevDatasetOpTest, IteratorOutputShapes) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(AssertPrevDatasetOpTest, IteratorPrefix) {
  auto dataset_params = AssertPrevDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      AssertPrevDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
// Save/restore at breakpoints 0, 2 and 5; outputs must still be {0, 1, 2}.
std::vector<IteratorSaveAndRestoreTestCase<AssertPrevDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{AssertPrevDatasetParams1(),
           {0, 2, 5},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})},
          {AssertPrevDatasetParams2(),
           {0, 2, 5},
           CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(AssertPrevDatasetOpTest,
                                 AssertPrevDatasetParams,
                                 IteratorSaveAndRestoreTestCases())
// A non-existent asserted transformation fails iterator initialization.
TEST_F(AssertPrevDatasetOpTest, InvalidArguments) {
  auto dataset_params = InvalidAssertPrevDatasetParams();
  EXPECT_EQ(Initialize(dataset_params).code(),
            absl::StatusCode::kInvalidArgument);
}
// Asserting more transformations than the pipeline has also fails.
TEST_F(AssertPrevDatasetOpTest, ShortAssertPrev) {
  auto dataset_params = ShortAssertPrevDatasetParams();
  EXPECT_EQ(Initialize(dataset_params).code(),
            absl::StatusCode::kInvalidArgument);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_prev_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/assert_prev_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5293b335-1054-4251-bbc9-edecf54c9bf3 | cpp | tensorflow/tensorflow | inject_prefetch | tensorflow/core/grappler/optimizers/data/inject_prefetch.cc | tensorflow/core/grappler/optimizers/data/inject_prefetch_test.cc | #include "tensorflow/core/grappler/optimizers/data/inject_prefetch.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kPrefetchDataset[] = "PrefetchDataset";
// Transformations that are already asynchronous; a trailing prefetch after
// one of these is not injected.
constexpr std::array<const char*, 5> kAsyncTransforms = {
    "MapAndBatchDataset", "ParallelBatchDataset", "ParallelInterleaveDataset",
    "ParallelMapDataset", "PrefetchDataset"};
// Metadata/pass-through datasets skipped over when searching for the last
// real transformation of the pipeline.
constexpr std::array<const char*, 8> kDatasetsToSkip = {
    "AssertNextDataset",
    "ExperimentalAssertNextDataset",
    "IgnoreErrorsDataset",
    "OptionsDataset",
    "ModelDataset",
    "OptimizeDataset",
    "MaxIntraOpParallelismDataset",
    "PrivateThreadPoolDataset",
};
bool ShouldInjectPrefetch(const NodeDef* last_node,
const MutableGraphView& graph) {
while (last_node != nullptr &&
absl::c_any_of(kDatasetsToSkip, [last_node](const char* dataset) {
return data::MatchesAnyVersion(dataset, last_node->op());
})) {
last_node = graph_utils::GetInputNode(*last_node, graph);
}
if (last_node == nullptr) {
VLOG(1) << "The optimization inject_prefetch is not applied because graph "
"rewrite failed to find a dataset node.";
return false;
}
if (absl::c_any_of(kAsyncTransforms, [last_node](const char* dataset) {
return data::MatchesAnyVersion(dataset, last_node->op());
})) {
VLOG(1) << "The optimization inject_prefetch is not applied because the "
"last transformation of the input pipeline is an asynchronous "
"transformation: "
<< last_node->op();
return false;
}
return true;
}
}
// Appends a `PrefetchDataset` with AUTOTUNE buffer size after the last
// synchronous transformation of the pipeline. No-op (returns the input graph
// unchanged) when autotune is off, the item is derived from a function body,
// or injection is not warranted per ShouldInjectPrefetch.
Status InjectPrefetch::OptimizeAndCollectStats(Cluster* cluster,
                                               const GrapplerItem& item,
                                               GraphDef* output,
                                               OptimizationStats* stats) {
  *output = item.graph;
  if (!autotune_) {
    VLOG(1) << "The optimization inject_prefetch is not applied if autotune is "
               "off.";
    return absl::OkStatus();
  }
  MutableGraphView graph(output);
  // Function bodies are not full pipelines; skip them.
  if (graph_utils::IsItemDerivedFromFunctionDef(item, graph)) {
    return absl::OkStatus();
  }
  if (item.fetch.size() != 1) {
    return errors::InvalidArgument(
        "Expected only one fetch node but there were ", item.fetch.size(), ": ",
        absl::StrJoin(item.fetch, ", "));
  }
  NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
  NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
  if (!ShouldInjectPrefetch(last_node, graph)) {
    return absl::OkStatus();
  }
  // Build PrefetchDataset(last_node, buffer_size=AUTOTUNE).
  NodeDef prefetch_node;
  graph_utils::SetUniqueGraphNodeName(
      strings::StrCat("inject/prefetch_", last_node->name()), graph.graph(),
      &prefetch_node);
  prefetch_node.set_op(kPrefetchDataset);
  *prefetch_node.mutable_input()->Add() = last_node->name();
  NodeDef* autotune_value =
      graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
  *prefetch_node.mutable_input()->Add() = autotune_value->name();
  // Without output shape/type attrs the new node would be malformed; give up.
  if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &prefetch_node))
    return absl::OkStatus();
  TF_RETURN_IF_ERROR(
      graph_utils::SetMetadataName(prefetch_node.name(), &prefetch_node));
  auto* added_node = graph.AddNode(std::move(prefetch_node));
  // Re-point consumers of last_node at the new prefetch node.
  TF_RETURN_IF_ERROR(
      graph.UpdateFanouts(last_node->name(), added_node->name()));
  stats->num_changes++;
  return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(InjectPrefetch, "inject_prefetch");
}
} | #include "tensorflow/core/grappler/optimizers/data/inject_prefetch.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
constexpr char kOptionsDataset[] = "OptionsDataset";
constexpr char kParallelMapDataset[] = "ParallelMapDatasetV2";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
// Runs `optimizer` on `item` after initializing it with the given autotune
// setting (passed to the optimizer as a string parameter).
Status Optimize(InjectPrefetch &optimizer, const GrapplerItem &item,
                GraphDef *output, bool autotune) {
  RewriterConfig_CustomGraphOptimizer config;
  (*config.mutable_parameter_map())["autotune"].set_s(autotune ? "true"
                                                               : "false");
  TF_RETURN_IF_ERROR(optimizer.Init(&config));
  return optimizer.Optimize(nullptr, item, output);
}
// Convenience wrapper: constructs a fresh InjectPrefetch optimizer and runs it.
Status OptimizeWithInjectPrefetch(const GrapplerItem &item, GraphDef *output,
                                  bool autotune) {
  InjectPrefetch optimizer;
  return Optimize(optimizer, item, output, autotune);
}
// Parameterized over the autotune setting (the bool test parameter).
class InjectPrefetchParameterizedTest : public ::testing::TestWithParam<bool> {
};
// A prefetch node is injected after `range` iff autotune is enabled.
TEST_P(InjectPrefetchParameterizedTest, TestAutotuneSetting) {
  const bool autotune = GetParam();
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("Sink", "Identity", {"range"}, {})});
  item.fetch.push_back("Sink");
  GraphDef inject_prefetch_output;
  TF_ASSERT_OK(
      OptimizeWithInjectPrefetch(item, &inject_prefetch_output, autotune));
  EXPECT_EQ(autotune, graph_utils::ContainsNodeWithOp(kPrefetchDataset,
                                                      inject_prefetch_output));
  EXPECT_EQ(autotune, graph_utils::ContainsGraphNodeWithName(
                          "inject/prefetch_range", inject_prefetch_output));
}
INSTANTIATE_TEST_SUITE_P(AutotuneSetting, InjectPrefetchParameterizedTest,
                         ::testing::Values(false, true));
// A graph ending in _Retval is a function body; no prefetch is injected.
TEST(InjectPrefetchTest, FromFunctionDef) {
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("Sink", "_Retval", {"range"}, {})});
  item.fetch.push_back("Sink");
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
  EXPECT_FALSE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
}
// A pipeline already ending in prefetch gets no second prefetch
// (node count stays at the original 6).
TEST(InjectPrefetchTest, AlreadyPrefetched) {
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("prefetch", kPrefetchDataset, {"range"}, {}),
       NDef("Sink", "Identity", {"prefetch"}, {})});
  item.fetch.push_back("Sink");
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
  EXPECT_TRUE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
  EXPECT_EQ(6, output.node_size());
}
// A pipeline ending in an async transformation (ParallelMap) is left alone.
TEST(InjectPrefetchTest, AlreadyParallelMap) {
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("parallel_map", kParallelMapDataset, {"range"},
            {{"f", "__inference_Dataset_map_normalize_8232"},
             {"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("Sink", "Identity", {"parallel_map"}, {})});
  item.fetch.push_back("Sink");
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
  EXPECT_FALSE(graph_utils::ContainsNodeWithOp(kPrefetchDataset, output));
  EXPECT_EQ(6, output.node_size());
}
// OptionsDataset is skipped when inspecting the pipeline tail, so the
// existing prefetch beneath it suppresses injection.
TEST(InjectPrefetchTest, OptionsFollowedByPrefetched) {
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("prefetch", kPrefetchDataset, {"range"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("options", kOptionsDataset, {"prefetch"},
            {{"output_shapes", absl::Span<const TensorShape>{}},
             {"output_types", absl::Span<const DataType>{}}}),
       NDef("Sink", "Identity", {"options"}, {})});
  item.fetch.push_back("Sink");
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithInjectPrefetch(item, &output, true));
  EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("inject/prefetch_options",
                                                      output));
  EXPECT_EQ(7, output.node_size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_prefetch.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/inject_prefetch_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6e0a30ac-5324-4c52-88ed-dcda49ff08f4 | cpp | tensorflow/tensorflow | collective_ops | tensorflow/core/ops/collective_ops.cc | third_party/xla/xla/tests/collective_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
// V1 collective all-reduce: group membership and reduction are fixed via node
// attributes. Output shape equals the input shape.
REGISTER_OP("CollectiveReduce")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {bfloat16, float, float16, float64, int32, int64}")
    .Attr("group_size: int")
    .Attr("group_key: int")
    .Attr("instance_key: int")
    .Attr("merge_op: {'Min', 'Max', 'Mul', 'Add'}")
    .Attr("final_op: {'Id', 'Div'}")
    .Attr("subdiv_offsets: list(int)")
    .Attr("wait_for: list(int) = []")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::UnchangedShape);
// V1 collective gather: output's leading dimension is
// group_size * input's leading dimension; trailing dimensions are unchanged.
REGISTER_OP("CollectiveGather")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {float, float16, float64, int32, int64}")
    .Attr("group_size: int")
    .Attr("group_key: int")
    .Attr("instance_key: int")
    .Attr("shape: shape")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      // Input must be at least rank 1; members are concatenated along dim 0.
      shape_inference::ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused));
      shape_inference::ShapeHandle in_subshape;
      TF_RETURN_IF_ERROR(c->Subshape(c->input(0), 1, &in_subshape));
      auto input_first_dim_value = c->Value(c->Dim(c->input(0), 0));
      // Unknown leading input dim => unknown leading output dim.
      shape_inference::ShapeHandle output_first_dim_as_shape;
      if (input_first_dim_value ==
          shape_inference::InferenceContext::kUnknownDim) {
        output_first_dim_as_shape =
            c->Vector(shape_inference::InferenceContext::kUnknownDim);
      } else {
        int group_size;
        TF_CHECK_OK(c->GetAttr("group_size", &group_size));
        std::vector<shape_inference::DimensionHandle> output_first_dim;
        output_first_dim.push_back(
            c->MakeDim(group_size * input_first_dim_value));
        output_first_dim_as_shape = c->MakeShape(output_first_dim);
      }
      shape_inference::ShapeHandle out;
      TF_RETURN_IF_ERROR(
          c->Concatenate(output_first_dim_as_shape, in_subshape, &out));
      c->set_output(0, out);
      return absl::OkStatus();
    });
// V1 broadcast source: the output shape is given by the `shape` attr.
REGISTER_OP("CollectiveBcastSend")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {bool, float, float16, float64, int32, int64}")
    .Attr("group_size: int")
    .Attr("group_key: int")
    .Attr("instance_key: int")
    .Attr("shape: shape")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::ExplicitShape);
// V1 broadcast receiver: no data input; shape comes from the `shape` attr.
REGISTER_OP("CollectiveBcastRecv")
    .Output("data: T")
    .Attr("T: {bool, float, float16, float64, int32, int64}")
    .Attr("group_size: int")
    .Attr("group_key: int")
    .Attr("instance_key: int")
    .Attr("shape: shape")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::ExplicitShape);
// Computes scalar (group_size, group_key) from a runtime group assignment.
REGISTER_OP("CollectiveAssignGroupV2")
    .Input("group_assignment: int32")
    .Input("device_index: int32")
    .Input("base_key: int32")
    .Output("group_size: int32")
    .Output("group_key: int32")
    .SetDoNotOptimize()
    .SetIsDistributedCommunication()
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      c->set_output(0, c->Scalar());
      c->set_output(1, c->Scalar());
      return absl::OkStatus();
    });
// V2 all-reduce: group membership arrives as runtime tensor inputs rather
// than attrs; optional ordering tokens sequence collectives.
REGISTER_OP("CollectiveReduceV2")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {bfloat16, float, float16, float64, int32, int64}")
    .Input("group_size: int32")
    .Input("group_key: int32")
    .Input("instance_key: int32")
    .Input("ordering_token: Nordering_token * resource")
    .Attr("merge_op: {'Min', 'Max', 'Mul', 'Add'}")
    .Attr("final_op: {'Id', 'Div'}")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .Attr("is_stateless: bool = false")
    .Attr("Nordering_token: int >= 0 = 0")
    .Attr("max_subdivs_per_device: int = -1")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::UnchangedShape)
    ;
// V2 gather: leading output dimension depends on runtime group size, so it
// is left unknown at inference time.
REGISTER_OP("CollectiveGatherV2")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {float, float16, float64, int32, int64}")
    .Input("group_size: int32")
    .Input("group_key: int32")
    .Input("instance_key: int32")
    .Input("ordering_token: Nordering_token * resource")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .Attr("is_stateless: bool = false")
    .Attr("Nordering_token: int >= 0 = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      shape_inference::ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused));
      shape_inference::ShapeHandle out;
      // Leading dim is unknown: group_size is only available at runtime.
      TF_RETURN_IF_ERROR(
          c->ReplaceDim(c->input(0), 0, c->UnknownDim(), &out));
      c->set_output(0, out);
      return absl::OkStatus();
    });
// V2 broadcast source: output shape equals input shape.
REGISTER_OP("CollectiveBcastSendV2")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {bool, float, float16, float64, int32, int64}")
    .Input("group_size: int32")
    .Input("group_key: int32")
    .Input("instance_key: int32")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::UnchangedShape);
// V2 broadcast receiver: output shape is taken from the `shape` input tensor.
REGISTER_OP("CollectiveBcastRecvV2")
    .Output("data: T")
    .Attr("T: {bool, float, float16, float64, int32, int64}")
    .Input("group_size: int32")
    .Input("group_key: int32")
    .Input("instance_key: int32")
    .Input("shape: Tshape")
    .Attr("Tshape: {int32, int64} = DT_INT32")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      shape_inference::ShapeHandle out;
      // Input index 3 is the `shape` tensor.
      TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(3, &out));
      c->set_output(0, out);
      return absl::OkStatus();
    });
// Creates the communicator resource used by the V3 collective ops below.
REGISTER_OP("CollectiveInitializeCommunicator")
    .Input("group_key: int32")
    .Input("rank: int32")
    .Input("group_size: int32")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .Output("communicator: resource")
    .SetDoNotOptimize()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::ScalarShape);
// V3 all-reduce: uses a communicator resource plus a runtime group
// assignment; output shape equals input shape.
REGISTER_OP("CollectiveReduceV3")
    .Input("input: T")
    .Input("communicator: resource")
    .Input("group_assignment: int32")
    .Output("data: T")
    .Attr("T: {bfloat16, float, float16, float64, int32, int64}")
    .Attr("reduction: {'Min', 'Max', 'Mul', 'Add'}")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::UnchangedShape);
// V2 all-to-all: output shape equals input shape.
REGISTER_OP("CollectiveAllToAllV2")
    .Input("input: T")
    .Output("data: T")
    .Attr("T: {bfloat16, float, float16, float64, int32, int64}")
    .Input("group_size: int32")
    .Input("group_key: int32")
    .Input("instance_key: int32")
    .Input("ordering_token: Nordering_token * resource")
    .Attr("communication_hint: string = 'auto'")
    .Attr("timeout_seconds: float = 0")
    .Attr("is_stateless: bool = false")
    .Attr("Nordering_token: int >= 0 = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::UnchangedShape);
// V3 all-to-all: communicator-based variant; output shape equals input shape.
REGISTER_OP("CollectiveAllToAllV3")
    .Input("input: T")
    .Input("communicator: resource")
    .Input("group_assignment: int32")
    .Output("data: T")
    .Attr("T: {bfloat16, float, float16, float64, int32, int64}")
    .Attr("timeout_seconds: float = 0")
    .SetIsStateful()
    .SetIsDistributedCommunication()
    .SetShapeFn(shape_inference::UnchangedShape);
} | #include <array>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/computation_placer.h"
#include "xla/service/executable.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
class CollectiveOpsTest : public HloTestBase {
public:
  // Logs the device count at fixture construction to aid debugging.
  CollectiveOpsTest() {
    VLOG(1) << "Running with " << num_devices() << " devices";
  }
 protected:
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
    // Disable the pass that converts async collectives back to sync, so the
    // async form stays in the compiled module.
    debug_options.add_xla_disable_hlo_passes(
        "gpu-convert-async-collectives-to-sync");
    return debug_options;
  }
  // Builds an HLO module performing an all-reduce of the given shape, with
  // the reduction operator (`op`), element type (`datatype`) and replica
  // groups spliced into a text template. An empty `replica_groups` means one
  // group containing all replicas.
  std::unique_ptr<HloModule> MakeCrsModule(
      const Shape& shape, std::vector<std::vector<int64_t>> replica_groups,
      const HloModuleConfig& config, std::string op = "add",
      std::string datatype = "f32") {
    std::string hlo_template = R"(
      HloModule test
      apply_op {
        x = DATATYPE[] parameter(0)
        y = DATATYPE[] parameter(1)
        ROOT apply_op = DATATYPE[] OP(x, y)
      }
      ENTRY test_computation {
        p = SHAPE parameter(0)
        p2 = SHAPE reshape(p)
        crs = SHAPE all-reduce(p2), replica_groups=REPLICA_GROUPS, to_apply=apply_op
        copy = SHAPE copy(crs)
        ROOT out = SHAPE reshape(copy)
      }
    )";
    std::vector<std::string> replica_group_strs;
    replica_group_strs.reserve(replica_groups.size());
    for (const auto& g : replica_groups) {
      replica_group_strs.push_back(
          absl::StrFormat("{%s}", absl::StrJoin(g, ",")));
    }
    std::string shape_str = shape.ToString(/*print_layout=*/false);
    // f32[1] is special-cased: the reshapes/all-reduce operate on a scalar.
    if (shape_str == "f32[1]") {
      hlo_template = absl::StrReplaceAll(
          hlo_template,
          {{"DATATYPE[SHAPE] reshape(p)", "DATATYPE[] reshape(p)"},
           {"DATATYPE[SHAPE] all-reduce", "DATATYPE[] all-reduce"},
           {"DATATYPE[SHAPE] copy", "DATATYPE[] copy"}});
    }
    std::string parameterized_hlo = absl::StrReplaceAll(
        hlo_template,
        {{"SHAPE", shape_str},
         {"REPLICA_GROUPS",
          absl::StrFormat("{%s}", absl::StrJoin(replica_group_strs, ", "))},
         {"OP", op},
         {"DATATYPE", datatype}});
    return ParseAndReturnVerifiedModule(parameterized_hlo, config).value();
  }
template <typename LiteralType>
void TestTwoReplicasOneOperand(std::string op, Literal input_value,
Literal expected_value) {
const int kNumReplicas = 2;
std::string dtype = primitive_util::LowercasePrimitiveTypeName(
primitive_util::NativeToPrimitiveType<LiteralType>());
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
auto module = MakeCrsModule(
input_value.shape(),
{}, config,
op, dtype);
TF_ASSERT_OK_AND_ASSIGN(std::vector<Literal> results,
ExecuteReplicated(std::move(module), {&input_value},
kNumReplicas,
true,
true));
for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) {
EXPECT_TRUE(LiteralTestUtil::NearOrEqual(
expected_value, results[replica_idx], ErrorSpec{1e-5, 1e-5}));
}
}
template <typename LiteralType>
void TestAllOpsForReduce() {
auto cast = [&](int value) { return static_cast<LiteralType>(value); };
auto to_literal = [&](absl::Span<const LiteralType> values) {
return LiteralUtil::CreateR1<LiteralType>(values);
};
Literal input_value = to_literal({cast(1), cast(2), cast(3)});
TestTwoReplicasOneOperand<LiteralType>(
"add",
input_value.Clone(),
to_literal({cast(2), cast(4), cast(6)}));
TestTwoReplicasOneOperand<LiteralType>(
"multiply",
input_value.Clone(),
to_literal({cast(1), cast(4), cast(9)}));
TestTwoReplicasOneOperand<LiteralType>(
"maximum",
input_value.Clone(),
to_literal({cast(1), cast(2), cast(3)}));
TestTwoReplicasOneOperand<LiteralType>(
"minimum",
input_value.Clone(),
to_literal({cast(1), cast(2), cast(3)}));
if constexpr (std::numeric_limits<LiteralType>::is_signed) {
input_value = to_literal({cast(-1), cast(-2), cast(-3)});
TestTwoReplicasOneOperand<LiteralType>(
"add",
input_value.Clone(),
to_literal({cast(-2), cast(-4), cast(-6)}));
TestTwoReplicasOneOperand<LiteralType>(
"multiply",
input_value.Clone(),
to_literal({cast(1), cast(4), cast(9)}));
TestTwoReplicasOneOperand<LiteralType>(
"maximum",
input_value.Clone(),
to_literal({cast(-1), cast(-2), cast(-3)}));
TestTwoReplicasOneOperand<LiteralType>(
"minimum",
input_value.Clone(),
to_literal({cast(-1), cast(-2), cast(-3)}));
}
}
};
// Returns every non-empty subset of {0, 1, ..., n - 1}. Subsets are produced
// in increasing order of their bitmask encoding, and each subset lists its
// elements in ascending order.
std::vector<std::vector<int64_t>> PowerSetOfIota(int64_t n) {
  std::vector<std::vector<int64_t>> subsets;
  const int64_t num_masks = int64_t{1} << n;
  for (int64_t mask = 1; mask < num_masks; ++mask) {
    std::vector<int64_t> subset;
    for (int64_t bit = 0; bit < n; ++bit) {
      if ((mask >> bit) & 1) {
        subset.push_back(bit);
      }
    }
    subsets.push_back(std::move(subset));
  }
  return subsets;
}
// Builds a single-computation DeviceAssignment mapping replica i to
// devices[i].
DeviceAssignment MakeDeviceAssn(std::vector<int64_t> devices) {
  DeviceAssignment assn(/*replica_count=*/devices.size(),
                        /*computation_count=*/1);
  // Cast the size once to avoid a signed/unsigned comparison in the loop.
  const int64_t num_devices = static_cast<int64_t>(devices.size());
  for (int64_t i = 0; i < num_devices; ++i) {
    assn(i, 0) = devices[i];
  }
  return assn;
}
// Convenience conversion to Eigen::half for building fp16 test literals.
template <typename T>
static Eigen::half ToHalf(T value) {
  return static_cast<Eigen::half>(value);
}
// Two-replica "add" all-reduce over a 2-D f32 operand; each replica expects
// the element-wise doubled input.
XLA_TEST_F(CollectiveOpsTest, AllReduce_sum_float32_2D) {
  TestTwoReplicasOneOperand<float>(
      "add",
      /*input_value=*/LiteralUtil::CreateR2<float>({{1, 2}, {3, 4}}),
      /*expected_value=*/LiteralUtil::CreateR2<float>({{2, 4}, {6, 8}}));
}
// Single-element operand; exercises the f32[1] -> f32[] scalar special case
// in MakeCrsModule.
XLA_TEST_F(CollectiveOpsTest, AllReduceSingleOutput_float32) {
  TestTwoReplicasOneOperand<float>(
      "add",
      /*input_value=*/LiteralUtil::CreateR1<float>({1}),
      /*expected_value=*/LiteralUtil::CreateR1<float>({2}));
}
// The following tests run add/multiply/maximum/minimum all-reduces (plus the
// negative-input variants for signed types) for each supported element type.
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_int8) {
  TestAllOpsForReduce<int8_t>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_uint8) {
  TestAllOpsForReduce<uint8_t>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_uint32) {
  TestAllOpsForReduce<uint32_t>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_int32) {
  TestAllOpsForReduce<int32_t>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_int64) {
  TestAllOpsForReduce<int64_t>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_uint64) {
  TestAllOpsForReduce<uint64_t>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_float32) {
  TestAllOpsForReduce<float>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_double) {
  TestAllOpsForReduce<double>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_half) {
  TestAllOpsForReduce<Eigen::half>();
}
XLA_TEST_F(CollectiveOpsTest, AllReduceTwoReplicasOneOperand_bfloat16) {
  TestAllOpsForReduce<bfloat16>();
}
// Complex-element all-reduces; disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllReduce_sum_complex64)) {
  TestTwoReplicasOneOperand<complex64>(
      "add",
      /*input_value=*/LiteralUtil::CreateR1<complex64>({{1, 2}, {3, 4}}),
      /*expected_value=*/LiteralUtil::CreateR1<complex64>({{2, 4}, {6, 8}}));
}
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllReduce_sum_complex128)) {
  TestTwoReplicasOneOperand<complex128>(
      "add",
      /*input_value=*/LiteralUtil::CreateR1<complex128>({{1, 2}, {3, 4}}),
      /*expected_value=*/LiteralUtil::CreateR1<complex128>({{2, 4}, {6, 8}}));
}
// All-reduce with a logical-and reduction over pred[] operands. The second
// half feeds replica-dependent inputs (only replica 0 produces true), so the
// and-reduction across all replicas yields false everywhere.
XLA_TEST_F(CollectiveOpsTest, AllReduceAnd_Pred) {
  // Test with equal elements.
  TestTwoReplicasOneOperand<bool>(
      "and",
      /*input_value=*/LiteralUtil::CreateR1<bool>({true, false}),
      /*expected_value=*/LiteralUtil::CreateR1<bool>({true, false}));
  // Test with {true, false}.
  const char* hlo_module = R"(
    HloModule test
    apply_op {
      x = pred[] parameter(0)
      y = pred[] parameter(1)
      ROOT apply_op = pred[] and(x, y)
    }
    ENTRY test_computation {
      id = u32[] replica-id()
      c = u32[] constant(0)
      p = pred[] compare(id, c), direction=EQ
      p2 = pred[1] reshape(p)
      crs = pred[1] all-reduce(p2), replica_groups={}, to_apply=apply_op
      copy = pred[1] copy(crs)
      ROOT out = pred[1] reshape(copy)
    }
  )";
  HloModuleConfig config = GetModuleConfigForTest(/*replica_count=*/2);
  auto module = ParseAndReturnVerifiedModule(hlo_module, config).value();
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        /*num_replicas=*/2,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  for (int replica_idx = 0; replica_idx < 2; replica_idx++) {
    EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<bool>({false}),
                                       results[replica_idx]));
  }
}
// Same structure with a logical-or reduction: one replica contributing true
// makes the or-reduction true everywhere.
XLA_TEST_F(CollectiveOpsTest, AllReduceOr_Pred) {
  // Test with equal elements.
  TestTwoReplicasOneOperand<bool>(
      "or",
      /*input_value=*/LiteralUtil::CreateR1<bool>({true, false}),
      /*expected_value=*/LiteralUtil::CreateR1<bool>({true, false}));
  // Test with {true, false}.
  const char* hlo_module = R"(
    HloModule test
    apply_op {
      x = pred[] parameter(0)
      y = pred[] parameter(1)
      ROOT apply_op = pred[] or(x, y)
    }
    ENTRY test_computation {
      id = u32[] replica-id()
      c = u32[] constant(0)
      p = pred[] compare(id, c), direction=EQ
      p2 = pred[1] reshape(p)
      crs = pred[1] all-reduce(p2), replica_groups={}, to_apply=apply_op
      copy = pred[1] copy(crs)
      ROOT out = pred[1] reshape(copy)
    }
  )";
  HloModuleConfig config = GetModuleConfigForTest(/*replica_count=*/2);
  auto module = ParseAndReturnVerifiedModule(hlo_module, config).value();
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        /*num_replicas=*/2,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  for (int replica_idx = 0; replica_idx < 2; replica_idx++) {
    EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<bool>({true}),
                                       results[replica_idx]));
  }
}
// Runs an "add" all-reduce over every non-empty subset of available devices,
// with an explicit static device assignment per subset. Only checks that
// execution succeeds; the numeric results are not verified here.
XLA_TEST_F(CollectiveOpsTest, AllReduce_AllCombinations) {
  const int64_t kNumElems = 1024;
  for (std::vector<int64_t> devices : PowerSetOfIota(num_devices())) {
    SCOPED_TRACE(absl::StrFormat("Running on devices {%s}",
                                 absl::StrJoin(devices, ", ")));
    DeviceAssignment device_assn = MakeDeviceAssn(devices);
    HloModuleConfig config =
        GetModuleConfigForTest(/*replica_count=*/devices.size());
    config.set_static_device_assignment(device_assn);
    std::vector<float> input_vec(kNumElems);
    absl::c_iota(input_vec, 0);
    auto input_literal = LiteralUtil::CreateR1<float>(input_vec);
    auto module = MakeCrsModule(input_literal.shape(),
                                /*replica_groups=*/{}, config);
    TF_ASSERT_OK_AND_ASSIGN(
        std::vector<Literal> results,
        ExecuteReplicated(std::move(module), {&input_literal},
                          /*num_replicas=*/devices.size(), &device_assn,
                          /*run_hlo_passes=*/true, /*use_threads=*/true));
  }
}
// Stress test: many threads concurrently launching the same two-replica
// all-reduce executable. Disabled on GPU.
XLA_TEST_F(CollectiveOpsTest,
           DISABLED_ON_GPU(AllReduce_ManyConcurrentAllReduces)) {
  const int64_t kNumElems = 1024;
  const int64_t kNumThreads = 200;
  const int64_t kRunsPerThread = 10;
  std::vector<float> input_vec(kNumElems);
  absl::c_iota(input_vec, 0);
  auto input_literal = LiteralUtil::CreateR1<float>(input_vec);
  HloModuleConfig config = GetModuleConfigForTest(/*replica_count=*/2);
  // Compile once and share the executable across all launching threads.
  auto executable =
      test_runner_
          .CreateExecutable(MakeCrsModule(input_literal.shape(),
                                          /*replica_groups=*/{}, config),
                            /*run_hlo_passes=*/true)
          .value();
  std::vector<int64_t> devices = {0, 1};
  auto device_assn = MakeDeviceAssn(devices);
  HloRunner::ReplicatedExecuteOptions opts;
  opts.num_replicas = devices.size();
  opts.use_threads = true;
  opts.arguments.push_back(&input_literal);
  tsl::BlockingCounter done(kNumThreads * kRunsPerThread);
  tsl::thread::ThreadPool pool(tsl::Env::Default(), TestName(), kNumThreads);
  for (int64_t i = 0; i < kNumThreads * kRunsPerThread; ++i) {
    pool.Schedule([&] {
      TF_ASSERT_OK(
          test_runner_.ExecuteReplicated(executable.get(), opts, &device_assn)
              .status());
      done.DecrementCount();
    });
  }
  done.Wait();
}
// Two independent all-reduces in one module whose results are returned as a
// tuple; each replica should see both sums.
XLA_TEST_F(CollectiveOpsTest, AllReduce_CombinableAllReduces) {
  std::string hlo_string = R"(
    HloModule test
    apply_op {
      x = f32[] parameter(0)
      y = f32[] parameter(1)
      ROOT apply_op = f32[] add(x, y)
    }
    ENTRY test_computation {
      p0 = f32[5] parameter(0)
      p1 = f32[5] parameter(1)
      crs0 = f32[5] all-reduce(p0), replica_groups={}, to_apply=apply_op
      crs1 = f32[5] all-reduce(p1), replica_groups={}, to_apply=apply_op
      ROOT out = (f32[5], f32[5]) tuple(f32[5] crs0, f32[5] crs1)
    }
  )";
  static constexpr int kNumReplicas = 2;
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo_string, config));
  std::vector<float> input0_vec = {1., 2., 3., 4., 5.};
  auto input0_literal = LiteralUtil::CreateR1<float>(input0_vec);
  std::vector<float> input1_vec = {7., 3., 4., 1., 2.};
  auto input1_literal = LiteralUtil::CreateR1<float>(input1_vec);
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), {&input0_literal, &input1_literal},
                        /*num_replicas=*/kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  std::vector<float> expected0_vec = {2., 4., 6., 8., 10.};
  auto expected0_literal = LiteralUtil::CreateR1<float>(expected0_vec);
  std::vector<float> expected1_vec = {14., 6., 8., 2., 4.};
  auto expected1_literal = LiteralUtil::CreateR1<float>(expected1_vec);
  for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) {
    auto rs = results[replica_idx].DecomposeTuple();
    EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected0_literal, rs[0],
                                             ErrorSpec{1e-5, 1e-5}));
    EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected1_literal, rs[1],
                                             ErrorSpec{1e-5, 1e-5}));
  }
}
// Replica groups {0}, {1,2}, {3}: singleton groups act as identity while the
// two-member group sums two copies of the input.
XLA_TEST_F(CollectiveOpsTest, AllReduce_ThreeReplicaGroups) {
  // kNumElems is deliberately not a power of 2.
  const int64_t kNumElems = 137;
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  std::vector<float> input_vec(kNumElems);
  absl::c_iota(input_vec, 0);
  auto input_literal = LiteralUtil::CreateR1<float>(input_vec);
  auto module = MakeCrsModule(
      input_literal.shape(),
      /*replica_groups=*/{{0}, {1, 2}, {3}}, config);
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), {&input_literal},
                        /*num_replicas=*/4,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), 4);
  std::vector<float> input_vec_doubled;
  input_vec_doubled.reserve(input_vec.size());
  for (float n : input_vec) {
    input_vec_doubled.push_back(n * 2);
  }
  auto input_literal_doubled = LiteralUtil::CreateR1<float>(input_vec_doubled);
  EXPECT_TRUE(LiteralTestUtil::Equal(input_literal, results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(input_literal_doubled, results[1]));
  EXPECT_TRUE(LiteralTestUtil::Equal(input_literal_doubled, results[2]));
  EXPECT_TRUE(LiteralTestUtil::Equal(input_literal, results[3]));
}
// All replica groups are singletons, so the all-reduce is a no-op and every
// replica just gets back its own replica id.
XLA_TEST_F(CollectiveOpsTest, AllReduce_Degenerate) {
  const char* const kModuleStr = R"(
    HloModule test
    apply_op {
      x = u32[] parameter(0)
      y = u32[] parameter(1)
      ROOT apply_op = u32[] add(x, y)
    }
    ENTRY test_computation {
      id = u32[] replica-id()
      ROOT crs = u32[] all-reduce(id), replica_groups={{0},{1},{2},{3}}, to_apply=apply_op
    }
  )";
  static constexpr int kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  for (int i = 0; i < kNumReplicas; ++i) {
    LiteralTestUtil::ExpectR0Equal<uint32_t>(i, results[i]);
  }
}
// Async all-reduce (all-reduce-start / all-reduce-done) over replica ids;
// every replica expects the sum 0 + 1 + ... + (n - 1). HLO passes are not
// run so the hand-written async pair is preserved. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllReduce)) {
  const absl::string_view kModuleStr = R"(
    HloModule test
    apply_op {
      x = u32[] parameter(0)
      y = u32[] parameter(1)
      ROOT apply_op = u32[] add(x, y)
    }
    ENTRY test_computation {
      id = u32[] replica-id()
      start = u32[] all-reduce-start(id), to_apply=apply_op, backend_config="{\"is_sync\":false}"
      ROOT done = u32[] all-reduce-done(start)
    }
  )";
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/num_devices());
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        num_devices(),
                        /*use_threads=*/true, /*run_hlo_passes=*/false));
  ASSERT_EQ(results.size(), num_devices());
  // Sum of the replica ids 0..n-1.
  uint32_t expected = num_devices() * (num_devices() - 1) / 2;
  for (int i = 0; i < num_devices(); ++i) {
    LiteralTestUtil::ExpectR0Equal<uint32_t>(expected, results[i]);
  }
}
// Async all-reduce with two operands (id and id^2); checks the sum of ids
// and the sum of squared ids per replica. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllReduceTwoOperands)) {
  const absl::string_view kModuleStr = R"(
    HloModule test
    apply_op {
      x = u32[] parameter(0)
      y = u32[] parameter(1)
      ROOT apply_op = u32[] add(x, y)
    }
    ENTRY test_computation {
      id = u32[] replica-id()
      id2 = u32[] multiply(id, id)
      start = (u32[], u32[]) all-reduce-start(id, id2), to_apply=apply_op, backend_config="{\"is_sync\":false}"
      ROOT done = (u32[], u32[]) all-reduce-done(start)
    }
  )";
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/num_devices());
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        num_devices(),
                        /*use_threads=*/true, /*run_hlo_passes=*/false));
  ASSERT_EQ(results.size(), num_devices());
  // Sum of ids, and sum of squares of ids (0^2 + 1^2 + ... + (n-1)^2).
  uint32_t expected0 = num_devices() * (num_devices() - 1) / 2;
  uint32_t expected1 =
      num_devices() * (num_devices() - 1) * (2 * num_devices() - 1) / 6;
  for (int i = 0; i < num_devices(); ++i) {
    std::vector<Literal> replica_results = results[i].DecomposeTuple();
    LiteralTestUtil::ExpectR0Equal<uint32_t>(expected0, replica_results[0]);
    LiteralTestUtil::ExpectR0Equal<uint32_t>(expected1, replica_results[1]);
  }
}
// Sanity check: replica-id() returns each replica's own index.
XLA_TEST_F(CollectiveOpsTest, ReplicaId) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      id = u32[] replica-id()
      ROOT out = u32[] copy(id)
    }
  )";
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/num_devices());
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        num_devices(),
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), num_devices());
  for (uint32_t i = 0; i < num_devices(); ++i) {
    EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR0(i), results[i]));
  }
}
// Collective-broadcast wrapped in async-start/async-done. The first entry of
// the replica group ({1, ...}) is the source, so every replica receives
// replica 1's value (1 + 10 = 11). Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(CollectiveBroadcast_TwoGPUs)) {
  const char* const kModuleStr = R"(
    HloModule test
    collective_broadcast {
      p0 = u32[2] parameter(0)
      ROOT result = u32[2] collective-broadcast(p0), replica_groups={{1, 0}}
    }
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      cb = ((u32[2]), u32[2]) async-start(u32[2] %p), calls=collective_broadcast
      ROOT res = u32[2] async-done(cb), calls=collective_broadcast
    }
  )";
  const int64_t kNumReplicas = 2;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[1]));
}
// Four-replica variant: group {1, 0, 2, 3} again makes replica 1 the source,
// so all replicas end with {11, 11}. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(CollectiveBroadcast_Simple)) {
  const char* const kModuleStr = R"(
    HloModule test
    collective_broadcast {
      p0 = u32[2] parameter(0)
      ROOT result = u32[2] collective-broadcast(p0), replica_groups={{1, 0, 2, 3}}
    }
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      cb = ((u32[2]), u32[2]) async-start(u32[2] %p), calls=collective_broadcast
      ROOT res = u32[2] async-done(cb), calls=collective_broadcast
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[1]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[2]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[3]));
}
// Collective-permute that swaps the two replicas' values: replica 0 gets
// replica 1's {11, 11} and vice versa.
XLA_TEST_F(CollectiveOpsTest, CollectivePermute_TwoGPUs) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      permute = u32[2] collective-permute(p), source_target_pairs={{1,0}, {0,1}}
      ROOT copy = u32[2] copy(permute)
    }
  )";
  const int64_t kNumReplicas = 2;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}),
                                     results[1]));
}
// Pairs {1,0}, {0,1}, {2,2}: replicas 0 and 1 swap, replica 2 keeps its own
// value, and replica 3 (no source) receives zeros.
XLA_TEST_F(CollectiveOpsTest, CollectivePermute_Simple) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      permute = u32[2] collective-permute(p), source_target_pairs={{1,0}, {0,1}, {2,2}}
      ROOT copy = u32[2] copy(permute)
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}),
                                     results[1]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}),
                                     results[2]));
  // Replica 3 has no source, so it gets a zero-filled result.
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({0, 0}),
                                     results[3]));
}
// Identity permutation on all four replicas: each replica keeps its own
// value.
XLA_TEST_F(CollectiveOpsTest, CollectivePermute_Degenerate) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      permute = u32[2] collective-permute(p), source_target_pairs={{0,0}, {1,1}, {2,2}, {3,3}}
      ROOT copy = u32[2] copy(permute)
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[1]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}),
                                     results[2]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({13, 13}),
                                     results[3]));
}
// Identity pairs only for replicas 0-2; replica 3 is not a target, so it
// receives zeros.
XLA_TEST_F(CollectiveOpsTest, CollectivePermute_NotDegenerate) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      permute = u32[2] collective-permute(p), source_target_pairs={{0,0}, {1,1}, {2,2}}
      ROOT copy = u32[2] copy(permute)
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[1]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}),
                                     results[2]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({0, 0}),
                                     results[3]));
}
// Cyclic rotation 0->1->2->3->0: each replica receives the value of its
// predecessor in the cycle.
XLA_TEST_F(CollectiveOpsTest, CollectivePermute_Rotate) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      permute = u32[2] collective-permute(p), source_target_pairs={{0,1}, {1,2}, {2,3}, {3,0}}
      ROOT copy = u32[2] copy(permute)
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({13, 13}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}),
                                     results[1]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[2]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({12, 12}),
                                     results[3]));
}
// Async collective-permute (collective-permute-start / -done) swapping two
// replicas. HLO passes are not run so the async pair stays intact. Disabled
// on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncCollectivePermute)) {
  const absl::string_view kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      replica = u32[] replica-id()
      ten = u32[] constant(10)
      sum = u32[] add(replica, ten)
      p = u32[2] broadcast(sum), dimensions={}
      start = (u32[2], u32[2]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}, backend_config="{\"is_sync\":false}"
      ROOT done = u32[2] collective-permute-done(start)
    }
  )";
  const int64_t kNumReplicas = 2;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/false));
  ASSERT_EQ(results.size(), kNumReplicas);
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
                                     results[0]));
  EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({10, 10}),
                                     results[1]));
}
// Tuple-form all-to-all with empty replica_groups (all four replicas in one
// group): operand k sent by replica r arrives as the r-th tuple element on
// replica k.
XLA_TEST_F(CollectiveOpsTest, AllToAll_EmptyReplicaGroups) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      id = u32[] replica-id()
      id2 = u32[2] broadcast(id), dimensions={}
      a0 = u32[2] constant({10, 15})
      b0 = u32[2] constant({20, 25})
      c0 = u32[2] constant({30, 35})
      d0 = u32[2] constant({40, 45})
      a1 = u32[2] add(id2, a0)
      b1 = u32[2] add(id2, b0)
      c1 = u32[2] add(id2, c0)
      d1 = u32[2] add(id2, d0)
      all2all = (u32[2], u32[2], u32[2], u32[2]) all-to-all(a1, b1, c1, d1), replica_groups={}
      a_prime = u32[2] get-tuple-element(all2all), index=0
      b_prime = u32[2] get-tuple-element(all2all), index=1
      c_prime = u32[2] get-tuple-element(all2all), index=2
      d_prime = u32[2] get-tuple-element(all2all), index=3
      ROOT out = u32[8] concatenate(a_prime, b_prime, c_prime, d_prime), dimensions={0}
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16, 12, 17, 13, 18},
                                           results[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({20, 25, 21, 26, 22, 27, 23, 28},
                                           results[1]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({30, 35, 31, 36, 32, 37, 33, 38},
                                           results[2]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({40, 45, 41, 46, 42, 47, 43, 48},
                                           results[3]);
}
// Same exchange but with the replica group listed in reverse order
// ({3,2,1,0}), which reverses which replica receives which operand.
XLA_TEST_F(CollectiveOpsTest, AllToAll_OrderedReplicaGroups) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      id = u32[] replica-id()
      id2 = u32[2] broadcast(id), dimensions={}
      a0 = u32[2] constant({10, 15})
      b0 = u32[2] constant({20, 25})
      c0 = u32[2] constant({30, 35})
      d0 = u32[2] constant({40, 45})
      a1 = u32[2] add(id2, a0)
      b1 = u32[2] add(id2, b0)
      c1 = u32[2] add(id2, c0)
      d1 = u32[2] add(id2, d0)
      all2all = (u32[2], u32[2], u32[2], u32[2]) all-to-all(a1, b1, c1, d1), replica_groups={{3,2,1,0}}
      a_prime = u32[2] get-tuple-element(all2all), index=0
      b_prime = u32[2] get-tuple-element(all2all), index=1
      c_prime = u32[2] get-tuple-element(all2all), index=2
      d_prime = u32[2] get-tuple-element(all2all), index=3
      ROOT out = u32[8] concatenate(a_prime, b_prime, c_prime, d_prime), dimensions={0}
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({43, 48, 42, 47, 41, 46, 40, 45},
                                           results[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({33, 38, 32, 37, 31, 36, 30, 35},
                                           results[1]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({23, 28, 22, 27, 21, 26, 20, 25},
                                           results[2]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({13, 18, 12, 17, 11, 16, 10, 15},
                                           results[3]);
}
// Two disjoint replica groups ({2,1} and {3,0}), each exchanging two
// operands among its own members.
XLA_TEST_F(CollectiveOpsTest, AllToAll_TwoReplicaGroups) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      id = u32[] replica-id()
      id2 = u32[2] broadcast(id), dimensions={}
      a0 = u32[2] constant({10, 15})
      b0 = u32[2] constant({20, 25})
      a1 = u32[2] add(id2, a0)
      b1 = u32[2] add(id2, b0)
      all2all = (u32[2], u32[2]) all-to-all(a1, b1), replica_groups={{2,1},{3,0}}
      a_prime = u32[2] get-tuple-element(all2all), index=0
      b_prime = u32[2] get-tuple-element(all2all), index=1
      ROOT out = u32[4] concatenate(a_prime, b_prime), dimensions={0}
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({23, 28, 20, 25}, results[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({22, 27, 21, 26}, results[1]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({12, 17, 11, 16}, results[2]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({13, 18, 10, 15}, results[3]);
}
// Array-form all-to-all that splits along dimension 0 across four replicas
// instead of exchanging tuple operands. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllToAll_SplitDimension)) {
  const char* const kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      id = u32[] replica-id()
      id2 = u32[4, 2] broadcast(id), dimensions={}
      a0 = u32[4, 2] constant({{10, 15}, {20, 25}, {30, 35}, {40, 45}})
      a1 = u32[4, 2] add(id2, a0)
      all2all = u32[4, 2] all-to-all(a1), replica_groups={{0,1,2,3}}, dimensions={0}
      ROOT out = u32[8] reshape(all2all)
    }
  )";
  const int64_t kNumReplicas = 4;
  SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16, 12, 17, 13, 18},
                                           results[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({20, 25, 21, 26, 22, 27, 23, 28},
                                           results[1]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({30, 35, 31, 36, 32, 37, 33, 38},
                                           results[2]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({40, 45, 41, 46, 42, 47, 43, 48},
                                           results[3]);
}
XLA_TEST_F(CollectiveOpsTest, AllGather_Dim0) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
id = u32[] replica-id()
id2 = u32[1, 2] broadcast(id), dimensions={}
a0 = u32[1, 2] constant({{10, 15}})
a1 = u32[1, 2] add(id2, a0)
allgather = u32[2, 2] all-gather(a1), dimensions={0}
ROOT out = u32[4] reshape(allgather)
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
ASSERT_EQ(results.size(), kNumReplicas);
for (const Literal& result : results) {
LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16}, result);
}
}
// Same dim-0 all-gather as AllGather_Dim0, but exercising the
// use_global_device_ids=true + channel_id form with an explicit replica
// group; the gathered values must be identical on both replicas.
XLA_TEST_F(CollectiveOpsTest, AllGather_Dim0_UseGlobalDevices) {
  const char* const kHloText = R"(
HloModule test
ENTRY test_computation {
id = u32[] replica-id()
id2 = u32[1, 2] broadcast(id), dimensions={}
a0 = u32[1, 2] constant({{10, 15}})
a1 = u32[1, 2] add(id2, a0)
allgather = u32[2, 2] all-gather(a1), dimensions={0}, use_global_device_ids=true, channel_id=7, replica_groups={{0, 1}}
ROOT out = u32[4] reshape(allgather)
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  for (const Literal& output : outputs) {
    LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16}, output);
  }
}
// All-gather along dimension 1: replica r contributes the column
// {10 + r, 15 + r}; after gathering, columns are interleaved per row, so the
// flattened result is {10, 11, 15, 16} on every replica.
XLA_TEST_F(CollectiveOpsTest, AllGather_Dim1) {
  const char* const kHloText = R"(
HloModule test
ENTRY test_computation {
id = u32[] replica-id()
id2 = u32[2, 1] broadcast(id), dimensions={}
a0 = u32[2, 1] constant({{10}, {15}})
a1 = u32[2, 1] add(id2, a0)
allgather = u32[2, 2] all-gather(a1), dimensions={1}
ROOT out = u32[4] reshape(allgather)
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  for (const Literal& output : outputs) {
    LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 11, 15, 16}, output);
  }
}
// Tuple-shaped all-reduce: a single all-reduce op reduces two operands of
// different lengths (f32[5] and f32[7]) and yields them as a tuple. Both
// replicas feed identical inputs, so each reduced element is doubled; the
// tuple elements are checked per replica with a small float tolerance.
XLA_TEST_F(CollectiveOpsTest, AllReduce_TupleAllReduce) {
// Tuple all-reduce is not supported under the MLIR lowering pipeline.
if (IsMlirLoweringEnabled()) {
GTEST_SKIP();
}
std::string hlo_string = R"(
HloModule test
apply_op {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT apply_op = f32[] add(x, y)
}
ENTRY test_computation {
p0 = f32[5] parameter(0)
p1 = f32[7] parameter(1)
ROOT out = (f32[5], f32[7]) all-reduce(p0, p1), replica_groups={}, to_apply=apply_op
}
)";
static constexpr int kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string, config));
std::vector<float> input0_vec = {1., 2., 3., 4., 5.};
auto input0_literal = LiteralUtil::CreateR1<float>(input0_vec);
std::vector<float> input1_vec = {
7., 3., 4., 1., 2., 3., 4.,
};
auto input1_literal = LiteralUtil::CreateR1<float>(input1_vec);
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes —
// confirm against the HloTestBase::ExecuteReplicated overload in use.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), {&input0_literal, &input1_literal},
kNumReplicas,
true, true));
// Expected values are 2x the inputs (sum over two identical replicas).
std::vector<float> expected0_vec = {2., 4., 6., 8., 10.};
auto expected0_literal = LiteralUtil::CreateR1<float>(expected0_vec);
std::vector<float> expected1_vec = {14., 6., 8., 2., 4., 6., 8.};
auto expected1_literal = LiteralUtil::CreateR1<float>(expected1_vec);
for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) {
auto rs = results[replica_idx].DecomposeTuple();
EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected0_literal, rs[0],
ErrorSpec{1e-5, 1e-5}));
EXPECT_TRUE(LiteralTestUtil::NearOrEqual(expected1_literal, rs[1],
ErrorSpec{1e-5, 1e-5}));
}
}
// Mixed-dtype tuple all-gather: gathers a u32 operand and an f32 operand in
// one all-gather along dim 1 and checks both tuple elements on each replica.
// Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllGatherMixedTypes)) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
id = u32[] replica-id()
p0 = u32[2, 1] broadcast(id), dimensions={}
p1 = f32[2, 1] convert(p0)
allgather = (u32[2, 2], f32[2, 2]) all-gather(p0, p1), dimensions={1}
ag0 = u32[2, 2] get-tuple-element(allgather), index=0
ag1 = f32[2, 2] get-tuple-element(allgather), index=1
r0 = u32[4] reshape(ag0)
r1 = f32[4] reshape(ag1)
ROOT out = (u32[4], f32[4]) tuple(r0, r1)
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
// Each replica's gathered data interleaves replica ids 0 and 1 per row.
for (int replica_idx = 0; replica_idx < kNumReplicas; replica_idx++) {
auto rs = results[replica_idx].DecomposeTuple();
LiteralTestUtil::ExpectR1Equal<uint32_t>({0, 1, 0, 1}, rs[0]);
LiteralTestUtil::ExpectR1Near<float>({0.0, 1.0, 0.0, 1.0}, rs[1],
ErrorSpec{1e-5, 1e-5});
}
}
// Reduce-scatter over dim 0 with two replicas: replica 0 supplies
// {1..8}, replica 1 supplies {10..17}; the elementwise sums are scattered so
// replica 0 gets the first half and replica 1 the second half.
XLA_TEST_F(CollectiveOpsTest, ReduceScatter) {
  const char* const kHloText = R"(
HloModule test
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
ENTRY main {
c0 = u32[8] constant({1, 2, 3, 4, 5, 6, 7, 8})
c1 = u32[8] constant({10, 11, 12, 13, 14, 15, 16, 17})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[8] broadcast(p), dimensions={}
data = u32[8] select(pb, c0, c1)
ROOT ars = u32[4] reduce-scatter(data), replica_groups={},
dimensions={0}, to_apply=add
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  // Sums 1+10, 2+11, ... split across the two replicas.
  LiteralTestUtil::ExpectR1Equal<uint32_t>({11, 13, 15, 17}, outputs[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({19, 21, 23, 25}, outputs[1]);
}
// Reduce-scatter with constrain_layout=true: both replicas pass the same
// u32[16] parameter, so the scattered sums are 2x the inputs, split across
// the replicas.
XLA_TEST_F(CollectiveOpsTest, ReduceScatterConstrainLayout) {
const char* const kModuleStr = R"(
HloModule reduce-scatter
%sum (a: u32[], b: u32[]) -> u32[] {
%a = u32[] parameter(0)
%b = u32[] parameter(1)
ROOT %add = u32[] add(u32[] a, u32[] b)
}
ENTRY main {
%param = u32[16] parameter(0)
ROOT %rs = u32[8] reduce-scatter(u32[16] %param), replica_groups={},
constrain_layout=true, to_apply=%sum, dimensions={0}
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
std::vector<uint32_t> input_vec = {
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}};
auto input_literal = LiteralUtil::CreateR1<uint32_t>(input_vec);
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), {&input_literal}, kNumReplicas,
true, true));
LiteralTestUtil::ExpectR1Equal<uint32_t>({2, 4, 6, 8, 10, 12, 14, 16},
results[0]);
LiteralTestUtil::ExpectR1Equal<uint32_t>({18, 20, 22, 24, 26, 28, 30, 32},
results[1]);
}
// Reduce-scatter along dimension 1 of a u32[2,4] operand: each replica ends
// up with a u32[2,2] slice of the per-element sums, flattened to u32[4] for
// comparison.
XLA_TEST_F(CollectiveOpsTest, ReduceScatter_Dim1) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
ENTRY main {
c0 = u32[2, 4] constant({{ 1, 2, 3, 4}, { 5, 6, 7, 8}})
c1 = u32[2, 4] constant({{10, 11, 12, 13}, {14, 15, 16, 17}})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[2, 4] broadcast(p), dimensions={}
data = u32[2, 4] select(pb, c0, c1)
ars = u32[2, 2] reduce-scatter(data), replica_groups={},
dimensions={1}, to_apply=add
ROOT r = u32[4] reshape(ars)
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
// Replica 0 gets columns {0,1} of each row's sums, replica 1 columns {2,3}.
LiteralTestUtil::ExpectR1Equal<uint32_t>({11, 13, 19, 21}, results[0]);
LiteralTestUtil::ExpectR1Equal<uint32_t>({15, 17, 23, 25}, results[1]);
}
// Exercises reassociation of two reduce-scatters feeding a single add: both
// replicas must end up with the elementwise sum of the two scattered partial
// reductions. Disabled on CPU (targets the GPU reassociation pass).
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(ReduceScatterReassociate)) {
  const char* const kModuleStr = R"(
HloModule m
sum {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY main {
c0 = u32[8] constant({ 1, 2, 3, 4, 5, 6, 7, 8})
c1 = u32[8] constant({ 11, 12, 13, 14, 15, 16, 17, 18})
c2 = u32[8] constant({ 2, 3, 4, 5, 6, 7, 8, 9})
c3 = u32[8] constant({ 12, 13, 14, 15, 16, 17, 18, 19})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[8] broadcast(p), dimensions={}
data0 = u32[8] select(pb, c0, c1)
data1 = u32[8] select(pb, c2, c3)
rs0 = u32[4] reduce-scatter(data0), replica_groups={}, dimensions={0}, to_apply=sum
rs1 = u32[4] reduce-scatter(data1), replica_groups={}, dimensions={0}, to_apply=sum
ROOT add = u32[4] add(rs0, rs1)
}
)";
  const int64_t kNumReplicas = 2;
  HloModuleConfig config = GetModuleConfigForTest(kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  // Results are exact u32 values compared with ExpectR1Equal, so no
  // ErrorSpec/tolerance is needed (the previously declared `es` was unused
  // and has been removed).
  LiteralTestUtil::ExpectR1Equal<uint32_t>({26, 30, 34, 38}, results[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({42, 46, 50, 54}, results[1]);
}
// Exercises the ReduceScatterCreator pattern: two all-reduces each followed
// by a replica-offset dynamic-slice (which the compiler should rewrite into
// reduce-scatters) feeding a single add. Expected values match the plain
// reduce-scatter reassociation test. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest,
           DISABLED_ON_CPU(ReduceScatterReassociate_ReduceScatterCreator)) {
  const char* const kModuleStr = R"(
HloModule m
sum {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY main {
c0 = u32[8] constant({ 1, 2, 3, 4, 5, 6, 7, 8})
c1 = u32[8] constant({ 11, 12, 13, 14, 15, 16, 17, 18})
c2 = u32[8] constant({ 2, 3, 4, 5, 6, 7, 8, 9})
c3 = u32[8] constant({ 12, 13, 14, 15, 16, 17, 18, 19})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[8] broadcast(p), dimensions={}
data0 = u32[8] select(pb, c0, c1)
data1 = u32[8] select(pb, c2, c3)
ar0 = u32[8] all-reduce(data0), replica_groups={}, to_apply=sum
ar1 = u32[8] all-reduce(data1), replica_groups={}, to_apply=sum
rid = u32[] replica-id()
slice_size = u32[] constant(4)
offset = u32[] multiply(rid, slice_size)
ds0 = u32[4] dynamic-slice(ar0, offset), dynamic_slice_sizes={4}
ds1 = u32[4] dynamic-slice(ar1, offset), dynamic_slice_sizes={4}
ROOT add = u32[4] add(ds0, ds1)
}
)";
  const int64_t kNumReplicas = 2;
  HloModuleConfig config = GetModuleConfigForTest(kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(kModuleStr, config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  // Exact u32 comparison; the previously declared (and unused) ErrorSpec
  // `es` has been removed.
  LiteralTestUtil::ExpectR1Equal<uint32_t>({26, 30, 34, 38}, results[0]);
  LiteralTestUtil::ExpectR1Equal<uint32_t>({42, 46, 50, 54}, results[1]);
}
// Exercises reassociation of two all-reduces feeding a single add: both
// replicas must compute identical elementwise sums of the two reduced
// operands. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AllReduceReassociate)) {
const char* const kModuleStr = R"(
HloModule m
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
ENTRY main {
c0 = f32[8] constant({ 1, 2, 3, 4, 5, 6, 7, 8})
c1 = f32[8] constant({ 11, 12, 13, 14, 15, 16, 17, 18})
c2 = f32[8] constant({ 2, 3, 4, 5, 6, 7, 8, 9})
c3 = f32[8] constant({ 12, 13, 14, 15, 16, 17, 18, 19})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[8] broadcast(p), dimensions={}
data0 = f32[8] select(pb, c0, c1)
data1 = f32[8] select(pb, c2, c3)
ar0 = f32[8] all-reduce(data0), replica_groups={}, to_apply=sum
ar1 = f32[8] all-reduce(data1), replica_groups={}, to_apply=sum
ROOT add = f32[8] add(ar0, ar1)
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
// Float comparison needs a tolerance; also verify cross-replica agreement.
const ErrorSpec es{1e-5, 1e-5};
EXPECT_TRUE(LiteralTestUtil::NearOrEqual(results[0], results[1], es));
LiteralTestUtil::ExpectR1Near<float>(
{26.0, 30.0, 34.0, 38.0, 42.0, 46.0, 50.0, 54.0}, results[0], es);
}
// All-gather of a broadcast where the gather dimension (2) is one of the
// broadcast's mapped dimensions — exercises the broadcast/all-gather reorder
// optimization on non-uniform (per-replica-distinct) data. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest,
DISABLED_ON_CPU(AllGatherBroadcastReorder_NonUniform)) {
const char* const kModuleStr = R"(
HloModule m
ENTRY main {
c0 = u32[2, 3] constant({{ 1, 2, 3}, { 4, 5, 6}})
c1 = u32[2, 3] constant({{10, 11, 12}, {13, 14, 15}})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[2, 3] broadcast(p), dimensions={}
data = u32[2, 3] select(pb, c0, c1)
bc = u32[2, 4, 3] broadcast(data), dimensions={0, 2}
ROOT ag = u32[2, 4, 6] all-gather(bc), dimensions={2}, replica_groups={{0, 1}}
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
// Both replicas must see identical gathered data.
EXPECT_TRUE(LiteralTestUtil::Equal(results[0], results[1]));
LiteralTestUtil::ExpectR3Equal<uint32_t>({{{1, 2, 3, 10, 11, 12},
{1, 2, 3, 10, 11, 12},
{1, 2, 3, 10, 11, 12},
{1, 2, 3, 10, 11, 12}},
{{4, 5, 6, 13, 14, 15},
{4, 5, 6, 13, 14, 15},
{4, 5, 6, 13, 14, 15},
{4, 5, 6, 13, 14, 15}}},
results[0]);
}
// Companion to the NonUniform variant: here the gather dimension (1) is a
// broadcast-created dimension, so gathered rows are replicated blocks.
// Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest,
DISABLED_ON_CPU(AllGatherBroadcastReorder_Uniform)) {
const char* const kModuleStr = R"(
HloModule m
ENTRY main {
c0 = u32[2, 3] constant({{ 1, 2, 3}, { 4, 5, 6}})
c1 = u32[2, 3] constant({{10, 11, 12}, {13, 14, 15}})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[2, 3] broadcast(p), dimensions={}
data = u32[2, 3] select(pb, c0, c1)
bc = u32[2, 4, 3] broadcast(data), dimensions={0, 2}
ROOT ag = u32[2, 8, 3] all-gather(bc), dimensions={1}, replica_groups={{0, 1}}
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
// Both replicas must see identical gathered data.
EXPECT_TRUE(LiteralTestUtil::Equal(results[0], results[1]))
LiteralTestUtil::ExpectR3Equal<uint32_t>({{{1, 2, 3},
{1, 2, 3},
{1, 2, 3},
{1, 2, 3},
{10, 11, 12},
{10, 11, 12},
{10, 11, 12},
{10, 11, 12}},
{{4, 5, 6},
{4, 5, 6},
{4, 5, 6},
{4, 5, 6},
{13, 14, 15},
{13, 14, 15},
{13, 14, 15},
{13, 14, 15}}},
results[0]);
}
// Dim-0 all-gather over a 16-bit integer (u16) operand: replica r
// contributes {10 + r, 15 + r}; both replicas must see the concatenation.
XLA_TEST_F(CollectiveOpsTest, AllGather_16BitInt) {
  const char* const kHloText = R"(
HloModule test
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[1, 2] broadcast(id), dimensions={}
a0 = u16[1, 2] constant({{10, 15}})
a1 = u16[1, 2] add(id2, a0)
allgather = u16[2, 2] all-gather(a1), dimensions={0}
ROOT out = u16[4] reshape(allgather)
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  for (const Literal& output : outputs) {
    LiteralTestUtil::ExpectR1Equal<uint16_t>({10, 15, 11, 16}, output);
  }
}
// All-to-all over a u16 operand with two replicas: replica r sends
// {10 + r, 15 + r}; after the exchange, replica 0 holds the first elements
// of both replicas and replica 1 the second elements.
XLA_TEST_F(CollectiveOpsTest, AllToAll_16BitInt) {
  const char* const kHloText = R"(
HloModule test
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT a2a = u16[2] all-to-all(a1), dimensions={0}
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  LiteralTestUtil::ExpectR1Equal<uint16_t>({10, 11}, outputs[0]);
  LiteralTestUtil::ExpectR1Equal<uint16_t>({15, 16}, outputs[1]);
}
// Collective-permute over a u16 operand: replicas 0 and 1 swap their data
// via source_target_pairs {{0,1},{1,0}}.
XLA_TEST_F(CollectiveOpsTest, CollectivePermute_16BitInt) {
  const char* const kHloText = R"(
HloModule test
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[2] collective-permute(a1), source_target_pairs={{0,1}, {1,0}}
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  // Each replica receives the other replica's values.
  LiteralTestUtil::ExpectR1Equal<uint16_t>({11, 16}, outputs[0]);
  LiteralTestUtil::ExpectR1Equal<uint16_t>({10, 15}, outputs[1]);
}
// All-reduce (sum) over a u16 operand: replica r contributes
// {10 + r, 15 + r}; both replicas must see the elementwise total {21, 31}.
XLA_TEST_F(CollectiveOpsTest, AllReduce_16BitInt) {
  const char* const kHloText = R"(
HloModule test
sum {
a = u16[] parameter(0)
b = u16[] parameter(1)
ROOT add.2 = u16[] add(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[2] all-reduce(a1), replica_groups={}, to_apply=sum
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  for (const Literal& output : outputs) {
    LiteralTestUtil::ExpectR1Equal<uint16_t>({21, 31}, output);
  }
}
// Reduce-scatter (sum) over a u16 operand: the two-element sums {21, 31}
// are scattered so replica 0 gets {21} and replica 1 gets {31}.
XLA_TEST_F(CollectiveOpsTest, ReduceScatter_16BitInt) {
const char* const kModuleStr = R"(
HloModule test
sum {
a = u16[] parameter(0)
b = u16[] parameter(1)
ROOT add.2 = u16[] add(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
id = u16[] convert(id32)
id2 = u16[2] broadcast(id), dimensions={}
a0 = u16[2] constant({10, 15})
a1 = u16[2] add(id2, a0)
ROOT cp = u16[1]reduce-scatter(a1), dimensions={0}, replica_groups={}, to_apply=sum
}
)";
// NOTE(review): the HLO line above is missing a space between `u16[1]` and
// `reduce-scatter`; the HLO lexer appears to tolerate it, but confirm before
// "fixing" the string.
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing bools are presumably use_threads/run_hlo_passes.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, true));
ASSERT_EQ(results.size(), kNumReplicas);
LiteralTestUtil::ExpectR1Equal<uint16_t>({21}, results[0]);
LiteralTestUtil::ExpectR1Equal<uint16_t>({31}, results[1]);
}
// All-reduce with a `minimum` reduction over bf16 values: replica r
// contributes r + 1, so the minimum across two replicas is 1.0 everywhere.
XLA_TEST_F(CollectiveOpsTest, AllReduceBFloat16Min) {
  const char* const kHloText = R"(
HloModule test
min {
a = bf16[] parameter(0)
b = bf16[] parameter(1)
ROOT min.2 = bf16[] minimum(a, b)
}
ENTRY test_computation {
id32 = u32[] replica-id()
one = u32[] constant(1)
id32_1 = u32[] add(id32, one)
id = bf16[] convert(id32_1)
id2 = bf16[2] broadcast(id), dimensions={}
ROOT cp = bf16[2] all-reduce(id2), replica_groups={}, to_apply=min
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  const bfloat16 kExpected = static_cast<bfloat16>(1.0f);
  for (const Literal& output : outputs) {
    LiteralTestUtil::ExpectR1Equal<bfloat16>({kExpected, kExpected}, output);
  }
}
// Asynchronous all-gather via explicit all-gather-start/all-gather-done
// pairs, forced async with backend_config is_sync=false. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllGather)) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
id = u32[] replica-id()
id2 = u32[1, 2] broadcast(id), dimensions={}
a0 = u32[1, 2] constant({{10, 15}})
a1 = u32[1, 2] add(id2, a0)
ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, backend_config="{\"is_sync\":false}"
allgather = u32[2,2] all-gather-done(ags)
ROOT out = u32[4] reshape(allgather)
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): the trailing `false` here (vs `true` in the sync tests) is
// presumably run_hlo_passes=false so the hand-written start/done pair and
// its backend_config survive — confirm against HloTestBase.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, false));
ASSERT_EQ(results.size(), kNumReplicas);
for (const Literal& result : results) {
LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 15, 11, 16}, result);
}
}
// Asynchronous reduce-scatter expressed as an async-start/async-done pair
// wrapping a reduce-scatter computation, forced async with is_sync=false.
// Expected values match the synchronous ReduceScatter test. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncReduceScatter)) {
const char* const kModuleStr = R"(
HloModule test
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
reduce_scatter {
p0 = u32[8] parameter(0)
ROOT result = u32[4] reduce-scatter(p0), replica_groups={},
dimensions={0}, to_apply=add
}
ENTRY main {
c0 = u32[8] constant({1, 2, 3, 4, 5, 6, 7, 8})
c1 = u32[8] constant({10, 11, 12, 13, 14, 15, 16, 17})
zero = u32[] constant(0)
id = u32[] replica-id()
p = pred[] compare(id, zero), direction=EQ
pb = pred[8] broadcast(p), dimensions={}
data = u32[8] select(pb, c0, c1)
rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter, backend_config="{\"is_sync\":false}"
ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): run without HLO passes (trailing `false`, presumably
// run_hlo_passes) so the explicit async pair is preserved — confirm.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, false));
LiteralTestUtil::ExpectR1Equal<uint32_t>({11, 13, 15, 17}, results[0]);
LiteralTestUtil::ExpectR1Equal<uint32_t>({19, 21, 23, 25}, results[1]);
}
// Asynchronous all-to-all expressed as an async-start/async-done pair,
// forced async with is_sync=false; expected values match the synchronous
// all-to-all tests. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(AsyncAllToAll)) {
const char* const kModuleStr = R"(
HloModule test
all_to_all {
p0 = u32[2] parameter(0)
ROOT result = u32[2] all-to-all(p0), dimensions={0}
}
ENTRY test_computation {
id = u32[] replica-id()
id2 = u32[2] broadcast(id), dimensions={}
a0 = u32[2] constant({10, 15})
a1 = u32[2] add(id2, a0)
a2a-start = ((u32[2]), u32[2]) async-start(u32[2] %a1), calls=all_to_all, backend_config="{\"is_sync\":false}"
ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
const int64_t kNumReplicas = 2;
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): run without HLO passes (trailing `false`, presumably
// run_hlo_passes) so the explicit async pair is preserved — confirm.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, false));
ASSERT_EQ(results.size(), kNumReplicas);
LiteralTestUtil::ExpectR1Equal<uint32_t>({10, 11}, results[0]);
LiteralTestUtil::ExpectR1Equal<uint32_t>({15, 16}, results[1]);
}
// All-gather along an inner dimension (2) of a rank-5 shape whose other
// dimensions are mostly unit-sized: replica r contributes iota + r, so the
// flattened gather is {0..3} followed by {1..4}.
XLA_TEST_F(CollectiveOpsTest, AllGather_Dim1UnitDimensions) {
  const char* const kHloText = R"(
HloModule test
ENTRY test_computation {
id = u32[] replica-id()
id2 = u32[1, 1, 2, 1, 2] broadcast(id), dimensions={}
offset = u32[4] iota(), iota_dimension=0
offset_reshape = u32[1, 1, 2, 1, 2] reshape(offset)
agi = u32[1, 1, 2, 1, 2] add(id2, offset_reshape)
allgather = u32[1, 1, 4, 1, 2] all-gather(agi), dimensions={2}
ROOT out = u32[8] reshape(allgather)
}
)";
  constexpr int64_t kReplicaCount = 2;
  HloModuleConfig module_config = GetModuleConfigForTest(kReplicaCount);
  TF_ASSERT_OK_AND_ASSIGN(
      auto hlo_module, ParseAndReturnVerifiedModule(kHloText, module_config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> outputs,
      ExecuteReplicated(std::move(hlo_module), absl::Span<Literal* const>{},
                        kReplicaCount,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(outputs.size(), kReplicaCount);
  for (const Literal& output : outputs) {
    LiteralTestUtil::ExpectR1Equal<uint32_t>({0, 1, 2, 3, 1, 2, 3, 4}, output);
  }
}
// One-directional send/recv: replica 1 sends {11, 11} to replica 0 (pair
// {1,0}); replica 0 receives it, while replica 1's recv buffer stays zero.
// The recv is a control-predecessor of the send to pin ordering.
// Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_Simple)) {
const char* const kModuleStr = R"(
HloModule test
ENTRY test_computation {
%replica = u32[] replica-id()
%ten = u32[] constant(10)
%sum = u32[] add(%replica, %ten)
%p = u32[2] broadcast(%sum), dimensions={}
%after-all = token[] after-all()
%recv = (u32[2], u32[], token[]) recv(%after-all), channel_id=0, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}"
}
%send = (u32[2], u32[], token[]) send(%p, %after-all), channel_id=0, control-predecessors={%recv}, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}"
}
%recv-done = (u32[2], token[]) recv-done(%recv), channel_id=0
%recv-data = u32[2] get-tuple-element(%recv-done), index=0
%send-done = token[] send-done(%send), channel_id=0, control-predecessors={%recv}
ROOT copy = u32[2] copy(%recv-data)
}
)";
const int64_t kNumReplicas = 2;
SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true));
ASSERT_EQ(results.size(), kNumReplicas);
// Replica 0 receives 1 + 10 broadcast; replica 1 has no sender, so zeros.
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({11, 11}),
results[0]));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({0, 0}),
results[1]));
}
// Two concurrent send/recv chains on the same channel, distinguished by the
// _xla_send_recv_pipeline frontend attribute: replica 1 -> 0 uses pipeline 1,
// replica 0 -> 1 uses pipeline 0. Each replica selects the recv-data coming
// from the other replica and adds 1. The module is pre-scheduled
// (is_scheduled=true) because statement order is load-bearing here.
// Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_TwoConcurrentChains)) {
const char* const kModuleStr = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
replica = u32[] replica-id()
a = u32[] add(c1, replica)
send-data = u32[2] broadcast(a), dimensions={}
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="1"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0),
channel_id=0, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_pipeline="1"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}}"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=0, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}}"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
compare0 = pred[] compare(replica, c0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
send-done.0 = token[] send-done(send.0), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="1"
}
send-done.1 = token[] send-done(send.1), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
c1b = u32[2] broadcast(c1), dimensions={}
ROOT result = u32[2] add(c1b, recv-data)
})";
const int64_t kNumReplicas = 2;
SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing `false` is presumably run_hlo_passes=false so the
// pre-scheduled send/recv sequence is executed as written — confirm.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, false));
ASSERT_EQ(results.size(), kNumReplicas);
// Replica 0: 1 + (1 + 1 from replica 1) = 3; replica 1: 1 + (1 + 0) = 2.
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({3, 3}),
results[0]));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({2, 2}),
results[1]));
}
// Send/recv with _xla_send_recv_validation="invalid" on the 1->0 chain:
// that transfer is invalidated, so only replica 1 (fed by the still-valid
// 0->1 chain) is checked; replica 0's result is intentionally unasserted.
// Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_ValidationAttr1)) {
const char* const kModuleStr = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
replica = u32[] replica-id()
a = u32[] add(c1, replica)
send-data = u32[2] broadcast(a), dimensions={}
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_validation="invalid"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0),
channel_id=0, frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_validation="invalid"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
send-done.0 = token[] send-done(send.0), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}}"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=0, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}}"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
compare0 = pred[] compare(replica, c0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
send-done.1 = token[] send-done(send.1), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
c1b = u32[2] broadcast(c1), dimensions={}
ROOT result = u32[2] add(c1b, recv-data)
})";
const int64_t kNumReplicas = 2;
SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing `false` is presumably run_hlo_passes=false so the
// pre-scheduled module runs as written — confirm.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, false));
ASSERT_EQ(results.size(), kNumReplicas);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({2, 2}),
results[1]));
}
// Send/recv validation inside a two-iteration while loop: the 1->0 chain is
// only valid for iteration range {0,1} (_xla_send_recv_validation="{{0,1}}"),
// while the 0->1 chain is always valid. Each body iteration adds 1 to the
// selected received data. Disabled on CPU.
XLA_TEST_F(CollectiveOpsTest, DISABLED_ON_CPU(SendRecv_ValidationAttr2)) {
const char* const kModuleStr = R"(
HloModule test, is_scheduled=true
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(2)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(%param), index=0
send-data = get-tuple-element(%param), index=1
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_validation="{{0,1}}"
}
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0),
channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{1,0}}",
_xla_send_recv_validation="{{0,1}}"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.0 = u32[2] get-tuple-element(recv-done.0), index=0
send-done.0 = token[] send-done(send.0), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
after-all.1 = token[] after-all()
recv.1 = (u32[2], u32[], token[]) recv(after-all.1), channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}}"
}
send.1 = (u32[2], u32[], token[]) send(send-data, after-all.1),
channel_id=0,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}}"
}
recv-done.1 = (u32[2], token[]) recv-done(recv.1), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data.1 = u32[2] get-tuple-element(recv-done.1), index=0
replica = u32[] replica-id()
constant0 = u32[] constant(0)
compare0 = pred[] compare(replica, constant0), direction=EQ
compare = pred[2] broadcast(compare0), dimensions={}
recv-data = u32[2] select(compare, recv-data.0, recv-data.1)
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
send-done.1 = token[] send-done(send.1), channel_id=0,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
r = u32[] replica-id()
init = u32[2] broadcast(r), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
const int64_t kNumReplicas = 2;
SKIP_TEST_IF_NUM_DEVICES_LESS_THAN(kNumReplicas);
HloModuleConfig config =
GetModuleConfigForTest(kNumReplicas);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr, config));
// NOTE(review): trailing `false` is presumably run_hlo_passes=false so the
// pre-scheduled module runs as written — confirm.
TF_ASSERT_OK_AND_ASSIGN(
std::vector<Literal> results,
ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
kNumReplicas,
true, false));
ASSERT_EQ(results.size(), kNumReplicas);
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({2, 2}),
results[0]));
EXPECT_TRUE(LiteralTestUtil::Equal(LiteralUtil::CreateR1<uint32_t>({3, 3}),
results[1]));
}
// Collective-ops tests for 8-bit float element types. HLO text in these tests
// uses <<F8E4M3>>/<<F8E5M2>> placeholders that are rewritten at construction
// time to the platform spelling: f8e4m3fn/f8e5m2 on CUDA, otherwise the
// *fnuz variants (presumably ROCm — confirm against supported backends).
class Fp8CollectiveOpsTest : public CollectiveOpsTest {
 public:
  Fp8CollectiveOpsTest() {
    replacements_[kF8E4M3DatatypePlaceholder] =
        IsCuda() ? "f8e4m3fn" : "f8e4m3fnuz";
    replacements_[kF8E5M2DatatypePlaceholder] =
        IsCuda() ? "f8e5m2" : "f8e5m2fnuz";
  }

 protected:
  // True when the default executor reports a CUDA compute capability.
  bool IsCuda() {
    return std::holds_alternative<se::CudaComputeCapability>(Capability());
  }
  // Compute capability of the default stream executor's device.
  const se::GpuComputeCapability& Capability() {
    return backend()
        .default_stream_executor()
        ->GetDeviceDescription()
        .gpu_compute_capability();
  }
  // Placeholder -> concrete dtype string substitutions applied to module text.
  absl::flat_hash_map<absl::string_view, absl::string_view> replacements_;

 private:
  static constexpr const char* kF8E4M3DatatypePlaceholder{"<<F8E4M3>>"};
  static constexpr const char* kF8E5M2DatatypePlaceholder{"<<F8E5M2>>"};
};
// All-gathers a [1,2] fp8 constant across 2 replicas; every replica should
// end up with the concatenation [1,2,1,2]. The result is converted to f32 in
// the module so it can be compared with a float literal.
XLA_TEST_F(Fp8CollectiveOpsTest, DISABLED_ON_CPU(AllGather_8BitFloat)) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    a0 = <<F8E4M3>>[1,2] constant({{1,2}})
    allgather = <<F8E4M3>>[2, 2] all-gather(a0), dimensions={0}
    p = <<F8E4M3>>[4] reshape(allgather)
    ROOT out = f32[4] convert(p)
  }
  )";
  const int64_t kNumReplicas = 2;
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(
                       absl::StrReplaceAll(kModuleStr, replacements_), config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  // Both replicas contribute the same {1,2}, so each sees {1,2,1,2}.
  for (const Literal& result : results) {
    LiteralTestUtil::ExpectR1Equal<float>({1, 2, 1, 2}, result);
  }
}
// All-to-all over 2 replicas: element i of every replica's input goes to
// replica i. Both replicas feed {1,2}, so replica 0 collects the 1s and
// replica 1 collects the 2s.
XLA_TEST_F(Fp8CollectiveOpsTest, DISABLED_ON_CPU(AllToAll_8BitFloat)) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    a0 = <<F8E4M3>>[2] constant({1,2})
    a2a = <<F8E4M3>>[2] all-to-all(a0), dimensions={0}
    ROOT out = f32[2] convert(a2a)
  }
  )";
  const int64_t kNumReplicas = 2;
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(
                       absl::StrReplaceAll(kModuleStr, replacements_), config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  LiteralTestUtil::ExpectR1Equal<float>({1, 1}, results[0]);
  LiteralTestUtil::ExpectR1Equal<float>({2, 2}, results[1]);
}
// Collective-permute swapping data between replicas 0 and 1. Both replicas
// hold the same constant {1,2}, so each receives {1,2} from its peer.
XLA_TEST_F(Fp8CollectiveOpsTest, DISABLED_ON_CPU(CollectivePermute_8BitFloat)) {
  const char* const kModuleStr = R"(
  HloModule test
  ENTRY test_computation {
    a0 = <<F8E5M2>>[2] constant({1,2})
    a1 = <<F8E5M2>>[2] collective-permute(a0), source_target_pairs={{0,1}, {1,0}}
    ROOT out = f32[2] convert(a1)
  }
  )";
  const int64_t kNumReplicas = 2;
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/kNumReplicas);
  TF_ASSERT_OK_AND_ASSIGN(
      auto module, ParseAndReturnVerifiedModule(
                       absl::StrReplaceAll(kModuleStr, replacements_), config));
  TF_ASSERT_OK_AND_ASSIGN(
      std::vector<Literal> results,
      ExecuteReplicated(std::move(module), absl::Span<Literal* const>{},
                        kNumReplicas,
                        /*use_threads=*/true, /*run_hlo_passes=*/true));
  ASSERT_EQ(results.size(), kNumReplicas);
  LiteralTestUtil::ExpectR1Equal<float>({1, 2}, results[0]);
  LiteralTestUtil::ExpectR1Equal<float>({1, 2}, results[1]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/collective_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/collective_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e50f42a-d4e9-4044-b05a-302eaf7a972f | cpp | tensorflow/tensorflow | codegen | tensorflow/compiler/aot/codegen.cc | tensorflow/compiler/aot/codegen_test.cc | #include "tensorflow/compiler/aot/codegen.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/aot/embedded_protocol_buffers.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/cpu_function_runtime.h"
#include "xla/service/compiler.h"
#include "xla/service/cpu/buffer_info_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace tfcompile {
namespace {
using BufferInfo = xla::cpu_function_runtime::BufferInfo;
// Returns true if `c` is an ASCII letter (A-Z or a-z). Deliberately
// locale-independent, unlike std::isalpha.
bool IsAlpha(char c) {
  const char folded = c | 0x20;  // maps 'A'-'Z' onto 'a'-'z'; no other char
                                 // lands in that range
  return folded >= 'a' && folded <= 'z';
}
bool IsAlphaNum(char c) { return IsAlpha(c) || (c >= '0' && c <= '9'); }
// Maps an XLA primitive `type` to the C++ type name used in generated code.
// On success `*str` holds the name and OK is returned; types without a C++
// equivalent yield an Unimplemented error.
Status XLATypeToCpp(xla::PrimitiveType type, string* str) {
  switch (type) {
    case xla::PRED:
      *str = "bool";
      return absl::OkStatus();
    case xla::S8:
      *str = "tensorflow::int8";
      return absl::OkStatus();
    case xla::S16:
      *str = "tensorflow::int16";
      return absl::OkStatus();
    case xla::S32:
      *str = "tensorflow::int32";
      return absl::OkStatus();
    case xla::S64:
      *str = "int64_t";
      return absl::OkStatus();
    case xla::U8:
      *str = "tensorflow::uint8";
      return absl::OkStatus();
    case xla::U16:
      *str = "tensorflow::uint16";
      return absl::OkStatus();
    case xla::U32:
      *str = "tensorflow::uint32";
      return absl::OkStatus();
    case xla::U64:
      *str = "tensorflow::uint64";
      return absl::OkStatus();
    case xla::F32:
      *str = "float";
      return absl::OkStatus();
    case xla::F64:
      *str = "double";
      return absl::OkStatus();
    default:
      return errors::Unimplemented("XLA type ", xla::PrimitiveType_Name(type),
                                   " has no equivalent in C++");
  }
}
size_t TotalBufferBytes(const std::vector<BufferInfo>& buffer_infos) {
return std::accumulate(buffer_infos.begin(), buffer_infos.end(), size_t{0},
[](size_t size, const BufferInfo& buffer_info) {
return size + buffer_info.size();
});
}
std::vector<BufferInfo> ExtractEntryParamBufferInfos(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<BufferInfo> result;
std::copy_if(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(result), [](const BufferInfo& buffer_info) {
return buffer_info.is_entry_parameter();
});
return result;
}
std::vector<BufferInfo> ExtractTempBufferInfos(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<BufferInfo> result;
std::copy_if(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(result), [](const BufferInfo& buffer_info) {
return buffer_info.is_temp_buffer();
});
return result;
}
// Appends template-rewrite pairs for arg/result `i` of `shape` to `*rewrites`:
// {{I}}, {{TYPE}}, {{DIM_VARS}}, {{DIM_SIZES}}, {{INDICES}} and {{COUNT}}.
// Fails if the element type has no C++ equivalent.
Status AddRewritesForShape(int i, const xla::Shape& shape,
                           std::vector<std::pair<string, string>>* rewrites) {
  string type;
  TF_RETURN_IF_ERROR(XLATypeToCpp(shape.element_type(), &type));
  std::vector<string> dim_vars;
  string dim_sizes, indices;
  int count = 1;
  // Scalars and single-element vectors are exposed through a one-element
  // array so the generated accessors always have an array form.
  if (shape.rank() == 0 ||
      (shape.dimensions_size() == 1 && shape.dimensions(0) == 1)) {
    dim_sizes = "[1]";
    indices = "[0]";
  } else {
    for (int dim = 0; dim < shape.dimensions_size(); ++dim) {
      dim_vars.push_back(absl::StrCat("size_t dim", dim));
      dim_sizes += absl::StrCat("[", shape.dimensions(dim), "]");
      indices += absl::StrCat("[dim", dim, "]");
      count *= shape.dimensions(dim);  // total element count
    }
  }
  rewrites->push_back({"{{I}}", absl::StrCat(i)});
  rewrites->push_back({"{{TYPE}}", type});
  rewrites->push_back({"{{DIM_VARS}}", absl::StrJoin(dim_vars, ", ")});
  rewrites->push_back({"{{DIM_SIZES}}", dim_sizes});
  rewrites->push_back({"{{INDICES}}", indices});
  rewrites->push_back({"{{COUNT}}", absl::StrCat(count)});
  return absl::OkStatus();
}
// Applies `rewrites` to `code`, then substitutes {{NAME}} with `name`.
// {{NAME}} is handled separately so callers can expand the same template
// once per positional index and once per user-visible name.
string RewriteWithName(const string& name, string code,
                       const std::vector<std::pair<string, string>>& rewrites) {
  absl::StrReplaceAll(rewrites, &code);
  absl::StrReplaceAll({{"{{NAME}}", name}}, &code);
  return code;
}
// Emits the generated-class accessor methods for each feed (set_argN_data,
// argN_data, argN(dims...), argN_size, argN_count), appending them to
// `*methods`. A named variant (arg_<name>_*) is additionally emitted for
// feeds that carry a name.
Status GenArgMethods(const tf2xla::Config& config,
                     const xla::ProgramShapeProto& ps,
                     const CompileResult& compile_result, string* methods) {
  const int num_args = ps.parameters_size();
  // Feeds plus variables must cover every XLA entry parameter.
  if (config.feed_size() + config.variable_size() < num_args) {
    return errors::InvalidArgument(
        "mismatch between feed_size(", config.feed_size(), ")+variable_size(",
        config.variable_size(), ") and num_args(", num_args, ")");
  }
  for (int i = 0; i < config.feed_size(); ++i) {
    std::vector<std::pair<string, string>> rewrites;
    TF_RETURN_IF_ERROR(
        AddRewritesForShape(i, xla::Shape(ps.parameters(i)), &rewrites));
    const string code = R"(
  void set_arg{{NAME}}_data(const void* data) {
    set_arg_data({{I}}, data);
  }
  {{TYPE}}* arg{{NAME}}_data() {
    return static_cast<{{TYPE}}*>(arg_data({{I}}));
  }
  {{TYPE}}& arg{{NAME}}({{DIM_VARS}}) {
    return (*static_cast<{{TYPE}}(*){{DIM_SIZES}}>(
        arg_data({{I}}))){{INDICES}};
  }
  const {{TYPE}}* arg{{NAME}}_data() const {
    return static_cast<const {{TYPE}}*>(arg_data({{I}}));
  }
  const {{TYPE}}& arg{{NAME}}({{DIM_VARS}}) const {
    return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
        arg_data({{I}}))){{INDICES}};
  }
  int arg{{NAME}}_size() const {
    return {{COUNT}} * sizeof({{TYPE}});
  }
  int arg{{NAME}}_count() const {
    return {{COUNT}};
  }
)";
    *methods += RewriteWithName(absl::StrCat(i), code, rewrites);
    if (!config.feed(i).name().empty()) {
      *methods += RewriteWithName("_" + config.feed(i).name(), code, rewrites);
    }
  }
  return absl::OkStatus();
}
// Emits accessor methods for each fetch (resultN_data, resultN(dims...),
// resultN_size, resultN_count), appending them to `*methods`. A named
// variant is emitted for fetches that carry a name.
Status GenResultMethods(const tf2xla::Config& config,
                        const xla::ProgramShapeProto& ps, string* methods) {
  if (ps.result().element_type() != xla::TUPLE) {
    return errors::Internal("codegen requires the XLA result to be a tuple");
  }
  size_t num_results = ps.result().tuple_shapes_size();
  // Readonly variables are never written back, so they do not occupy a slot
  // in the result tuple.
  int readonly_variables = absl::c_count_if(
      config.variable(),
      [](const tf2xla::Variable& var) { return var.readonly(); });
  const int actual_num_results =
      config.fetch_size() + config.variable_size() - readonly_variables;
  // NOTE: the message below reports the raw fetch/variable counts, not the
  // readonly-adjusted total actually compared.
  if (actual_num_results != num_results) {
    return errors::InvalidArgument("mismatch between fetch_size(",
                                   config.fetch_size(), ")+variable_size(",
                                   config.variable_size(), ") and tuple_size(",
                                   ps.result().tuple_shapes_size(), ")");
  }
  for (int i = 0; i < config.fetch_size(); ++i) {
    std::vector<std::pair<string, string>> rewrites;
    TF_RETURN_IF_ERROR(AddRewritesForShape(
        i, xla::Shape(ps.result().tuple_shapes(i)), &rewrites));
    string code = R"(
  {{TYPE}}* result{{NAME}}_data() {
    return static_cast<{{TYPE}}*>(result_data({{I}}));
  }
  {{TYPE}}& result{{NAME}}({{DIM_VARS}}) {
    return (*static_cast<{{TYPE}}(*){{DIM_SIZES}}>(
        result_data({{I}}))){{INDICES}};
  }
  const {{TYPE}}* result{{NAME}}_data() const {
    return static_cast<const {{TYPE}}*>(result_data({{I}}));
  }
  const {{TYPE}}& result{{NAME}}({{DIM_VARS}}) const {
    return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
        result_data({{I}}))){{INDICES}};
  }
  int result{{NAME}}_size() const {
    return {{COUNT}} * sizeof({{TYPE}});
  }
  int result{{NAME}}_count() const {
    return {{COUNT}};
  }
)";
    *methods += RewriteWithName(absl::StrCat(i), code, rewrites);
    if (!config.fetch(i).name().empty()) {
      *methods += RewriteWithName("_" + config.fetch(i).name(), code, rewrites);
    }
  }
  return absl::OkStatus();
}
// Emits accessor methods for each variable (set_var_<name>_data,
// var_<name>_data, var_<name>(dims...), var_<name>_size/_count), appending
// them to `*methods`. Variables occupy the entry parameters that follow the
// feeds; readonly variables get const-qualified non-const accessors.
Status GenVariableMethods(const tf2xla::Config& config,
                          const xla::ProgramShapeProto& ps, string* methods) {
  const int num_args = ps.parameters_size();
  for (int i = config.feed_size(); i < num_args; ++i) {
    std::vector<std::pair<string, string>> rewrites;
    TF_RETURN_IF_ERROR(
        AddRewritesForShape(i, xla::Shape(ps.parameters(i)), &rewrites));
    const string code = R"(
  void set_var_{{NAME}}_data({{MAYBE_CONST}}{{TYPE}}* data) {
    set_arg_data({{I}}, data);
  }
  {{MAYBE_CONST}}{{TYPE}}* var_{{NAME}}_data() {
    return static_cast<{{MAYBE_CONST}}{{TYPE}}*>(arg_data({{I}}));
  }
  {{MAYBE_CONST}}{{TYPE}}& var_{{NAME}}({{DIM_VARS}}) {
    return (*static_cast<{{MAYBE_CONST}}{{TYPE}}(*){{DIM_SIZES}}>(
        arg_data({{I}}))){{INDICES}};
  }
  const {{TYPE}}* var_{{NAME}}_data() const {
    return static_cast<const {{TYPE}}*>(arg_data({{I}}));
  }
  const {{TYPE}}& var_{{NAME}}({{DIM_VARS}}) const {
    return (*static_cast<const {{TYPE}}(*){{DIM_SIZES}}>(
        arg_data({{I}}))){{INDICES}};
  }
  int var_{{NAME}}_size() const {
    return {{COUNT}} * sizeof({{TYPE}});
  }
  int var_{{NAME}}_count() const {
    return {{COUNT}};
  }
)";
    // Variable i - feed_size() corresponds to entry parameter i.
    const tf2xla::Variable& var = config.variable(i - config.feed_size());
    rewrites.emplace_back("{{MAYBE_CONST}}", var.readonly() ? "const " : "");
    *methods += RewriteWithName(
        var.name().empty() ? var.node_name() : var.name(), code, rewrites);
  }
  return absl::OkStatus();
}
// Emits one kArg<i>Shapes dimension array per entry parameter plus the
// ArgShapeInfos() table that indexes them. Rank-0 shapes are encoded with a
// single -1 sentinel dimension. Tuple parameters are rejected.
Status GenArgShapeInfos(const xla::ProgramShapeProto& ps, string* infos) {
  for (int i = 0; i < ps.parameters_size(); ++i) {
    const xla::ShapeProto& shape = ps.parameters(i);
    if (shape.element_type() == xla::TUPLE) {
      return absl::InternalError(
          absl::StrCat("parameter ", i,
                       ": codegen requires XLA parameters to "
                       "be non-tuples."));
    }
    *infos += absl::Substitute(R"(  static constexpr int32_t kArg$0Shapes[] = {
$1
  };
)",
                               i,
                               shape.dimensions_size() > 0
                                   ? absl::StrJoin(shape.dimensions(), ", ")
                                   : "-1");
  }
  *infos += R"(  static const ShapeInfo* ArgShapeInfos() {
    static constexpr ShapeInfo kArgShapeInfoTable[kNumArgs] = {
)";
  for (int i = 0; i < ps.parameters_size(); ++i) {
    const xla::ShapeProto& shape = ps.parameters(i);
    *infos +=
        absl::Substitute("{ kArg$0Shapes, $1 },\n", i, shape.dimensions_size());
  }
  *infos += R"(    };
    return kArgShapeInfoTable;
  })";
  return absl::OkStatus();
}
// Emits one kResult<i>Shapes dimension array per result-tuple element plus
// the ResultShapeInfos() table that indexes them. Rank-0 shapes are encoded
// with a single -1 sentinel dimension; the XLA result must be a tuple.
Status GenResultShapeInfos(const xla::ProgramShapeProto& ps, string* infos) {
  if (ps.result().element_type() != xla::TUPLE) {
    return absl::InternalError("codegen requires the XLA result to be a tuple");
  }
  for (int i = 0; i < ps.result().tuple_shapes_size(); ++i) {
    const xla::ShapeProto& shape = ps.result().tuple_shapes(i);
    *infos += absl::Substitute(
        R"(  static constexpr int32_t kResult$0Shapes[] = {
$1
  };
)",
        i,
        shape.dimensions_size() > 0 ? absl::StrJoin(shape.dimensions(), ", ")
                                    : "-1");
  }
  *infos += R"(  static const ShapeInfo* ResultShapeInfos() {
    static constexpr ShapeInfo kResultShapeInfoTable[kNumResults] = {
)";
  for (int i = 0; i < ps.result().tuple_shapes_size(); ++i) {
    const xla::ShapeProto& shape = ps.result().tuple_shapes(i);
    *infos += absl::Substitute("{ kResult$0Shapes, $1 },\n", i,
                               shape.dimensions_size());
  }
  *infos += R"(    };
    return kResultShapeInfoTable;
  })";
  return absl::OkStatus();
}
// Returns the body of a generated function mapping names to indices: a
// null-terminated array of string literals where position equals index.
// Trailing entries whose name is empty are trimmed (the `end` scan below);
// empty names that precede a named entry are emitted as "". When `generate`
// is false the body simply returns nullptr.
template <typename T>
string GenNameToIndexCode(const T& entries, bool generate) {
  if (!generate) {
    return "{\n return nullptr;\n }";
  }
  // Find the index one past the last named entry.
  int end = entries.size();
  for (int i = entries.size() - 1; i >= 0; --i) {
    if (!entries[i].name().empty()) {
      break;
    }
    end = i;
  }
  string code = "{\n static const char* kNames[] = {";
  for (int i = 0; i < end; ++i) {
    if (i > 0) {
      code += ", ";
    }
    code += "\"";
    code += entries[i].name();
    code += "\"";
  }
  if (end > 0) {
    code += ", ";
  }
  code += "nullptr};\n return kNames;\n }";
  return code;
}
// Checks that every user-supplied feed/fetch/variable name is a valid C++
// identifier, since these names are spliced into generated method names.
// Variables fall back to their node name when no explicit name is given.
Status ValidateFeedFetchCppNames(const tf2xla::Config& config) {
  for (const tf2xla::Feed& feed : config.feed()) {
    const string& feed_name = feed.name();
    if (!feed_name.empty()) {
      TF_RETURN_IF_ERROR(ValidateCppIdent(feed_name, "feed name"));
    }
  }
  for (const tf2xla::Fetch& fetch : config.fetch()) {
    const string& fetch_name = fetch.name();
    if (!fetch_name.empty()) {
      TF_RETURN_IF_ERROR(ValidateCppIdent(fetch_name, "fetch name"));
    }
  }
  for (const tf2xla::Variable& variable : config.variable()) {
    const string& ident =
        variable.name().empty() ? variable.node_name() : variable.name();
    TF_RETURN_IF_ERROR(ValidateCppIdent(ident, "variable name"));
  }
  return absl::OkStatus();
}
std::vector<string> BufferInfosToCppExpression(
const std::vector<BufferInfo>& buffer_infos) {
std::vector<string> buffer_infos_as_strings;
std::transform(buffer_infos.begin(), buffer_infos.end(),
std::back_inserter(buffer_infos_as_strings),
[](const BufferInfo& buffer_info) {
xla::cpu_function_runtime::EncodedBufferInfo encoded =
buffer_info.Encode();
auto param_to_str = [](uint32_t param) -> std::string {
return param == ~0U ? "~0U" : absl::StrCat(param, "U");
};
return absl::StrCat(
"::xla::cpu_function_runtime::BufferInfo("
"::xla::cpu_function_runtime::EncodedBufferInfo{",
encoded.packed_kind_and_size, "ULL, ",
param_to_str(encoded.entry_param_number), ", ",
param_to_str(encoded.result_param_number), "})");
});
return buffer_infos_as_strings;
}
// Returns OK when `a == b`, otherwise an InternalError built from
// `error_msg` and the two values.
Status CheckEqual(size_t a, size_t b, absl::string_view error_msg) {
  if (a == b) {
    return absl::OkStatus();
  }
  return absl::InternalError(
      absl::StrCat(error_msg, ". Expected ", a, ", got ", b, "."));
}
}
// Produces the complete generated C++ header for the compiled model: a
// subclass of XlaCompiledCpuFunction with buffer tables, shape tables and
// typed accessor methods, written to `*header`.
//
// Fix: the namespace-closing loop previously contained a truncated statement
// (`ns_end += absl::StrCat("}` with an unterminated string literal); it now
// emits a well-formed "}  // end namespace <n>" line per namespace.
Status GenerateHeader(const CodegenOpts& opts, const tf2xla::Config& config,
                      const CompileResult& compile_result,
                      const MetadataResult& metadata_result, string* header) {
  TF_RETURN_IF_ERROR(ValidateConfig(config));
  TF_RETURN_IF_ERROR(ValidateFeedFetchCppNames(config));
  const int64_t result_index = compile_result.aot->result_buffer_index();
  const std::vector<BufferInfo>& buffer_infos =
      compile_result.aot->buffer_infos();
  const std::vector<int32> arg_index_table =
      ::xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
  const std::vector<int32> result_index_table =
      ::xla::cpu::CreateResultIndexTableFromBufferInfos(buffer_infos);
  std::vector<string> buffer_infos_as_strings =
      BufferInfosToCppExpression(buffer_infos);
  const int64_t buffer_infos_size = buffer_infos.size();
  if (result_index < 0 || result_index >= buffer_infos_size) {
    return errors::InvalidArgument("result index: ", result_index,
                                   " is outside the range of temp sizes: [0,",
                                   buffer_infos.size(), ")");
  }
  // Split buffers into entry parameters and temps; sizes of each group feed
  // the byte-count template parameters below.
  std::vector<BufferInfo> buffer_infos_for_args =
      ExtractEntryParamBufferInfos(buffer_infos);
  std::vector<BufferInfo> buffer_infos_for_temps =
      ExtractTempBufferInfos(buffer_infos);
  const xla::ProgramShapeProto& ps = compile_result.program_shape;
  string methods_arg, methods_result, methods_variable;
  TF_RETURN_IF_ERROR(GenArgMethods(config, ps, compile_result, &methods_arg));
  TF_RETURN_IF_ERROR(GenResultMethods(config, ps, &methods_result));
  TF_RETURN_IF_ERROR(GenVariableMethods(config, ps, &methods_variable));
  string arg_shape_infos, result_shape_infos;
  TF_RETURN_IF_ERROR(GenArgShapeInfos(ps, &arg_shape_infos));
  TF_RETURN_IF_ERROR(
      CheckEqual(ps.parameters_size(), arg_index_table.size(),
                 "Arg number mismatch, proto vs. arg_index_table"));
  TF_RETURN_IF_ERROR(GenResultShapeInfos(ps, &result_shape_infos));
  TF_RETURN_IF_ERROR(
      CheckEqual(ps.result().tuple_shapes_size(), result_index_table.size(),
                 "Result number mismatch, proto vs. result_index_table"));
  const size_t arg_bytes_aligned =
      xla::cpu_function_runtime::AlignedBufferBytes(
          buffer_infos_for_args.data(), buffer_infos_for_args.size(),
          /*allocate_entry_params=*/true);
  const size_t arg_bytes_total = TotalBufferBytes(buffer_infos_for_args);
  const size_t temp_bytes_aligned =
      xla::cpu_function_runtime::AlignedBufferBytes(
          buffer_infos_for_temps.data(), buffer_infos_for_temps.size(),
          /*allocate_entry_params=*/true);
  const size_t temp_bytes_total = TotalBufferBytes(buffer_infos_for_temps);
  // Open/close the user-requested namespaces around the generated class.
  string ns_start;
  for (const string& n : opts.namespaces) {
    ns_start += absl::StrCat("namespace ", n, " {\n");
  }
  ns_start += "\n";
  string ns_end("\n");
  for (int i = opts.namespaces.size() - 1; i >= 0; --i) {
    const string& n = opts.namespaces[i];
    ns_end += absl::StrCat("}  // end namespace ", n, "\n");
  }
  const string arg_names_code =
      GenNameToIndexCode(config.feed(), opts.gen_name_to_index);
  // Variables without an explicit name are reported under their node name.
  auto variable_copy = config.variable();
  for (auto& var : variable_copy) {
    if (var.name().empty()) {
      var.set_name(var.node_name());
    }
  }
  const string variable_names_code =
      GenNameToIndexCode(variable_copy, opts.gen_name_to_index);
  const string result_names_code =
      GenNameToIndexCode(config.fetch(), opts.gen_name_to_index);
  const string include_xla_data_proto =
      opts.gen_program_shape
          ? R"(#include "xla/xla_data.pb.h")"
          : "";
  const string include_hlo_profile_printer_data_proto =
      opts.gen_hlo_profile_printer_data
          ? R"(#include "xla/service/hlo_profile_printer_data.pb.h")"
          : "";
  const string assign_profile_counters_size =
      opts.gen_hlo_profile_printer_data
          ? "set_static_data_profile_counters_size(data, "
            "get_static_data_hlo_profile_printer_data(data)->"
            "profile_counters_size());"
          : "";
  // The header template; {{...}} placeholders are substituted below.
  *header =
      R"(
#ifndef TFCOMPILE_GENERATED_{{ENTRY}}_H_
#define TFCOMPILE_GENERATED_{{ENTRY}}_H_
{{INCLUDE_XLA_DATA_PROTO}}
{{INCLUDE_HLO_PROFILE_PRINTER_DATA_PROTO}}
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "tensorflow/core/platform/types.h"
namespace Eigen { struct ThreadPoolDevice; }
namespace xla { class ExecutableRunOptions; }
extern "C" void {{ENTRY}}(
    void* result, const ::xla::ExecutableRunOptions* run_options,
    const void** args, void** temps, XlaCustomCallStatus* status,
    int64_t* profile_counters);
{{DECLS_FROM_OBJ_FILE}}
{{NS_START}}
class {{CLASS}} final : public tensorflow::XlaCompiledCpuFunction {
 public:
  static constexpr size_t kNumArgs = {{ARG_NUM}};
  static constexpr size_t kNumResults = {{RESULT_NUM}};
  static constexpr size_t kNumVariables = {{VARIABLE_NUM}};
  static const ::int64_t ArgSize(::tensorflow::int32 index) {
    return BufferInfos()[ArgIndexToBufferIndex()[index]].size();
  }
  static const tensorflow::XlaCompiledCpuFunction::StaticData& StaticData() {
    static XlaCompiledCpuFunction::StaticData* kStaticData = [](){
      XlaCompiledCpuFunction::StaticData* data =
        new XlaCompiledCpuFunction::StaticData;
      set_static_data_raw_function(data, {{ENTRY}});
      set_static_data_buffer_infos(data, BufferInfos());
      set_static_data_num_buffers(data, kNumBuffers);
      set_static_data_result_index_table(data, ResultIndexToBufferIndex());
      set_static_data_num_results(data, kNumResults);
      set_static_data_arg_index_table(data, ArgIndexToBufferIndex());
      set_static_data_num_args(data, kNumArgs);
      set_static_data_num_variables(data, kNumVariables);
      set_static_data_result_index(data, kResultIndex);
      set_static_data_arg_shape_infos(data, ArgShapeInfos());
      set_static_data_result_shape_infos(data, ResultShapeInfos());
      set_static_data_arg_names(data, StaticArgNames());
      set_static_data_variable_names(data, StaticVariableNames());
      set_static_data_result_names(data, StaticResultNames());
      set_static_data_program_shape(data, StaticProgramShape());
      set_static_data_hlo_profile_printer_data(
          data, StaticHloProfilePrinterData());
      {{ASSIGN_PROFILE_COUNTERS_SIZE}}
      return data;
    }();
    return *kStaticData;
  }
  {{CLASS}}(AllocMode alloc_mode =
            AllocMode::ARGS_VARIABLES_RESULTS_PROFILES_AND_TEMPS)
      : XlaCompiledCpuFunction(StaticData(), alloc_mode) {}
  {{CLASS}}(const {{CLASS}}&) = delete;
  {{CLASS}}& operator=(const {{CLASS}}&) = delete;
{{METHODS_ARG}}
{{METHODS_RESULT}}
{{METHODS_VARIABLE}}
 private:
  static constexpr size_t kNumBuffers = {{NUM_BUFFERS}};
  static const ::xla::cpu_function_runtime::BufferInfo* BufferInfos() {
    static const ::xla::cpu_function_runtime::BufferInfo
      kBufferInfos[kNumBuffers] = {
{{BUFFER_INFOS_AS_STRING}}
      };
    return kBufferInfos;
  }
  static const ::tensorflow::int32* ResultIndexToBufferIndex() {
    static constexpr ::tensorflow::int32 kResultIndexToBufferIndex[kNumResults] = {
{{RESULT_INDEX_TABLE}}
    };
    return kResultIndexToBufferIndex;
  }
  static const ::tensorflow::int32* ArgIndexToBufferIndex() {
    static constexpr ::tensorflow::int32 kArgIndexToBufferIndex[kNumArgs] = {
{{ARG_INDEX_TABLE}}
    };
    return kArgIndexToBufferIndex;
  }
  static constexpr size_t kResultIndex = {{RESULT_INDEX}};
{{ARG_SHAPE_INFOS}};
{{RESULT_SHAPE_INFOS}};
  static const char** StaticArgNames() {{ARG_NAMES_CODE}}
  static const char** StaticVariableNames() {{VARIABLE_NAMES_CODE}}
  static const char** StaticResultNames() {{RESULT_NAMES_CODE}}
  static const ::xla::ProgramShapeProto* StaticProgramShape() {
    static const ::xla::ProgramShapeProto* kShape = {{PROGRAM_SHAPE_SHIM_EXPRESSION}};
    return kShape;
  }
  static const ::xla::HloProfilePrinterData* StaticHloProfilePrinterData() {
    static const ::xla::HloProfilePrinterData* kHloProfilePrinterData =
        {{HLO_PROFILE_PRINTER_DATA_SHIM_EXPRESSION}};
    return kHloProfilePrinterData;
  }
};
{{NS_END}}
#endif
)";
  // Note: the {{METHODS_*}}/{{NS_*}} keys include the trailing newline so
  // empty substitutions do not leave blank lines behind.
  const std::vector<std::pair<string, string>> rewrites = {
      {"{{ARG_BYTES_ALIGNED}}", absl::StrCat(arg_bytes_aligned)},
      {"{{ARG_BYTES_TOTAL}}", absl::StrCat(arg_bytes_total)},
      {"{{ARG_NAMES_CODE}}", arg_names_code},
      {"{{ARG_NUM}}", absl::StrCat(arg_index_table.size())},
      {"{{ARG_SHAPE_INFOS}}", arg_shape_infos},
      {"{{VARIABLE_NUM}}", absl::StrCat(config.variable_size())},
      {"{{ARG_INDEX_TABLE}}", absl::StrJoin(arg_index_table, ", ")},
      {"{{RESULT_NUM}}", absl::StrCat(result_index_table.size())},
      {"{{RESULT_INDEX_TABLE}}", absl::StrJoin(result_index_table, ", ")},
      {"{{ASSIGN_PROFILE_COUNTERS_SIZE}}", assign_profile_counters_size},
      {"{{CLASS}}", opts.class_name},
      {"{{DECLS_FROM_OBJ_FILE}}",
       absl::StrJoin(metadata_result.header_variable_decls, "\n")},
      {"{{ENTRY}}", compile_result.entry_point},
      {"{{HLO_PROFILE_PRINTER_DATA_SHIM_EXPRESSION}}",
       metadata_result.hlo_profile_printer_data_access_shim},
      {"{{INCLUDE_XLA_DATA_PROTO}}", include_xla_data_proto},
      {"{{INCLUDE_HLO_PROFILE_PRINTER_DATA_PROTO}}",
       include_hlo_profile_printer_data_proto},
      {"{{METHODS_ARG}}\n", methods_arg},
      {"{{METHODS_RESULT}}\n", methods_result},
      {"{{METHODS_VARIABLE}}\n", methods_variable},
      {"{{NS_END}}\n", ns_end},
      {"{{NS_START}}\n", ns_start},
      {"{{PROGRAM_SHAPE}}", xla::ShapeUtil::HumanString(xla::ProgramShape(ps))},
      {"{{PROGRAM_SHAPE_SHIM_EXPRESSION}}",
       metadata_result.program_shape_access_shim},
      {"{{VARIABLE_NAMES_CODE}}", variable_names_code},
      {"{{RESULT_INDEX}}", absl::StrCat(result_index)},
      {"{{RESULT_NAMES_CODE}}", result_names_code},
      {"{{RESULT_SHAPE_INFOS}}", result_shape_infos},
      {"{{TEMP_BYTES_ALIGNED}}", absl::StrCat(temp_bytes_aligned)},
      {"{{TEMP_BYTES_TOTAL}}", absl::StrCat(temp_bytes_total)},
      {"{{NUM_BUFFERS}}", absl::StrCat(buffer_infos.size())},
      {"{{BUFFER_INFOS_AS_STRING}}",
       absl::StrJoin(buffer_infos_as_strings, ",\n")}};
  absl::StrReplaceAll(rewrites, header);
  return absl::OkStatus();
}
// Builds a collision-resistant symbol name from the class namespaces, class
// name and `suffix`: "__tfcompile_<ns>..._<class>_<suffix>".
static string CreateUniqueIdentifier(const CodegenOpts& opts,
                                     absl::string_view suffix) {
  string unique_id = "__tfcompile";
  for (const string& ns : opts.namespaces) {
    unique_id += absl::StrCat("_", ns);
  }
  unique_id += absl::StrCat("_", opts.class_name, "_", suffix);
  return unique_id;
}
// Embeds the program-shape and HLO-profile-printer protobufs into an object
// file and records the access shims / extern declarations the generated
// header uses to reach them. Shim order matches the protobuf list below:
// index 0 = program shape, index 1 = profile printer data.
Status GenerateMetadata(const CodegenOpts& opts,
                        const CompileResult& compile_result,
                        MetadataResult* metadata_result) {
  std::unique_ptr<xla::ProgramShapeProto> program_shape;
  if (opts.gen_program_shape) {
    program_shape =
        std::make_unique<xla::ProgramShapeProto>(compile_result.program_shape);
    // Parameter names are stripped before embedding — presumably to shrink
    // the embedded proto; confirm no consumer relies on them.
    program_shape->clear_parameter_names();
  }
  // When gen_program_shape is false, program_shape stays null and an empty
  // embedding is produced for slot 0.
  ProtobufToEmbed program_shape_protobuf{
      CreateUniqueIdentifier(opts, "ProgramShapeProto"),
      "::xla::ProgramShapeProto", program_shape.get()};
  ProtobufToEmbed hlo_profile_printer_data_protobuf{
      CreateUniqueIdentifier(opts, "HloProfilePrinterData"),
      "::xla::HloProfilePrinterData",
      compile_result.aot->hlo_profile_printer_data()};
  TF_ASSIGN_OR_RETURN(
      EmbeddedProtocolBuffers embedded_protobufs,
      CreateEmbeddedProtocolBuffers(
          opts.target_triple,
          {program_shape_protobuf, hlo_profile_printer_data_protobuf}));
  metadata_result->program_shape_access_shim =
      std::move(embedded_protobufs.cpp_shims[0].expression);
  metadata_result->hlo_profile_printer_data_access_shim =
      std::move(embedded_protobufs.cpp_shims[1].expression);
  metadata_result->header_variable_decls.emplace_back(
      std::move(embedded_protobufs.cpp_shims[0].variable_decl));
  metadata_result->header_variable_decls.emplace_back(
      std::move(embedded_protobufs.cpp_shims[1].variable_decl));
  metadata_result->object_file_data =
      std::move(embedded_protobufs.object_file_data);
  return absl::OkStatus();
}
// Splits `cpp_class` ("a::b::MyClass", optionally with a leading "::") into
// the final class name and the preceding namespace components. Each
// component must be a valid C++ identifier. On failure, `*namespaces` may
// hold the components validated so far.
Status ParseCppClass(const string& cpp_class, string* class_name,
                     std::vector<string>* namespaces) {
  class_name->clear();
  namespaces->clear();
  if (cpp_class.empty()) {
    return errors::InvalidArgument("empty cpp_class: " + cpp_class);
  }
  std::vector<string> parts = absl::StrSplit(cpp_class, "::");
  // A fully-qualified name ("::foo::Bar") yields a leading empty component.
  if (parts.front().empty()) {
    parts.erase(parts.begin());
  }
  // Validate namespaces left-to-right first, then the class name, so the
  // error reported matches the leftmost bad component.
  const int num_parts = static_cast<int>(parts.size());
  for (int i = 0; i + 1 < num_parts; ++i) {
    TF_RETURN_IF_ERROR(ValidateCppIdent(
        parts[i], "in namespace component of cpp_class: " + cpp_class));
    namespaces->push_back(parts[i]);
  }
  TF_RETURN_IF_ERROR(ValidateCppIdent(
      parts.back(), "in class name of cpp_class: " + cpp_class));
  *class_name = parts.back();
  return absl::OkStatus();
}
// Checks that `ident` is a valid C++ identifier: non-empty, starting with a
// letter or underscore, followed only by letters, digits or underscores.
// `msg` is appended to the error for context.
Status ValidateCppIdent(absl::string_view ident, absl::string_view msg) {
  if (ident.empty()) {
    return errors::InvalidArgument("empty identifier: ", msg);
  }
  const char leading = ident[0];
  if (!(leading == '_' || IsAlpha(leading))) {
    return errors::InvalidArgument("illegal leading char: ", msg);
  }
  for (const char c : ident.substr(1)) {
    if (!(c == '_' || IsAlphaNum(c))) {
      return errors::InvalidArgument("illegal char: ", msg);
    }
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/aot/codegen.h"
#include <algorithm>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/TargetSelect.h"
#include "xla/cpu_function_runtime.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tfcompile {
namespace {
using ::xla::cpu_function_runtime::BufferInfo;
// Asserts that `status` is a non-OK status whose message contains `str`.
void ExpectErrorContains(const Status& status, absl::string_view str) {
  EXPECT_NE(absl::OkStatus(), status);
  EXPECT_TRUE(absl::StrContains(status.message(), str))
      << "expected error: " << status.message() << " to contain: " << str;
}
// Exercises ValidateCppIdent on accepted identifiers (including one using
// every legal character) and on each rejection class: empty, bad leading
// char, bad interior char.
TEST(ValidateCppIdent, Simple) {
  TF_EXPECT_OK(ValidateCppIdent("a", ""));
  TF_EXPECT_OK(ValidateCppIdent("abc", ""));
  TF_EXPECT_OK(ValidateCppIdent("_abc", ""));
  TF_EXPECT_OK(ValidateCppIdent("_abc123", ""));
  // Build an identifier containing every legal character.
  string ident;
  for (char c = 'a'; c <= 'z'; c++) {
    ident.append(1, c);
  }
  for (char c = 'A'; c <= 'Z'; c++) {
    ident.append(1, c);
  }
  for (char c = '0'; c <= '9'; c++) {
    ident.append(1, c);
  }
  ident += "_";
  TF_EXPECT_OK(ValidateCppIdent(ident, ""));
  ExpectErrorContains(ValidateCppIdent("", ""), "empty identifier");
  ExpectErrorContains(ValidateCppIdent(" ", ""), "illegal leading char");
  ExpectErrorContains(ValidateCppIdent("0", ""), "illegal leading char");
  ExpectErrorContains(ValidateCppIdent(".", ""), "illegal leading char");
  ExpectErrorContains(ValidateCppIdent(":", ""), "illegal leading char");
  ExpectErrorContains(ValidateCppIdent("a.", ""), "illegal char");
  ExpectErrorContains(ValidateCppIdent("a:", ""), "illegal char");
  ExpectErrorContains(ValidateCppIdent("a:", ""), "illegal char");
}
// Fixture providing success/failure helpers around ParseCppClass.
class ParseCppClassTest : public ::testing::Test {
 protected:
  // Asserts that `cpp_class` parses into the expected class name and
  // namespace list.
  void ExpectOK(const string& cpp_class, const string& want_class_name,
                const std::vector<string>& want_namespaces) {
    string class_name;
    std::vector<string> namespaces;
    TF_EXPECT_OK(ParseCppClass(cpp_class, &class_name, &namespaces));
    EXPECT_EQ(class_name, want_class_name);
    EXPECT_EQ(namespaces, want_namespaces);
  }
  // Asserts that `cpp_class` is rejected.
  void ExpectFail(const string& cpp_class) {
    string class_name;
    std::vector<string> namespaces;
    EXPECT_NE(ParseCppClass(cpp_class, &class_name, &namespaces),
              absl::OkStatus())
        << cpp_class;
  }
};
// Valid inputs: bare class names, nested namespaces, leading "::", and
// identifiers built from every legal character.
TEST_F(ParseCppClassTest, ParseOK) {
  ExpectOK("MyClass", "MyClass", {});
  ExpectOK("_MyClass", "_MyClass", {});
  ExpectOK("a::MyClass", "MyClass", {"a"});
  ExpectOK("a::foo::MyClass", "MyClass", {"a", "foo"});
  ExpectOK("a::foo::b::MyClass", "MyClass", {"a", "foo", "b"});
  ExpectOK("a::foo::b::bar::MyClass", "MyClass", {"a", "foo", "b", "bar"});
  ExpectOK("foo::MyClass", "MyClass", {"foo"});
  ExpectOK("_foo::MyClass", "MyClass", {"_foo"});
  ExpectOK("_foo::_MyClass", "_MyClass", {"_foo"});
  // A leading "::" (fully-qualified name) is accepted and ignored.
  ExpectOK("::foo::bar::MyClass", "MyClass", {"foo", "bar"});
  ExpectOK("::_foo::MyClass", "MyClass", {"_foo"});
  ExpectOK("::_foo::_MyClass", "_MyClass", {"_foo"});
  // Identifier containing every legal character.
  string ident;
  for (char c = 'a'; c <= 'z'; c++) {
    ident.append(1, c);
  }
  for (char c = 'A'; c <= 'Z'; c++) {
    ident.append(1, c);
  }
  for (char c = '0'; c <= '9'; c++) {
    ident.append(1, c);
  }
  ident += "_";
  ExpectOK(ident, ident, {});
  ExpectOK(ident + "::" + ident, ident, {ident});
  ExpectOK(ident + "::" + ident + "::" + ident, ident, {ident, ident});
}
// Invalid inputs: empty strings, malformed separators, and illegal
// characters in namespace or class components.
TEST_F(ParseCppClassTest, ParseFail) {
  ExpectFail("");
  ExpectFail("::");
  ExpectFail("0");
  ExpectFail("a.b");
  ExpectFail("a:b");
  ExpectFail(":foo::bar");
  ExpectFail("good::.bad");
  ExpectFail("good:::bad");
  ExpectFail("good::bad::");
  ExpectFail("good::::bad");
  ExpectFail("::::bad");
  ExpectFail("good:: bad");
  ExpectFail("good::0bad");
}
// Compares `expected_contents` against the contents of the golden file named
// by `tensorflow_relative_golden_file_name`. When `ignore_cr` is true, all
// '\r' characters are stripped from BOTH sides before comparison, so the
// check is insensitive to CRLF vs LF line endings (e.g. Windows checkouts).
static void CompareWithGoldenFile(
    const string& tensorflow_relative_golden_file_name,
    const string& expected_contents, bool ignore_cr) {
  string sanitized_expected_contents(expected_contents);
  if (ignore_cr) {
    sanitized_expected_contents.erase(
        std::remove(sanitized_expected_contents.begin(),
                    sanitized_expected_contents.end(), '\r'),
        sanitized_expected_contents.end());
  }
  // Flip to true locally to regenerate the golden file from the current
  // output; never submit with update_golden == true.
  const bool update_golden = false;
  string golden_file_name =
      GetDataDependencyFilepath(tensorflow_relative_golden_file_name);
  if (update_golden) {
    TF_EXPECT_OK(
        WriteStringToFile(Env::Default(), golden_file_name, expected_contents));
  }
  string golden_file_contents;
  TF_ASSERT_OK(ReadFileToString(Env::Default(), golden_file_name,
                                &golden_file_contents));
  if (ignore_cr) {
    golden_file_contents.erase(std::remove(golden_file_contents.begin(),
                                           golden_file_contents.end(), '\r'),
                               golden_file_contents.end());
  }
  // Fix: compare against the CR-stripped copy. The original compared the
  // sanitized golden contents to the *unsanitized* expected contents, which
  // defeated `ignore_cr` and left sanitized_expected_contents unused.
  EXPECT_EQ(golden_file_contents, sanitized_expected_contents);
}
#if TF_LLVM_X86_AVAILABLE
TEST(CodegenTest, Golden) {
LLVMInitializeX86Target();
LLVMInitializeX86TargetInfo();
LLVMInitializeX86TargetMC();
LLVMInitializeX86AsmPrinter();
CodegenOpts opts;
opts.class_name = "MyClass";
opts.target_triple = "x86_64-pc-linux";
opts.namespaces = {"foo", "bar"};
opts.gen_name_to_index = true;
opts.gen_program_shape = true;
tf2xla::Config config;
tf2xla::Feed* feed = config.add_feed();
feed->mutable_id()->set_node_name("feed0");
feed->set_name("myfeed");
feed = config.add_feed();
feed->mutable_id()->set_node_name("feed1");
tf2xla::Fetch* fetch = config.add_fetch();
fetch->mutable_id()->set_node_name("fetch0");
fetch->set_name("myfetch");
tf2xla::Variable* variable = config.add_variable();
variable->set_node_name("myvar_readonly");
variable->mutable_shape()->add_dim()->set_size(1);
variable->set_type(DT_FLOAT);
variable->set_readonly(true);
tf2xla::Variable* variable2 = config.add_variable();
variable2->set_node_name("myvar");
variable2->mutable_shape()->add_dim()->set_size(1);
variable2->set_type(DT_FLOAT);
tf2xla::Variable* variable3 = config.add_variable();
variable3->set_node_name("my/var");
variable3->set_name("myvar2");
variable3->mutable_shape()->add_dim()->set_size(5);
variable3->set_type(DT_INT32);
CompileResult compile_result;
compile_result.aot.reset(new xla::cpu::CpuAotCompilationResult(
{},
{BufferInfo::MakeTempBuffer(3 * 8),
BufferInfo::MakeEntryParameter(8, 0),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 1),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 2),
BufferInfo::MakeTempBuffer(1),
BufferInfo::MakeEntryParameter(96, 3),
BufferInfo::MakeResultParameter(5 * 6 * 4,
0),
BufferInfo::MakeEntryParameter(96, 4),
BufferInfo::MakeResultParameter(1 * 4,
1),
BufferInfo::MakeResultParameter(5 * 4,
2)},
0, nullptr, {}));
compile_result.program_shape =
xla::ShapeUtil::MakeProgramShape(
{
xla::ShapeUtil::MakeShape(xla::F32, {1, 2}),
xla::ShapeUtil::MakeShape(xla::S64, {3, 4}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::S32, {5}),
},
xla::ShapeUtil::MakeTupleShape({
xla::ShapeUtil::MakeShape(xla::U32, {5, 6}),
xla::ShapeUtil::MakeShape(xla::F32, {1}),
xla::ShapeUtil::MakeShape(xla::S32, {5}),
}))
.ToProto();
compile_result.entry_point = "entry_point";
compile_result.pointer_size = 8;
MetadataResult metadata_result;
TF_ASSERT_OK(GenerateMetadata(opts, compile_result, &metadata_result));
CompareWithGoldenFile("tensorflow/compiler/aot/codegen_test_o.golden",
metadata_result.object_file_data, false);
string header;
TF_ASSERT_OK(
GenerateHeader(opts, config, compile_result, metadata_result, &header));
CompareWithGoldenFile("tensorflow/compiler/aot/codegen_test_h.golden", header,
true);
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/codegen.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/aot/codegen_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
74fda3c0-aa6b-4ac7-b8c6-35e66f71e68a | cpp | tensorflow/tensorflow | logical | tensorflow/lite/kernels/logical.cc | tensorflow/lite/kernels/logical_test.cc | #include <stddef.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace logical {
namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
// Per-node state computed once in Prepare and reused on every Eval call.
struct OpData {
  bool requires_broadcast;  // True when the two input shapes differ.
};
// Allocates the per-node OpData. Ownership transfers to the TFLite runtime,
// which releases it through Free(). The serialized op parameters in
// `buffer`/`length` are unused by the logical ops.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return new OpData{/*requires_broadcast=*/false};
}
// Releases the OpData allocated by Init; `buffer` is the pointer that Init
// returned, handed back by the runtime as void*.
void Free(TfLiteContext* context, void* buffer) {
  delete static_cast<OpData*>(buffer);
}
// Validates node inputs/outputs, records whether broadcasting is needed, and
// resizes the output tensor. Logical ops take exactly two bool tensors and
// produce one bool tensor whose shape is either the (shared) input shape or
// the broadcast of the two input shapes.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Both inputs must share a type, and that type must be bool.
  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  const TfLiteType type = input1->type;
  if (type != kTfLiteBool) {
    TF_LITE_KERNEL_LOG(context, "Logical ops only support bool type.");
    return kTfLiteError;
  }
  output->type = type;
  // Cache the broadcast decision so Eval does not recompute it per call.
  data->requires_broadcast = !HaveSameShapes(input1, input2);
  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }
  // ResizeTensor takes ownership of output_size.
  return context->ResizeTensor(context, output, output_size);
}
// Shared Eval implementation for both logical ops: applies the element-wise
// predicate `func` over the two bool inputs, using the slow 4D broadcast
// path only when Prepare determined the shapes differ.
TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node,
                         bool (*func)(bool, bool)) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  if (data->requires_broadcast) {
    reference_ops::BroadcastBinaryFunction4DSlow<bool, bool, bool>(
        GetTensorShape(input1), GetTensorData<bool>(input1),
        GetTensorShape(input2), GetTensorData<bool>(input2),
        GetTensorShape(output), GetTensorData<bool>(output), func);
  } else {
    reference_ops::BinaryFunction<bool, bool, bool>(
        GetTensorShape(input1), GetTensorData<bool>(input1),
        GetTensorShape(input2), GetTensorData<bool>(input2),
        GetTensorShape(output), GetTensorData<bool>(output), func);
  }
  return kTfLiteOk;
}
// Element-wise OR predicate passed to LogicalImpl.
bool LogicalOr(bool lhs, bool rhs) { return lhs || rhs; }

// Eval entry point for the LOGICAL_OR builtin op.
TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) {
  return LogicalImpl(context, node, LogicalOr);
}
// Element-wise AND predicate passed to LogicalImpl.
bool LogicalAnd(bool lhs, bool rhs) { return lhs && rhs; }

// Eval entry point for the LOGICAL_AND builtin op.
TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) {
  return LogicalImpl(context, node, LogicalAnd);
}
}
}
// Returns the kernel registration for the LOGICAL_OR builtin. The static
// registration object lives for the life of the process.
TfLiteRegistration* Register_LOGICAL_OR() {
  static TfLiteRegistration r = {logical::Init, logical::Free, logical::Prepare,
                                 logical::LogicalOrEval};
  return &r;
}
// Returns the kernel registration for the LOGICAL_AND builtin. The static
// registration object lives for the life of the process.
TfLiteRegistration* Register_LOGICAL_AND() {
  static TfLiteRegistration r = {logical::Init, logical::Free, logical::Prepare,
                                 logical::LogicalAndEval};
  return &r;
}
}
}
} | #include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
// Test harness that builds a single-op interpreter for either LOGICAL_OR or
// LOGICAL_AND with two bool inputs and one bool output.
class LogicalOpModel : public SingleOpModel {
 public:
  LogicalOpModel(std::initializer_list<int> input1_shape,
                 std::initializer_list<int> input2_shape, BuiltinOperator op) {
    input1_ = AddInput(TensorType_BOOL);
    input2_ = AddInput(TensorType_BOOL);
    output_ = AddOutput(TensorType_BOOL);
    ConfigureBuiltinOp(op);
    BuildInterpreter({input1_shape, input2_shape});
  }
  int input1() { return input1_; }
  int input2() { return input2_; }
  std::vector<bool> GetOutput() { return ExtractVector<bool>(output_); }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 private:
  int input1_;
  int input2_;
  int output_;
  // Attaches the builtin options matching `op`; any other op is a test bug.
  void ConfigureBuiltinOp(BuiltinOperator op) {
    switch (op) {
      case BuiltinOperator_LOGICAL_OR: {
        SetBuiltinOp(op, BuiltinOptions_LogicalOrOptions,
                     CreateLogicalOrOptions(builder_).Union());
        break;
      }
      case BuiltinOperator_LOGICAL_AND: {
        SetBuiltinOp(op, BuiltinOptions_LogicalAndOptions,
                     CreateLogicalAndOptions(builder_).Union());
        break;
      }
      default: { FAIL() << "We shouldn't get here."; }
    }
  }
};
// Element-wise OR over same-shape inputs.
TEST(LogicalTest, LogicalOr) {
  LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, BuiltinOperator_LOGICAL_OR);
  model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
  model.PopulateTensor<bool>(model.input2(), {true, false, true, false});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, true, true));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
// OR where the scalar-shaped second input is broadcast over the first.
TEST(LogicalTest, BroadcastLogicalOr) {
  LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, BuiltinOperator_LOGICAL_OR);
  model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
  model.PopulateTensor<bool>(model.input2(), {false});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
// Element-wise AND over same-shape inputs.
TEST(LogicalTest, LogicalAnd) {
  LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, BuiltinOperator_LOGICAL_AND);
  model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
  model.PopulateTensor<bool>(model.input2(), {true, false, true, false});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, false));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
// AND where the scalar-shaped second input is broadcast over the first.
TEST(LogicalTest, BroadcastLogicalAnd) {
  LogicalOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, BuiltinOperator_LOGICAL_AND);
  model.PopulateTensor<bool>(model.input1(), {true, false, false, true});
  model.PopulateTensor<bool>(model.input2(), {true});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput(), ElementsAre(true, false, false, true));
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1, 1, 4));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/logical.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/logical_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f329122-8f19-41a9-81b2-7b068f84edbb | cpp | abseil/abseil-cpp | decode_rust_punycode | absl/debugging/internal/decode_rust_punycode.cc | absl/debugging/internal/decode_rust_punycode_test.cc | #include "absl/debugging/internal/decode_rust_punycode.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/base/config.h"
#include "absl/base/nullability.h"
#include "absl/debugging/internal/bounded_utf8_length_sequence.h"
#include "absl/debugging/internal/utf8_for_code_point.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
// Maximum number of decoded code points we will emit; bounds work and memory.
constexpr uint32_t kMaxChars = 256;
// Bootstring parameters for Punycode, per RFC 3492 section 5.
constexpr uint32_t kBase = 36, kTMin = 1, kTMax = 26, kSkew = 38, kDamp = 700;
// Largest valid Unicode code point (U+10FFFF).
constexpr uint32_t kMaxCodePoint = 0x10ffff;
// Cap on the decoder state variable i; sized so any valid input fits (see the
// static_asserts in ScanNextDelta) while leaving headroom below 2^32.
constexpr uint32_t kMaxI = 1 << 30;
// Consumes the optional ASCII prefix "ident_" from the punycode input (the
// prefix is delimited by the *last* underscore), copying "ident" plus a NUL
// terminator into [out_begin, out_end) and advancing punycode_begin past the
// delimiter. Sets num_ascii_chars to the prefix length (0 when there is no
// prefix). Returns false on malformed input: a character outside
// [A-Za-z0-9_], a delimiter in the first position (empty prefix), or an
// output buffer too small to hold the prefix and its NUL.
bool ConsumeOptionalAsciiPrefix(const char*& punycode_begin,
                                const char* const punycode_end,
                                char* const out_begin,
                                char* const out_end,
                                uint32_t& num_ascii_chars) {
  num_ascii_chars = 0;

  const auto is_ident_char = [](char c) {
    return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
           ('0' <= c && c <= '9');
  };

  // Validate the whole input and remember where the last underscore (the
  // delimiter candidate) sits.
  int delimiter_pos = -1;
  const int input_len = static_cast<int>(punycode_end - punycode_begin);
  for (int pos = 0; pos < input_len; ++pos) {
    const char c = punycode_begin[pos];
    if (c == '_') {
      delimiter_pos = pos;
    } else if (!is_ident_char(c)) {
      return false;
    }
  }

  if (delimiter_pos < 0) return true;    // No prefix; nothing consumed.
  if (delimiter_pos == 0) return false;  // Empty prefix is illegal.
  // The prefix plus its NUL terminator must fit in the output buffer.
  if (delimiter_pos + 1 > out_end - out_begin) return false;

  num_ascii_chars = static_cast<uint32_t>(delimiter_pos);
  std::memcpy(out_begin, punycode_begin, num_ascii_chars);
  out_begin[num_ascii_chars] = '\0';
  punycode_begin += num_ascii_chars + 1;  // Also skip the '_' delimiter.
  return true;
}
// Decodes one punycode base-36 digit: letters (either case) map to 0..25 and
// decimal digits map to 26..35. Any other character returns -1 to signal
// malformed input.
int DigitValue(char c) {
  if (c >= 'a' && c <= 'z') return c - 'a';
  if (c >= 'A' && c <= 'Z') return c - 'A';
  if (c >= '0' && c <= '9') return 26 + (c - '0');
  return -1;
}
// Decodes the next variable-length delta from the punycode stream into `i`,
// using the generalized variable-length integer scheme of RFC 3492 section
// 3.3 with the given adaptive `bias`. Advances punycode_begin past the
// consumed digits. Returns false on an invalid digit, on overflow past
// kMaxI, or if the input ends before the terminating digit.
bool ScanNextDelta(const char*& punycode_begin, const char* const punycode_end,
                   uint32_t bias, uint32_t& i) {
  uint64_t w = 1;
  // k tracks the digit position scaled by kBase; each iteration consumes one
  // digit of weight w.
  for (uint32_t k = kBase; punycode_begin != punycode_end; k += kBase) {
    const int digit_value = DigitValue(*punycode_begin++);
    if (digit_value < 0) return false;
    // Accumulate in 64 bits so the overflow check below cannot itself wrap.
    const uint64_t new_i = i + static_cast<uint64_t>(digit_value) * w;
    static_assert(
        kMaxI >= kMaxChars * kMaxCodePoint,
        "kMaxI is too small to prevent spurious failures on good input");
    if (new_i > kMaxI) return false;
    static_assert(
        kMaxI < (uint64_t{1} << 32),
        "Make kMaxI smaller or i 64 bits wide to prevent silent wraparound");
    i = static_cast<uint32_t>(new_i);
    // Threshold t for this digit position, clamped to [kTMin, kTMax] around
    // the bias, per the RFC.
    uint32_t t;
    if (k <= bias + kTMin) {
      t = kTMin;
    } else if (k >= bias + kTMax) {
      t = kTMax;
    } else {
      t = k - bias;
    }
    // A digit below the threshold terminates the variable-length integer.
    if (static_cast<uint32_t>(digit_value) < t) return true;
    w *= kBase - t;
  }
  return false;
}
}
// Decodes a Rust-mangling-flavored Punycode string (optional ASCII prefix
// delimited by the last '_', followed by RFC 3492 deltas) into UTF-8 in
// [out_begin, out_end). Returns a pointer to the terminating NUL on success
// or nullptr on any failure (malformed input, overflow, or an output buffer
// that is too small).
absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options) {
  const char* punycode_begin = options.punycode_begin;
  const char* const punycode_end = options.punycode_end;
  char* const out_begin = options.out_begin;
  char* const out_end = options.out_end;
  const size_t out_size = static_cast<size_t>(out_end - out_begin);
  if (out_size == 0) return nullptr;
  // Keep the output NUL-terminated at all times so partial progress is safe.
  *out_begin = '\0';
  // Decoder state per RFC 3492: n = current code point, i = insertion index,
  // bias = adaptation state; initial values from section 5.
  uint32_t n = 128, i = 0, bias = 72, num_chars = 0;
  if (!ConsumeOptionalAsciiPrefix(punycode_begin, punycode_end,
                                  out_begin, out_end, num_chars)) {
    return nullptr;
  }
  uint32_t total_utf8_bytes = num_chars;
  // Tracks the UTF-8 byte length of each decoded character so character
  // index i can be translated to a byte offset in the output.
  BoundedUtf8LengthSequence<kMaxChars> utf8_lengths;
  while (punycode_begin != punycode_end) {
    if (num_chars >= kMaxChars) return nullptr;
    const uint32_t old_i = i;
    if (!ScanNextDelta(punycode_begin, punycode_end, bias, i)) return nullptr;
    // Bias adaptation, RFC 3492 section 6.1.
    uint32_t delta = i - old_i;
    delta /= (old_i == 0 ? kDamp : 2);
    delta += delta/(num_chars + 1);
    bias = 0;
    while (delta > ((kBase - kTMin) * kTMax)/2) {
      delta /= kBase - kTMin;
      bias += kBase;
    }
    bias += ((kBase - kTMin + 1) * delta)/(delta + kSkew);
    static_assert(
        kMaxI + kMaxCodePoint < (uint64_t{1} << 32),
        "Make kMaxI smaller or n 64 bits wide to prevent silent wraparound");
    // Split the accumulated value into a code-point advance and an insertion
    // position within the characters decoded so far.
    n += i/(num_chars + 1);
    i %= num_chars + 1;
    Utf8ForCodePoint utf8_for_code_point(n);
    if (!utf8_for_code_point.ok()) return nullptr;
    // Must have room for the new character's bytes plus the trailing NUL.
    if (total_utf8_bytes + utf8_for_code_point.length + 1 > out_size) {
      return nullptr;
    }
    // Byte offset at which the new character is inserted.
    uint32_t n_index =
        utf8_lengths.InsertAndReturnSumOfPredecessors(
            i, utf8_for_code_point.length);
    // Shift the tail (including the NUL) right, then splice in the new bytes.
    std::memmove(
        out_begin + n_index + utf8_for_code_point.length, out_begin + n_index,
        total_utf8_bytes + 1 - n_index);
    std::memcpy(out_begin + n_index, utf8_for_code_point.bytes,
                utf8_for_code_point.length);
    total_utf8_bytes += utf8_for_code_point.length;
    ++num_chars;
    ++i;
  }
  return out_begin + total_utf8_bytes;
}
}
ABSL_NAMESPACE_END
} | #include "absl/debugging/internal/decode_rust_punycode.h"
#include <cstddef>
#include <cstring>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
using ::testing::AllOf;
using ::testing::Eq;
using ::testing::IsNull;
using ::testing::Pointee;
using ::testing::ResultOf;
using ::testing::StrEq;
// Fixture for DecodeRustPunycode tests. Each test sets punycode_ and
// plaintext_, then exercises the decoder with three buffer sizes: ample,
// exactly enough (content + NUL), and one byte too few (must fail).
class DecodeRustPunycodeTest : public ::testing::Test {
 protected:
  // Poisons the buffer so the decoder's own writes (including the NUL) are
  // observable and stale bytes cannot mask a missing write.
  void FillBufferWithNonzeroBytes() {
    std::memset(buffer_storage_, 0xab, sizeof(buffer_storage_));
  }
  // Options with the whole buffer available as output.
  DecodeRustPunycodeOptions WithAmpleSpace() {
    FillBufferWithNonzeroBytes();
    DecodeRustPunycodeOptions options;
    options.punycode_begin = punycode_.data();
    options.punycode_end = punycode_.data() + punycode_.size();
    options.out_begin = buffer_storage_;
    options.out_end = buffer_storage_ + sizeof(buffer_storage_);
    return options;
  }
  // Options with exactly plaintext_.size() + 1 output bytes (room for NUL).
  DecodeRustPunycodeOptions WithJustEnoughSpace() {
    FillBufferWithNonzeroBytes();
    const size_t begin_offset = sizeof(buffer_storage_) - plaintext_.size() - 1;
    DecodeRustPunycodeOptions options;
    options.punycode_begin = punycode_.data();
    options.punycode_end = punycode_.data() + punycode_.size();
    options.out_begin = buffer_storage_ + begin_offset;
    options.out_end = buffer_storage_ + sizeof(buffer_storage_);
    return options;
  }
  // Options with one byte less than needed; decoding must return nullptr.
  DecodeRustPunycodeOptions WithOneByteTooFew() {
    FillBufferWithNonzeroBytes();
    const size_t begin_offset = sizeof(buffer_storage_) - plaintext_.size();
    DecodeRustPunycodeOptions options;
    options.punycode_begin = punycode_.data();
    options.punycode_end = punycode_.data() + punycode_.size();
    options.out_begin = buffer_storage_ + begin_offset;
    options.out_end = buffer_storage_ + sizeof(buffer_storage_);
    return options;
  }
  // Matcher: the returned pointer addresses the NUL terminator immediately
  // after a byte-exact copy of `golden`.
  auto PointsToTheNulAfter(const std::string& golden) {
    const size_t golden_size = golden.size();
    return AllOf(
        Pointee(Eq('\0')),
        ResultOf("preceding string body",
                 [golden_size](const char* p) { return p - golden_size; },
                 StrEq(golden)));
  }
  std::string punycode_;   // Encoded input under test.
  std::string plaintext_;  // Expected decoded UTF-8 output.
  char buffer_storage_[1024];
};
TEST_F(DecodeRustPunycodeTest, MapsEmptyToEmpty) {
punycode_ = "";
plaintext_ = "";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest,
StripsTheTrailingDelimiterFromAPureRunOfBasicChars) {
punycode_ = "foo_";
plaintext_ = "foo";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, TreatsTheLastUnderscoreAsTheDelimiter) {
punycode_ = "foo_bar_";
plaintext_ = "foo_bar";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsALeadingUnderscoreIfNotTheDelimiter) {
punycode_ = "_foo_";
plaintext_ = "_foo";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, RejectsALeadingUnderscoreDelimiter) {
punycode_ = "_foo";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, RejectsEmbeddedNul) {
punycode_ = std::string("foo\0bar_", 8);
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, RejectsAsciiCharsOtherThanIdentifierChars) {
punycode_ = "foo\007_";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "foo-_";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "foo;_";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "foo\177_";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, RejectsRawNonAsciiChars) {
punycode_ = "\x80";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "\x80_";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "\xff";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "\xff_";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, RecognizesU0080) {
punycode_ = "a";
plaintext_ = "\xc2\x80";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, OneByteDeltaSequencesMustBeA) {
punycode_ = "b";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "z";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "0";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "9";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsDeltaSequenceBA) {
punycode_ = "ba";
plaintext_ = "\xc2\x81";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsOtherDeltaSequencesWithSecondByteA) {
punycode_ = "ca";
plaintext_ = "\xc2\x82";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "za";
plaintext_ = "\xc2\x99";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "0a";
plaintext_ = "\xc2\x9a";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "1a";
plaintext_ = "\xc2\x9b";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "9a";
plaintext_ = "£";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
}
TEST_F(DecodeRustPunycodeTest, RejectsDeltaWhereTheSecondAndLastDigitIsNotA) {
punycode_ = "bb";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "zz";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "00";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
punycode_ = "99";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsDeltasWithSecondByteBFollowedByA) {
punycode_ = "bba";
plaintext_ = "¤";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "cba";
plaintext_ = "¥";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "zba";
plaintext_ = "¼";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "0ba";
plaintext_ = "½";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "1ba";
plaintext_ = "¾";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
punycode_ = "9ba";
plaintext_ = "Æ";
EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
}
TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAlone) {
punycode_ = "0ca";
plaintext_ = "à";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharBeforeBasicChars) {
punycode_ = "_la_mode_yya";
plaintext_ = "à_la_mode";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAmidBasicChars) {
punycode_ = "verre__vin_m4a";
plaintext_ = "verre_à_vin";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAfterBasicChars) {
punycode_ = "belt_3na";
plaintext_ = "beltà";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedTwoByteChar) {
punycode_ = "0caaaa";
plaintext_ = "àààà";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsNearbyTwoByteCharsInOrder) {
punycode_ = "3camsuz";
plaintext_ = "ãéïôù";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsNearbyTwoByteCharsOutOfOrder) {
punycode_ = "3caltsx";
plaintext_ = "ùéôãï";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsThreeByteCharAlone) {
punycode_ = "fiq";
plaintext_ = "中";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedThreeByteChar) {
punycode_ = "fiqaaaa";
plaintext_ = "中中中中中";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsThreeByteCharsInOrder) {
punycode_ = "fiq228c";
plaintext_ = "中文";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsNearbyThreeByteCharsOutOfOrder) {
punycode_ = "fiq128c";
plaintext_ = "文中";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAlone) {
punycode_ = "uy7h";
plaintext_ = "🂻";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharBeforeBasicChars) {
punycode_ = "jack__uh63d";
plaintext_ = "jack_🂻";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAmidBasicChars) {
punycode_ = "jack__of_hearts_ki37n";
plaintext_ = "jack_🂻_of_hearts";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAfterBasicChars) {
punycode_ = "_of_hearts_kz45i";
plaintext_ = "🂻_of_hearts";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedFourByteChar) {
punycode_ = "uy7haaaa";
plaintext_ = "🂻🂻🂻🂻🂻";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsNearbyFourByteCharsInOrder) {
punycode_ = "8x7hcjmf";
plaintext_ = "🂦🂧🂪🂭🂮";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsNearbyFourByteCharsOutOfOrder) {
punycode_ = "8x7hcild";
plaintext_ = "🂮🂦🂭🂪🂧";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsAMixtureOfByteLengths) {
punycode_ = "3caltsx2079ivf8aiuy7cja3a6ak";
plaintext_ = "ùéôãï中文🂮🂦🂭🂪🂧";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, RejectsOverlargeDeltas) {
punycode_ = "123456789a";
EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, Beowulf) {
punycode_ = "hwt_we_gardena_in_geardagum_"
"eodcyninga_rym_gefrunon_"
"hu_a_elingas_ellen_fremedon_hxg9c70do9alau";
plaintext_ = "hwæt_we_gardena_in_geardagum_"
"þeodcyninga_þrym_gefrunon_"
"hu_ða_æþelingas_ellen_fremedon";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, MengHaoran) {
punycode_ = "gmq4ss0cfvao1e2wg8mcw8b0wkl9a7tt90a8riuvbk7t8kbv9a66ogofvzlf6"
"3d01ybn1u28dyqi5q2cxyyxnk5d2gx1ks9ddvfm17bk6gbsd6wftrav60u4ta";
plaintext_ = "故人具雞黍" "邀我至田家"
"綠樹村邊合" "青山郭外斜"
"開軒面場圃" "把酒話桑麻"
"待到重陽日" "還來就菊花";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, YamanoueNoOkura) {
punycode_ = "48jdaa3a6ccpepjrsmlb0q4bwcdtid8fg6c0cai9822utqeruk3om0u4f2wbp0"
"em23do0op23cc2ff70mb6tae8aq759gja";
plaintext_ = "瓜食めば"
"子ども思ほゆ"
"栗食めば"
"まして偲はゆ"
"何処より"
"来りしものそ"
"眼交に"
"もとな懸りて"
"安眠し寝さぬ";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, EshmunazarSarcophagus) {
punycode_ = "wj9caaabaabbaaohcacxvhdc7bgxbccbdcjeacddcedcdlddbdbddcdbdcknfcee"
"ifel8del2a7inq9fhcpxikms7a4a9ac9ataaa0g";
plaintext_ = "𐤁𐤉𐤓𐤇𐤁𐤋𐤁𐤔𐤍𐤕𐤏𐤎𐤓"
"𐤅𐤀𐤓𐤁𐤏𐤗𐤖𐤖𐤖𐤖𐤋𐤌𐤋𐤊𐤉𐤌𐤋𐤊"
"𐤀𐤔𐤌𐤍𐤏𐤆𐤓𐤌𐤋𐤊𐤑𐤃𐤍𐤌"
"𐤁𐤍𐤌𐤋𐤊𐤕𐤁𐤍𐤕𐤌𐤋𐤊𐤑𐤃𐤍𐤌"
"𐤃𐤁𐤓𐤌𐤋𐤊𐤀𐤔𐤌𐤍𐤏𐤆𐤓𐤌𐤋𐤊"
"𐤑𐤃𐤍𐤌𐤋𐤀𐤌𐤓𐤍𐤂𐤆𐤋𐤕";
ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
PointsToTheNulAfter(plaintext_));
ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
PointsToTheNulAfter(plaintext_));
EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/decode_rust_punycode.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/decode_rust_punycode_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
4a05b0eb-d44a-4a63-8905-bc98c7271120 | cpp | google/cel-cpp | benchmark | internal/benchmark.h | eval/tests/benchmark_test.cc | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_BENCHMARK_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_BENCHMARK_H_
#include "benchmark/benchmark.h"
#endif | #include "internal/benchmark.h"
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/rpc/context/attribute_context.pb.h"
#include "google/protobuf/text_format.h"
#include "absl/base/attributes.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_set.h"
#include "absl/flags/flag.h"
#include "absl/strings/match.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/tests/request_context.pb.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "google/protobuf/arena.h"
ABSL_FLAG(bool, enable_optimizations, false, "enable const folding opt");
ABSL_FLAG(bool, enable_recursive_planning, false, "enable recursive planning");
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::rpc::context::AttributeContext;
InterpreterOptions GetOptions(google::protobuf::Arena& arena) {
InterpreterOptions options;
if (absl::GetFlag(FLAGS_enable_optimizations)) {
options.constant_arena = &arena;
options.constant_folding = true;
}
if (absl::GetFlag(FLAGS_enable_recursive_planning)) {
options.max_recursion_depth = -1;
}
return options;
}
static void BM_Eval(benchmark::State& state) {
google::protobuf::Arena arena;
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
int len = state.range(0);
Expr root_expr;
Expr* cur_expr = &root_expr;
for (int i = 0; i < len; i++) {
Expr::Call* call = cur_expr->mutable_call_expr();
call->set_function("_+_");
call->add_args()->mutable_const_expr()->set_int64_value(1);
cur_expr = call->add_args();
}
cur_expr->mutable_const_expr()->set_int64_value(1);
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&root_expr, &source_info));
for (auto _ : state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsInt64());
ASSERT_TRUE(result.Int64OrDie() == len + 1);
}
}
BENCHMARK(BM_Eval)->Range(1, 10000);
absl::Status EmptyCallback(int64_t expr_id, const CelValue& value,
google::protobuf::Arena* arena) {
return absl::OkStatus();
}
static void BM_Eval_Trace(benchmark::State& state) {
google::protobuf::Arena arena;
InterpreterOptions options = GetOptions(arena);
options.enable_recursive_tracing = true;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
int len = state.range(0);
Expr root_expr;
Expr* cur_expr = &root_expr;
for (int i = 0; i < len; i++) {
Expr::Call* call = cur_expr->mutable_call_expr();
call->set_function("_+_");
call->add_args()->mutable_const_expr()->set_int64_value(1);
cur_expr = call->add_args();
}
cur_expr->mutable_const_expr()->set_int64_value(1);
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&root_expr, &source_info));
for (auto _ : state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Trace(activation, &arena, EmptyCallback));
ASSERT_TRUE(result.IsInt64());
ASSERT_TRUE(result.Int64OrDie() == len + 1);
}
}
BENCHMARK(BM_Eval_Trace)->Range(1, 10000);
static void BM_EvalString(benchmark::State& state) {
google::protobuf::Arena arena;
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
int len = state.range(0);
Expr root_expr;
Expr* cur_expr = &root_expr;
for (int i = 0; i < len; i++) {
Expr::Call* call = cur_expr->mutable_call_expr();
call->set_function("_+_");
call->add_args()->mutable_const_expr()->set_string_value("a");
cur_expr = call->add_args();
}
cur_expr->mutable_const_expr()->set_string_value("a");
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&root_expr, &source_info));
for (auto _ : state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsString());
ASSERT_TRUE(result.StringOrDie().value().size() == len + 1);
}
}
BENCHMARK(BM_EvalString)->Range(1, 10000);
static void BM_EvalString_Trace(benchmark::State& state) {
google::protobuf::Arena arena;
InterpreterOptions options = GetOptions(arena);
options.enable_recursive_tracing = true;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
int len = state.range(0);
Expr root_expr;
Expr* cur_expr = &root_expr;
for (int i = 0; i < len; i++) {
Expr::Call* call = cur_expr->mutable_call_expr();
call->set_function("_+_");
call->add_args()->mutable_const_expr()->set_string_value("a");
cur_expr = call->add_args();
}
cur_expr->mutable_const_expr()->set_string_value("a");
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&root_expr, &source_info));
for (auto _ : state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Trace(activation, &arena, EmptyCallback));
ASSERT_TRUE(result.IsString());
ASSERT_TRUE(result.StringOrDie().value().size() == len + 1);
}
}
BENCHMARK(BM_EvalString_Trace)->Range(1, 10000);
const char kIP[] = "10.0.1.2";
const char kPath[] = "/admin/edit";
const char kToken[] = "admin";
ABSL_ATTRIBUTE_NOINLINE
bool NativeCheck(absl::btree_map<std::string, std::string>& attributes,
const absl::flat_hash_set<std::string>& denylists,
const absl::flat_hash_set<std::string>& allowlists) {
auto& ip = attributes["ip"];
auto& path = attributes["path"];
auto& token = attributes["token"];
if (denylists.find(ip) != denylists.end()) {
return false;
}
if (absl::StartsWith(path, "v1")) {
if (token == "v1" || token == "v2" || token == "admin") {
return true;
}
} else if (absl::StartsWith(path, "v2")) {
if (token == "v2" || token == "admin") {
return true;
}
} else if (absl::StartsWith(path, "/admin")) {
if (token == "admin") {
if (allowlists.find(ip) != allowlists.end()) {
return true;
}
}
}
return false;
}
void BM_PolicyNative(benchmark::State& state) {
const auto denylists =
absl::flat_hash_set<std::string>{"10.0.1.4", "10.0.1.5", "10.0.1.6"};
const auto allowlists =
absl::flat_hash_set<std::string>{"10.0.1.1", "10.0.1.2", "10.0.1.3"};
auto attributes = absl::btree_map<std::string, std::string>{
{"ip", kIP}, {"token", kToken}, {"path", kPath}};
for (auto _ : state) {
auto result = NativeCheck(attributes, denylists, allowlists);
ASSERT_TRUE(result);
}
}
BENCHMARK(BM_PolicyNative);
void BM_PolicySymbolic(benchmark::State& state) {
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
!(ip in ["10.0.1.4", "10.0.1.5", "10.0.1.6"]) &&
((path.startsWith("v1") && token in ["v1", "v2", "admin"]) ||
(path.startsWith("v2") && token in ["v2", "admin"]) ||
(path.startsWith("/admin") && token == "admin" && ip in [
"10.0.1.1", "10.0.1.2", "10.0.1.3"
])
))cel"));
InterpreterOptions options = GetOptions(arena);
options.constant_folding = true;
options.constant_arena = &arena;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(
&parsed_expr.expr(), &source_info));
Activation activation;
activation.InsertValue("ip", CelValue::CreateStringView(kIP));
activation.InsertValue("path", CelValue::CreateStringView(kPath));
activation.InsertValue("token", CelValue::CreateStringView(kToken));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_PolicySymbolic);
class RequestMap : public CelMap {
public:
absl::optional<CelValue> operator[](CelValue key) const override {
if (!key.IsString()) {
return {};
}
auto value = key.StringOrDie().value();
if (value == "ip") {
return CelValue::CreateStringView(kIP);
} else if (value == "path") {
return CelValue::CreateStringView(kPath);
} else if (value == "token") {
return CelValue::CreateStringView(kToken);
}
return {};
}
int size() const override { return 3; }
absl::StatusOr<const CelList*> ListKeys() const override {
return absl::UnimplementedError("CelMap::ListKeys is not implemented");
}
};
void BM_PolicySymbolicMap(benchmark::State& state) {
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
!(request.ip in ["10.0.1.4", "10.0.1.5", "10.0.1.6"]) &&
((request.path.startsWith("v1") && request.token in ["v1", "v2", "admin"]) ||
(request.path.startsWith("v2") && request.token in ["v2", "admin"]) ||
(request.path.startsWith("/admin") && request.token == "admin" &&
request.ip in ["10.0.1.1", "10.0.1.2", "10.0.1.3"])
))cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(
&parsed_expr.expr(), &source_info));
Activation activation;
RequestMap request;
activation.InsertValue("request", CelValue::CreateMap(&request));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_PolicySymbolicMap);
void BM_PolicySymbolicProto(benchmark::State& state) {
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
!(request.ip in ["10.0.1.4", "10.0.1.5", "10.0.1.6"]) &&
((request.path.startsWith("v1") && request.token in ["v1", "v2", "admin"]) ||
(request.path.startsWith("v2") && request.token in ["v2", "admin"]) ||
(request.path.startsWith("/admin") && request.token == "admin" &&
request.ip in ["10.0.1.1", "10.0.1.2", "10.0.1.3"])
))cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
SourceInfo source_info;
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder->CreateExpression(
&parsed_expr.expr(), &source_info));
Activation activation;
RequestContext request;
request.set_ip(kIP);
request.set_path(kPath);
request.set_token(kToken);
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_PolicySymbolicProto);
constexpr char kListSum[] = R"(
id: 1
comprehension_expr: <
accu_var: "__result__"
iter_var: "x"
iter_range: <
id: 2
ident_expr: <
name: "list_var"
>
>
accu_init: <
id: 3
const_expr: <
int64_value: 0
>
>
loop_step: <
id: 4
call_expr: <
function: "_+_"
args: <
id: 5
ident_expr: <
name: "__result__"
>
>
args: <
id: 6
ident_expr: <
name: "x"
>
>
>
>
loop_condition: <
id: 7
const_expr: <
bool_value: true
>
>
result: <
id: 8
ident_expr: <
name: "__result__"
>
>
>)";
void BM_Comprehension(benchmark::State& state) {
google::protobuf::Arena arena;
Expr expr;
Activation activation;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kListSum, &expr));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options = GetOptions(arena);
options.comprehension_max_iterations = 10000000;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&expr, nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsInt64());
ASSERT_EQ(result.Int64OrDie(), len);
}
}
BENCHMARK(BM_Comprehension)->Range(1, 1 << 20);
void BM_Comprehension_Trace(benchmark::State& state) {
google::protobuf::Arena arena;
Expr expr;
Activation activation;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kListSum, &expr));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options = GetOptions(arena);
options.enable_recursive_tracing = true;
options.comprehension_max_iterations = 10000000;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&expr, nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Trace(activation, &arena, EmptyCallback));
ASSERT_TRUE(result.IsInt64());
ASSERT_EQ(result.Int64OrDie(), len);
}
}
BENCHMARK(BM_Comprehension_Trace)->Range(1, 1 << 20);
void BM_HasMap(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("has(request.path) && !has(request.ip)"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
std::vector<std::pair<CelValue, CelValue>> map_pairs{
{CelValue::CreateStringView("path"), CelValue::CreateStringView("path")}};
auto cel_map =
CreateContainerBackedMap(absl::Span<std::pair<CelValue, CelValue>>(
map_pairs.data(), map_pairs.size()));
activation.InsertValue("request", CelValue::CreateMap((*cel_map).get()));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_HasMap);
void BM_HasProto(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("has(request.path) && !has(request.ip)"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
auto reg_status = RegisterBuiltinFunctions(builder->GetRegistry(), options);
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
RequestContext request;
request.set_path(kPath);
request.set_token(kToken);
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_HasProto);
void BM_HasProtoMap(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("has(request.headers.create_time) && "
"!has(request.headers.update_time)"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
auto reg_status = RegisterBuiltinFunctions(builder->GetRegistry(), options);
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
RequestContext request;
request.mutable_headers()->insert({"create_time", "2021-01-01"});
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_HasProtoMap);
void BM_ReadProtoMap(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
request.headers.create_time == "2021-01-01"
)cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
auto reg_status = RegisterBuiltinFunctions(builder->GetRegistry(), options);
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
RequestContext request;
request.mutable_headers()->insert({"create_time", "2021-01-01"});
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_ReadProtoMap);
void BM_NestedProtoFieldRead(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
!request.a.b.c.d.e
)cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
auto reg_status = RegisterBuiltinFunctions(builder->GetRegistry(), options);
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
RequestContext request;
request.mutable_a()->mutable_b()->mutable_c()->mutable_d()->set_e(false);
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_NestedProtoFieldRead);
void BM_NestedProtoFieldReadDefaults(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
!request.a.b.c.d.e
)cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
auto reg_status = RegisterBuiltinFunctions(builder->GetRegistry(), options);
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
RequestContext request;
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_NestedProtoFieldReadDefaults);
void BM_ProtoStructAccess(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
has(request.auth.claims.iss) && request.auth.claims.iss == 'accounts.google.com'
)cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
AttributeContext::Request request;
auto* auth = request.mutable_auth();
(*auth->mutable_claims()->mutable_fields())["iss"].set_string_value(
"accounts.google.com");
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_ProtoStructAccess);
void BM_ProtoListAccess(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse(R"cel(
"
)cel"));
InterpreterOptions options = GetOptions(arena);
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&parsed_expr.expr(), nullptr));
AttributeContext::Request request;
auto* auth = request.mutable_auth();
auth->add_access_levels("
auth->add_access_levels("
auth->add_access_levels("
auth->add_access_levels("
auth->add_access_levels("
activation.InsertValue("request",
CelProtoWrapper::CreateMessage(&request, &arena));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
}
BENCHMARK(BM_ProtoListAccess);
constexpr char kNestedListSum[] = R"(
id: 1
comprehension_expr: <
accu_var: "__result__"
iter_var: "x"
iter_range: <
id: 2
ident_expr: <
name: "list_var"
>
>
accu_init: <
id: 3
const_expr: <
int64_value: 0
>
>
loop_step: <
id: 4
call_expr: <
function: "_+_"
args: <
id: 5
ident_expr: <
name: "__result__"
>
>
args: <
id: 6
comprehension_expr: <
accu_var: "__result__"
iter_var: "x"
iter_range: <
id: 9
ident_expr: <
name: "list_var"
>
>
accu_init: <
id: 10
const_expr: <
int64_value: 0
>
>
loop_step: <
id: 11
call_expr: <
function: "_+_"
args: <
id: 12
ident_expr: <
name: "__result__"
>
>
args: <
id: 13
ident_expr: <
name: "x"
>
>
>
>
loop_condition: <
id: 14
const_expr: <
bool_value: true
>
>
result: <
id: 15
ident_expr: <
name: "__result__"
>
>
>
>
>
>
loop_condition: <
id: 7
const_expr: <
bool_value: true
>
>
result: <
id: 8
ident_expr: <
name: "__result__"
>
>
>)";
void BM_NestedComprehension(benchmark::State& state) {
google::protobuf::Arena arena;
Expr expr;
Activation activation;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kNestedListSum, &expr));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options = GetOptions(arena);
options.comprehension_max_iterations = 10000000;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&expr, nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsInt64());
ASSERT_EQ(result.Int64OrDie(), len * len);
}
}
BENCHMARK(BM_NestedComprehension)->Range(1, 1 << 10);
void BM_NestedComprehension_Trace(benchmark::State& state) {
google::protobuf::Arena arena;
Expr expr;
Activation activation;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kNestedListSum, &expr));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options = GetOptions(arena);
options.comprehension_max_iterations = 10000000;
options.enable_comprehension_list_append = true;
options.enable_recursive_tracing = true;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder->CreateExpression(&expr, nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Trace(activation, &arena, EmptyCallback));
ASSERT_TRUE(result.IsInt64());
ASSERT_EQ(result.Int64OrDie(), len * len);
}
}
BENCHMARK(BM_NestedComprehension_Trace)->Range(1, 1 << 10);
void BM_ListComprehension(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("list_var.map(x, x * 2)"));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options = GetOptions(arena);
options.comprehension_max_iterations = 10000000;
options.enable_comprehension_list_append = true;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(
auto cel_expr, builder->CreateExpression(&(parsed_expr.expr()), nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsList());
ASSERT_EQ(result.ListOrDie()->size(), len);
}
}
BENCHMARK(BM_ListComprehension)->Range(1, 1 << 16);
void BM_ListComprehension_Trace(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("list_var.map(x, x * 2)"));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options = GetOptions(arena);
options.comprehension_max_iterations = 10000000;
options.enable_comprehension_list_append = true;
options.enable_recursive_tracing = true;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(
auto cel_expr, builder->CreateExpression(&(parsed_expr.expr()), nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Trace(activation, &arena, EmptyCallback));
ASSERT_TRUE(result.IsList());
ASSERT_EQ(result.ListOrDie()->size(), len);
}
}
BENCHMARK(BM_ListComprehension_Trace)->Range(1, 1 << 16);
void BM_ListComprehension_Opt(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("list_var.map(x, x * 2)"));
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
ContainerBackedListImpl cel_list(std::move(list));
activation.InsertValue("list_var", CelValue::CreateList(&cel_list));
InterpreterOptions options;
options.constant_arena = &arena;
options.constant_folding = true;
options.comprehension_max_iterations = 10000000;
options.enable_comprehension_list_append = true;
auto builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry()));
ASSERT_OK_AND_ASSIGN(
auto cel_expr, builder->CreateExpression(&(parsed_expr.expr()), nullptr));
for (auto _ : state) {
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsList());
ASSERT_EQ(result.ListOrDie()->size(), len);
}
}
BENCHMARK(BM_ListComprehension_Opt)->Range(1, 1 << 16);
void BM_ComprehensionCpp(benchmark::State& state) {
google::protobuf::Arena arena;
Activation activation;
int len = state.range(0);
std::vector<CelValue> list;
list.reserve(len);
for (int i = 0; i < len; i++) {
list.push_back(CelValue::CreateInt64(1));
}
auto op = [&list]() {
int sum = 0;
for (const auto& value : list) {
sum += value.Int64OrDie();
}
return sum;
};
for (auto _ : state) {
int result = op();
ASSERT_EQ(result, len);
}
}
BENCHMARK(BM_ComprehensionCpp)->Range(1, 1 << 20);
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/benchmark.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/tests/benchmark_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
2366ba18-350e-4b10-bef5-ae4764ba8121 | cpp | google/arolla | expr_utils | arolla/expr/eval/expr_utils.cc | arolla/expr/eval/expr_utils_test.cc | #include "arolla/expr/eval/expr_utils.h"
#include <functional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
absl::StatusOr<ExprNodePtr> ExtractLambda(
const ExprNodePtr& expr,
std::function<absl::StatusOr<bool>(const ExprNodePtr&)> is_in_lambda) {
struct Task {
enum class Stage { kPreorder, kPostorder };
ExprNodePtr node;
Stage stage;
};
std::vector<ExprNodePtr> lambda_args;
ExprOperatorSignature lambda_signature;
absl::flat_hash_set<Fingerprint> previsited;
absl::flat_hash_map<Fingerprint, ExprNodePtr> new_nodes;
std::stack<Task> tasks;
tasks.push(Task{.node = expr, .stage = Task::Stage::kPreorder});
int next_placeholder = 0;
while (!tasks.empty()) {
auto [node, stage] = std::move(tasks.top());
tasks.pop();
if (stage == Task::Stage::kPreorder) {
if (!previsited.insert(node->fingerprint()).second) {
continue;
}
ASSIGN_OR_RETURN(bool in_lambda, is_in_lambda(node));
if (in_lambda) {
tasks.push(Task{.node = node, .stage = Task::Stage::kPostorder});
for (auto dep = node->node_deps().rbegin();
dep != node->node_deps().rend(); ++dep) {
tasks.push(Task{.node = *dep, .stage = Task::Stage::kPreorder});
}
} else {
auto [it, inserted] = new_nodes.insert({node->fingerprint(), nullptr});
if (inserted) {
it->second = Placeholder(absl::StrCat("_", next_placeholder++));
lambda_args.emplace_back(node);
lambda_signature.parameters.push_back(
ExprOperatorSignature::Parameter{
.name = it->second->placeholder_key()});
}
}
} else {
std::vector<ExprNodePtr> new_deps;
new_deps.reserve(node->node_deps().size());
for (const auto& dep : node->node_deps()) {
new_deps.push_back(new_nodes.at(dep->fingerprint()));
}
ASSIGN_OR_RETURN(new_nodes[node->fingerprint()],
WithNewDependencies(node, new_deps));
}
}
ASSIGN_OR_RETURN(
ExprOperatorPtr lambda,
MakeLambdaOperator(lambda_signature, new_nodes.at(expr->fingerprint())));
return MakeOpNode(lambda, lambda_args);
}
} | #include "arolla/expr/eval/expr_utils.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/testing/testing.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::testing::ElementsAre;
using ::testing::Pointee;
using ::testing::Property;
using ::testing::WhenDynamicCastTo;
TEST(ExptUtilsTest, ExtractLambda) {
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("math.add", {CallOp("math.add", {Leaf("x"), Leaf("y")}),
Literal(1.0)}));
auto is_op = [](const ExprNodePtr& node) -> absl::StatusOr<bool> {
return node->is_op();
};
ASSERT_OK_AND_ASSIGN(auto expr_as_lambda, ExtractLambda(expr, is_op));
EXPECT_THAT(expr_as_lambda->node_deps(),
ElementsAre(EqualsExpr(Leaf("x")), EqualsExpr(Leaf("y")),
EqualsExpr(Literal(1.0))));
EXPECT_THAT(
expr_as_lambda->op().get(),
WhenDynamicCastTo<const LambdaOperator*>(Pointee(Property(
&LambdaOperator::lambda_body,
EqualsExpr(CallOp(
"math.add",
{CallOp("math.add", {Placeholder("_0"), Placeholder("_1")}),
Placeholder("_2")}))))));
}
// Verifies deduplication: a subexpression excluded by the filter appears only
// once in node_deps even when referenced multiple times, and both references
// in the lambda body map to the same placeholder.
TEST(ExprUtilsTest, ExtractLambda_WithSameSubnodes) {  // Fixed typo: Expt->Expr.
  ASSERT_OK_AND_ASSIGN(
      auto to_keep_out,
      CallOp("math.add",
             {CallOp("math.add", {Leaf("x"), Leaf("y")}), Literal(1.0)}));
  ASSERT_OK_AND_ASSIGN(auto to_keep_in,
                       CallOp("math.add", {Literal(2.0), Literal(1.0)}));
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add",
             {CallOp("math.add", {to_keep_out, to_keep_in}), to_keep_out}));
  // Exclude only `to_keep_out` (matched by fingerprint) from the lambda body.
  auto keep_in = [=](const ExprNodePtr& node) -> absl::StatusOr<bool> {
    return node->fingerprint() != to_keep_out->fingerprint();
  };
  ASSERT_OK_AND_ASSIGN(auto expr_as_lambda, ExtractLambda(expr, keep_in));
  EXPECT_THAT(expr_as_lambda->node_deps(),
              ElementsAre(EqualsExpr(to_keep_out)));
  EXPECT_THAT(
      expr_as_lambda->op().get(),
      WhenDynamicCastTo<const LambdaOperator*>(Pointee(Property(
          &LambdaOperator::lambda_body,
          EqualsExpr(CallOp(
              "math.add", {CallOp("math.add", {Placeholder("_0"), to_keep_in}),
                           Placeholder("_0")}))))));
}
// Verifies the degenerate case: a filter rejecting every node produces an
// identity lambda whose single dependency is the whole original expression.
TEST(ExprUtilsTest, ExtractLambda_AllFalse) {  // Fixed typo: Expt->Expr.
  ASSERT_OK_AND_ASSIGN(
      auto expr, CallOp("math.add", {CallOp("math.add", {Leaf("x"), Leaf("y")}),
                                     Literal(1.0)}));
  auto all_false = [](const ExprNodePtr& node) -> absl::StatusOr<bool> {
    return false;
  };
  ASSERT_OK_AND_ASSIGN(auto expr_as_lambda, ExtractLambda(expr, all_false));
  EXPECT_THAT(expr_as_lambda->node_deps(), ElementsAre(EqualsExpr(expr)));
  EXPECT_THAT(
      expr_as_lambda->op().get(),
      WhenDynamicCastTo<const LambdaOperator*>(Pointee(Property(
          &LambdaOperator::lambda_body, EqualsExpr(Placeholder("_0"))))));
}
// Verifies that an error returned by the filter predicate is propagated
// unchanged out of ExtractLambda.
TEST(ExprUtilsTest, ExtractLambda_FilterFails) {  // Fixed typo: Expt->Expr.
  ASSERT_OK_AND_ASSIGN(
      auto expr,
      CallOp("math.add",
             {CallOp("math.subtract", {Leaf("x"), Leaf("y")}), Literal(1.0)}));
  auto returns_error = [](const ExprNodePtr& node) -> absl::StatusOr<bool> {
    if (node->is_op() && node->op()->display_name() == "math.subtract") {
      return absl::InvalidArgumentError("foo");
    }
    return true;
  };
  EXPECT_THAT(ExtractLambda(expr, returns_error),
              StatusIs(absl::StatusCode::kInvalidArgument, "foo"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/expr_utils.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/expr_utils_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
44ef2692-2606-4e6a-bfc5-981b4ffcf957 | cpp | google/tensorstore | kvs_backed_chunk_driver | tensorstore/driver/kvs_backed_chunk_driver.cc | tensorstore/driver/kvs_backed_chunk_driver_test.cc | #include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include <stddef.h>
#include <cassert>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/absl_log.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/box.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/driver/driver.h"
#include "tensorstore/driver/driver_handle.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/internal/box_difference.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/async_initialized_cache_mixin.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/internal/cache/kvs_backed_chunk_cache.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/chunk_grid_specification.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/staleness_bound.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/open_mode_spec.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/unowned_to_shared.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/open_mode.h"
#include "tensorstore/open_options.h"
#include "tensorstore/rank.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/schema.h"
#include "tensorstore/staleness_bound.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/iterate_over_index_range.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#ifndef TENSORSTORE_KVS_DRIVER_DEBUG
#define TENSORSTORE_KVS_DRIVER_DEBUG 0
#endif
namespace tensorstore {
namespace internal_kvs_backed_chunk_driver {
MetadataOpenState::~MetadataOpenState() = default;
DataCacheBase::~DataCacheBase() = default;
// Default implementation: returns a default-constructed (invalid) transform;
// callers check `valid()` and skip the external->internal remapping when it
// is invalid (see GetBoundSpecData below).
Result<IndexTransform<>> DataCacheBase::GetExternalToInternalTransform(
    const void* metadata, size_t component_index) {
  return IndexTransform<>();
}
// Captures the open request's transaction/batch/spec/read-write mode and
// records the request time, which later bounds staleness at open.
MetadataOpenState::MetadataOpenState(Initializer initializer)
    : PrivateOpenState{std::move(initializer.request.transaction),
                       std::move(initializer.request.batch),
                       std::move(initializer.spec),
                       initializer.request.read_write_mode} {
  request_time_ = absl::Now();
}
// Default: no driver-specific contribution to the metadata cache key.
std::string MetadataOpenState::GetMetadataCacheKey() { return {}; }
// Default: metadata lives in the same kvstore as the base store.
Result<kvstore::DriverPtr> MetadataOpenState::GetMetadataKeyValueStore(
    kvstore::DriverPtr base_kv_store) {
  return base_kv_store;
}
// Default: chunk data lives in the same kvstore as the base store.
Result<kvstore::DriverPtr> OpenState::GetDataKeyValueStore(
    kvstore::DriverPtr base_kv_store, const void* metadata) {
  return base_kv_store;
}
// Default: drivers support both reading and writing.
ReadWriteMode MetadataOpenState::GetReadWriteMode(const void* metadata) {
  return ReadWriteMode::read_write;
}
// Default create semantics: creation fails if metadata already exists.
AtomicUpdateConstraint MetadataOpenState::GetCreateConstraint() {
  return AtomicUpdateConstraint::kRequireMissing;
}
// Default: the data cache uses its own cache pool rather than sharing the
// metadata cache's pool.
bool OpenState::DataCacheUsesMetadataCachePool(const void* metadata_ptr) {
  return false;
}
// Constructs the metadata cache.  The base class is given a null kvstore
// driver pointer here; the actual kvstore is supplied separately during
// initialization (NOTE(review): not visible in this chunk — confirm).
MetadataCache::MetadataCache(Initializer initializer)
    : Base(kvstore::DriverPtr()),
      data_copy_concurrency_(std::move(initializer.data_copy_concurrency)),
      metadata_cache_pool_(std::move(initializer.cache_pool)) {}
// Adopts the metadata cache entry, the metadata snapshot seen at open time,
// and the cache pool from the initializer.
DataCacheBase::DataCacheBase(Initializer&& initializer)
    : metadata_cache_entry_(std::move(initializer.metadata_cache_entry)),
      initial_metadata_(std::move(initializer.metadata)),
      cache_pool_(std::move(initializer.cache_pool)) {}
// Chunked data cache: hands the kvstore to KvsBackedChunkCache and stores
// the chunk grid specification.
DataCache::DataCache(Initializer&& initializer,
                     internal::ChunkGridSpecification&& grid)
    : KvsBackedChunkCache(std::move(initializer.store)),
      ChunkedDataCacheBase(std::move(initializer)),
      grid_(std::move(grid)) {}
namespace {
using MetadataPtr = std::shared_ptr<const void>;
const char invalid_metadata = 0;
// Builds the FailedPrecondition error reported when a resize would affect an
// output dimension that the caller did not ask to resize.
//
// When `affected_inclusive_min < affected_exclusive_max`, the affected range
// is a normal half-open interval and the error points at the missing
// `resize_tied_bounds` option; otherwise the affected range lies outside the
// current bounds and the interval endpoints are reported swapped.
absl::Status ShapeConstraintError(DimensionIndex output_dim,
                                  DimensionIndex affected_inclusive_min,
                                  DimensionIndex affected_exclusive_max) {
  assert(affected_inclusive_min != affected_exclusive_max);
  const bool in_bounds = affected_inclusive_min < affected_exclusive_max;
  if (!in_bounds) {
    return absl::FailedPreconditionError(tensorstore::StrCat(
        "Resize operation would also affect output dimension ", output_dim,
        " over the out-of-bounds interval ",
        IndexInterval::UncheckedHalfOpen(affected_exclusive_max,
                                         affected_inclusive_min)));
  }
  return absl::FailedPreconditionError(tensorstore::StrCat(
      "Resize operation would also affect output dimension ", output_dim,
      " over the interval ",
      IndexInterval::UncheckedHalfOpen(affected_inclusive_min,
                                       affected_exclusive_max),
      " but `resize_tied_bounds` was not specified"));
}
// Combines the requested bounds with `existing`: a bound left implicit
// (kImplicit, resolved by ExplicitIndexOr) keeps the existing bound.
IndexInterval GetNewIndexInterval(IndexInterval existing,
                                  Index new_inclusive_min,
                                  Index new_exclusive_max) {
  const Index lower =
      ExplicitIndexOr(new_inclusive_min, existing.inclusive_min());
  const Index upper =
      ExplicitIndexOr(new_exclusive_max, existing.exclusive_max());
  return IndexInterval::UncheckedHalfOpen(lower, upper);
}
// Checks that every explicit entry of the min/max constraint vectors matches
// the corresponding bound of `current_domain` exactly (implicit entries are
// skipped via ImplicitOrEqual).  On mismatch returns the corresponding
// ShapeConstraintError.
absl::Status ValidateResizeDomainConstraint(
    BoxView<> current_domain, span<const Index> inclusive_min_constraint,
    span<const Index> exclusive_max_constraint) {
  assert(current_domain.rank() == inclusive_min_constraint.size());
  assert(current_domain.rank() == exclusive_max_constraint.size());
  const DimensionIndex rank = current_domain.rank();
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    const IndexInterval interval = current_domain[dim];
    if (!ImplicitOrEqual(inclusive_min_constraint[dim],
                         interval.inclusive_min())) {
      return ShapeConstraintError(dim, interval.inclusive_min(),
                                  inclusive_min_constraint[dim]);
    }
    if (!ImplicitOrEqual(exclusive_max_constraint[dim],
                         interval.exclusive_max())) {
      return ShapeConstraintError(dim, exclusive_max_constraint[dim],
                                  interval.exclusive_max());
    }
  }
  return absl::OkStatus();
}
// Enforces the `expand_only` / `shrink_only` resize options per dimension:
// `shrink_only` fails if any dimension's new interval is not contained in
// the current one; `expand_only` fails if any dimension's new interval does
// not contain the current one.  Implicit new bounds keep the current bound
// (via GetNewIndexInterval).
absl::Status ValidateExpandShrinkConstraints(
    BoxView<> current_domain, span<const Index> new_inclusive_min,
    span<const Index> new_exclusive_max, bool expand_only, bool shrink_only) {
  assert(current_domain.rank() == new_inclusive_min.size());
  assert(current_domain.rank() == new_exclusive_max.size());
  for (DimensionIndex i = 0; i < current_domain.rank(); ++i) {
    const IndexInterval cur_interval = current_domain[i];
    const IndexInterval new_interval = GetNewIndexInterval(
        cur_interval, new_inclusive_min[i], new_exclusive_max[i]);
    if (shrink_only && !Contains(cur_interval, new_interval)) {
      return absl::FailedPreconditionError(
          tensorstore::StrCat("Resize operation would expand output dimension ",
                              i, " from ", cur_interval, " to ", new_interval,
                              " but `shrink_only` was specified"));
    }
    if (expand_only && !Contains(new_interval, cur_interval)) {
      return absl::FailedPreconditionError(
          tensorstore::StrCat("Resize operation would shrink output dimension ",
                              i, " from ", cur_interval, " to ", new_interval,
                              " but `expand_only` was specified"));
    }
  }
  return absl::OkStatus();
}
// Formats the "Metadata at <key> does not exist" message, describing the key
// via the owning metadata cache's kvstore driver.
std::string GetMetadataMissingErrorMessage(
    MetadataCache::Entry* metadata_cache_entry) {
  return tensorstore::StrCat(
      "Metadata at ",
      GetOwningCache(*metadata_cache_entry)
          .kvstore_driver()
          ->DescribeKey(metadata_cache_entry->GetKeyValueStoreKey()),
      " does not exist");
}
// Validates `new_metadata` against the metadata snapshot the cache was
// opened with.  Returns FailedPrecondition if `new_metadata` is null
// (metadata missing/deleted); otherwise delegates to
// ValidateMetadataCompatibility when the snapshot changed.
absl::Status ValidateNewMetadata(DataCacheBase* cache,
                                 const void* new_metadata) {
  if (!new_metadata) {
    return absl::FailedPreconditionError(
        GetMetadataMissingErrorMessage(cache->metadata_cache_entry_.get()));
  }
  auto* initial_metadata = cache->initial_metadata_.get();
  // Same pointer means the identical snapshot; nothing to re-validate.
  if (initial_metadata != new_metadata) {
    TENSORSTORE_RETURN_IF_ERROR(
        cache->ValidateMetadataCompatibility(initial_metadata, new_metadata));
  }
  return absl::OkStatus();
}
// Returns the effective metadata for a driver opened with
// `assume_cached_metadata`: prefers metadata actually read from the cache
// when its timestamp is newer than the assumed-metadata time, otherwise the
// assumed metadata.  When a transaction is supplied, pending transactional
// metadata updates are applied on top via GetUpdatedMetadata.
Result<MetadataPtr> GetUpdatedMetadataWithAssumeCachedMetadata(
    KvsMetadataDriverBase& driver, DataCacheBase& cache,
    internal::OpenTransactionPtr transaction) {
  assert(driver.assumed_metadata_time_ != absl::InfiniteFuture() &&
         driver.assumed_metadata_);
  assert(&cache == driver.cache());
  // Shared implementation for the plain cache entry and for a transaction
  // node view of it.
  const auto handle_entry_or_node =
      [&](auto& entry_or_node) -> Result<MetadataPtr> {
    MetadataPtr new_metadata;
    if (MetadataCache::ReadLock<void> lock(entry_or_node);
        lock.stamp().time > driver.assumed_metadata_time_) {
      new_metadata = lock.shared_data();
    } else {
      new_metadata = driver.assumed_metadata_;
    }
    if constexpr (std::is_same_v<absl::remove_cvref_t<decltype(entry_or_node)>,
                                 MetadataCache::TransactionNode>) {
      // Overlay uncommitted metadata modifications from the transaction.
      TENSORSTORE_ASSIGN_OR_RETURN(
          new_metadata,
          entry_or_node.GetUpdatedMetadata(std::move(new_metadata)),
          cache.metadata_cache_entry_->AnnotateError(_,
                                                     false));
    }
    return new_metadata;
  };
  if (transaction) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto node,
        GetTransactionNode(*cache.metadata_cache_entry_, transaction));
    return handle_entry_or_node(*node);
  } else {
    return handle_entry_or_node(*cache.metadata_cache_entry_);
  }
}
// Obtains the current metadata for `driver`, honoring the
// assume_metadata/assume_cached_metadata open modes, then validates it for
// compatibility with the open-time metadata.
Result<MetadataPtr> ValidateNewMetadata(
    KvsMetadataDriverBase& driver, internal::OpenTransactionPtr transaction) {
  MetadataPtr new_metadata;
  auto& cache = *driver.cache();
  if (driver.assumed_metadata_) {
    // `assume_metadata` (assumed time == InfiniteFuture): the assumed
    // metadata is authoritative and used as-is, with no validation.
    if (driver.assumed_metadata_time_ == absl::InfiniteFuture()) {
      return driver.assumed_metadata_;
    }
    TENSORSTORE_ASSIGN_OR_RETURN(new_metadata,
                                 GetUpdatedMetadataWithAssumeCachedMetadata(
                                     driver, cache, std::move(transaction)));
  } else {
    TENSORSTORE_ASSIGN_OR_RETURN(
        new_metadata,
        cache.metadata_cache_entry_->GetMetadata(std::move(transaction)));
  }
  TENSORSTORE_RETURN_IF_ERROR(ValidateNewMetadata(&cache, new_metadata.get()));
  return new_metadata;
}
// Computes the transform exposed at open time: the driver-specific
// external->internal transform (derived from the open-time metadata), with
// its bounds resolved against `metadata`.
Result<IndexTransform<>> GetInitialTransform(DataCacheBase* cache,
                                             const void* metadata,
                                             size_t component_index) {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto new_transform, cache->GetExternalToInternalTransform(
                              cache->initial_metadata_.get(), component_index));
  return ResolveBoundsFromMetadata(cache, metadata, component_index,
                                   std::move(new_transform),
                                   {});
}
}
// Computes the bounds (and implicit-bound flags) of component
// `component_index` according to `metadata`.  Bounds start from the
// component's overall fill value domain; the chunked dimensions are then
// overwritten with the grid bounds reported by GetChunkGridBounds, mapped
// through `chunked_to_cell_dimensions`.
void ChunkedDataCacheBase::GetComponentBounds(
    const void* metadata, size_t component_index,
    Box<dynamic_rank(kMaxRank)>& bounds, DimensionSet& implicit_lower_bounds,
    DimensionSet& implicit_upper_bounds) {
  const auto& grid = this->grid();
  const auto& component_spec = grid.components[component_index];
  const DimensionIndex component_rank = component_spec.rank();
  bounds.set_rank(component_rank);
  Box<dynamic_rank(kMaxRank)> grid_bounds(grid.chunk_shape.size());
  DimensionSet grid_implicit_lower_bounds;
  DimensionSet grid_implicit_upper_bounds;
  this->GetChunkGridBounds(metadata, grid_bounds, grid_implicit_lower_bounds,
                           grid_implicit_upper_bounds);
  span<const DimensionIndex> chunked_to_cell_dimensions =
      component_spec.chunked_to_cell_dimensions;
  // Unchunked dimensions keep the fill-value domain's (explicit) bounds.
  bounds = component_spec.array_spec.overall_fill_value.domain();
  implicit_lower_bounds = false;
  implicit_upper_bounds = false;
  for (DimensionIndex grid_dim = 0; grid_dim < grid_bounds.rank(); ++grid_dim) {
    const DimensionIndex cell_dim = chunked_to_cell_dimensions[grid_dim];
    bounds[cell_dim] = grid_bounds[grid_dim];
    implicit_lower_bounds[cell_dim] = grid_implicit_lower_bounds[grid_dim];
    implicit_upper_bounds[cell_dim] = grid_implicit_upper_bounds[grid_dim];
  }
}
// The chunk layout is derived from the metadata snapshot captured at open.
Result<ChunkLayout> ChunkedDataCacheBase::GetChunkLayout(
    size_t component_index) {
  return GetChunkLayoutFromMetadata(initial_metadata_.get(), component_index);
}
// Convenience overload: resolves bounds using the driver's configured
// metadata staleness bound.
Future<IndexTransform<>> KvsMetadataDriverBase::ResolveBounds(
    ResolveBoundsRequest request) {
  return ResolveBounds(std::move(request), metadata_staleness_bound_);
}
// Returns metadata no staler than `metadata_staleness_bound`.
//
// If metadata was assumed recently enough, no kvstore read is issued and the
// assumed/cached metadata is validated instead.  Otherwise the metadata
// cache entry (or its transaction node, when a transaction is supplied) is
// read, transactional updates are overlaid, and the result is validated for
// compatibility with the open-time metadata.
Future<MetadataPtr> KvsMetadataDriverBase::ResolveMetadata(
    internal::OpenTransactionPtr transaction,
    absl::Time metadata_staleness_bound) {
  if (assumed_metadata_ && assumed_metadata_time_ >= metadata_staleness_bound) {
    return ValidateNewMetadata(*this, std::move(transaction));
  }
  auto* cache = this->cache();
  if (transaction) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto node,
        GetTransactionNode(*cache->metadata_cache_entry_, transaction));
    auto read_future = node->Read({metadata_staleness_bound});
    return MapFuture(
        cache->executor(),
        [cache = DataCacheBase::Ptr(cache), node = std::move(node)](
            const Result<void>& result) -> Result<MetadataPtr> {
          TENSORSTORE_RETURN_IF_ERROR(result);
          TENSORSTORE_ASSIGN_OR_RETURN(
              auto new_metadata, node->GetUpdatedMetadata(),
              cache->metadata_cache_entry_->AnnotateError(_,
                                                          false));
          TENSORSTORE_RETURN_IF_ERROR(
              ValidateNewMetadata(cache.get(), new_metadata.get()));
          return new_metadata;
        },
        std::move(read_future));
  }
  return MapFuture(
      cache->executor(),
      [cache = DataCacheBase::Ptr(cache)](
          const Result<void>& result) -> Result<MetadataPtr> {
        TENSORSTORE_RETURN_IF_ERROR(result);
        auto new_metadata = cache->metadata_cache_entry_->GetMetadata();
        TENSORSTORE_RETURN_IF_ERROR(
            ValidateNewMetadata(cache.get(), new_metadata.get()));
        return new_metadata;
      },
      cache->metadata_cache_entry_->Read({metadata_staleness_bound}));
}
// Resolves `request.transform` against metadata no staler than
// `metadata_staleness_bound`, on the cache executor.
Future<IndexTransform<>> KvsMetadataDriverBase::ResolveBounds(
    ResolveBoundsRequest request, StalenessBound metadata_staleness_bound) {
  auto* cache = this->cache();
  return MapFutureValue(
      cache->executor(),
      [cache = DataCacheBase::Ptr(cache), component_index = component_index(),
       options = std::move(request.options),
       transform = std::move(request.transform)](
          const MetadataPtr& new_metadata) mutable {
        return ResolveBoundsFromMetadata(cache.get(), new_metadata.get(),
                                         component_index, std::move(transform),
                                         options);
      },
      ResolveMetadata(std::move(request.transaction),
                      metadata_staleness_bound.time));
}
namespace {
// Issues an atomic read-modify-write of the metadata applying the resize in
// `parameters`.  The update callback re-validates metadata compatibility and
// the resize constraints against the metadata current at update time, so the
// resize remains correct under concurrent modification.
Future<const void> RequestResize(ChunkedDataCacheBase* cache,
                                 internal::OpenTransactionPtr transaction,
                                 ResizeParameters parameters) {
  return cache->metadata_cache_entry_->RequestAtomicUpdate(
      transaction,
      // Note: `metadata_constraint` is initialized from the *outer* `cache`
      // pointer (init-captures are evaluated in the enclosing scope).
      [parameters = std::move(parameters),
       cache = ChunkedDataCacheBase::Ptr(cache),
       metadata_constraint = cache->initial_metadata_](
          const MetadataCache::MetadataPtr& current_metadata)
          -> Result<std::shared_ptr<const void>> {
        if (!current_metadata) {
          return absl::NotFoundError("Metadata was deleted");
        }
        if (metadata_constraint.get() != current_metadata.get()) {
          TENSORSTORE_RETURN_IF_ERROR(cache->ValidateMetadataCompatibility(
              metadata_constraint.get(), current_metadata.get()));
        }
        Box<dynamic_rank(kMaxRank)> bounds(parameters.new_inclusive_min.size());
        DimensionSet implicit_lower_bounds;
        DimensionSet implicit_upper_bounds;
        cache->GetChunkGridBounds(current_metadata.get(), bounds,
                                  implicit_lower_bounds, implicit_upper_bounds);
        TENSORSTORE_RETURN_IF_ERROR(ValidateResizeConstraints(
            bounds, parameters.new_inclusive_min, parameters.new_exclusive_max,
            parameters.inclusive_min_constraint,
            parameters.exclusive_max_constraint, parameters.expand_only,
            parameters.shrink_only));
        return cache->GetResizedMetadata(current_metadata.get(),
                                         parameters.new_inclusive_min,
                                         parameters.new_exclusive_max);
      },
      AtomicUpdateConstraint::kRequireExisting);
}
// Continuation invoked after the metadata update completes: re-validates the
// metadata and resolves the caller's transform against the new bounds.
struct ResizeContinuation {
  internal::IntrusivePtr<KvsMetadataDriverBase> driver;
  internal::OpenTransactionPtr transaction;
  size_t component_index;
  IndexTransform<> transform;
  Result<IndexTransform<>> GetResult() {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto new_metadata,
        ValidateNewMetadata(*driver, std::move(transaction)));
    return ResolveBoundsFromMetadata(driver->cache(), new_metadata.get(),
                                     component_index, std::move(transform),
                                     {});
  }
  void operator()(Promise<IndexTransform<>> promise, ReadyFuture<const void>) {
    promise.SetResult(GetResult());
  }
};
// Bundle of state threaded through the asynchronous resize flow.
struct ResizeState {
  internal::IntrusivePtr<KvsChunkedDriverBase> driver;
  ChunkedDataCacheBase::Ptr cache;
  internal::OpenTransactionPtr transaction;
  size_t component_index;
  IndexTransform<> transform;
  ResizeParameters resize_parameters;
};
// Chains the atomic metadata update onto `promise`, running
// ResizeContinuation on the cache executor once the update completes.
void SubmitResizeRequest(Promise<IndexTransform<>> promise, ResizeState state) {
  auto* cache_ptr = state.cache.get();
  LinkValue(
      WithExecutor(cache_ptr->executor(),
                   ResizeContinuation{std::move(state.driver),
                                      state.transaction, state.component_index,
                                      std::move(state.transform)}),
      std::move(promise),
      RequestResize(cache_ptr, state.transaction,
                    std::move(state.resize_parameters)));
}
// Continuation invoked after out-of-bounds chunks have been deleted:
// proceeds with the metadata resize itself.
struct DeleteChunksForResizeContinuation {
  std::unique_ptr<ResizeState> state;
  void operator()(Promise<IndexTransform<>> promise, ReadyFuture<const void>) {
    SubmitResizeRequest(std::move(promise), std::move(*state));
  }
};
// Deletes every chunk inside the current bounds but outside the new bounds.
// The two bound boxes are converted to grid (chunk) coordinates (rounded
// outward), their difference is enumerated as sub-boxes, and each cell in
// those sub-boxes is deleted.  The returned future resolves once all
// per-chunk deletions complete.
Future<const void> DeleteChunksForResize(
    ChunkedDataCacheBase::Ptr cache, BoxView<> current_bounds,
    span<const Index> new_inclusive_min, span<const Index> new_exclusive_max,
    internal::OpenTransactionPtr transaction) {
  span<const Index> chunk_shape = cache->grid().chunk_shape;
  const DimensionIndex rank = chunk_shape.size();
  assert(current_bounds.rank() == rank);
  assert(new_inclusive_min.size() == rank);
  assert(new_exclusive_max.size() == rank);
  auto pair = PromiseFuturePair<void>::Make(MakeResult(absl::Status()));
  pair.future.Force();
  // Convert voxel bounds to grid coordinates, rounding outward.
  Box<dynamic_rank(internal::kNumInlinedDims)> current_grid_bounds(rank);
  Box<dynamic_rank(internal::kNumInlinedDims)> new_grid_bounds(rank);
  for (DimensionIndex i = 0; i < rank; ++i) {
    const IndexInterval cur_dim_bounds = current_bounds[i];
    const IndexInterval new_dim_bounds = IndexInterval::UncheckedHalfOpen(
        ExplicitIndexOr(new_inclusive_min[i], cur_dim_bounds.inclusive_min()),
        ExplicitIndexOr(new_exclusive_max[i], cur_dim_bounds.exclusive_max()));
    const Index chunk_size = chunk_shape[i];
    current_grid_bounds[i] = DividePositiveRoundOut(cur_dim_bounds, chunk_size);
    new_grid_bounds[i] = DividePositiveRoundOut(new_dim_bounds, chunk_size);
  }
  internal::BoxDifference box_difference(current_grid_bounds, new_grid_bounds);
  Box<dynamic_rank(internal::kNumInlinedDims)> part(rank);
  for (Index box_i = 0; box_i < box_difference.num_sub_boxes(); ++box_i) {
    box_difference.GetSubBox(box_i, part);
    IterateOverIndexRange(part, [&](span<const Index> cell_indices) {
      LinkError(pair.promise, cache->DeleteCell(cell_indices, transaction));
    });
  }
  return pair.future;
}
// Continuation invoked after re-reading metadata for a non-metadata-only
// resize: validates the metadata and the resize constraints against the
// fresh bounds, then kicks off the chunk deletion pass followed by the
// metadata resize.
struct ResolveBoundsForDeleteAndResizeContinuation {
  std::unique_ptr<ResizeState> state;
  void operator()(Promise<IndexTransform<>> promise, ReadyFuture<const void>) {
    std::shared_ptr<const void> new_metadata;
    if (auto result = ValidateNewMetadata(*state->driver, state->transaction);
        result.ok()) {
      new_metadata = *std::move(result);
    } else {
      promise.SetResult(std::move(result).status());
      return;
    }
    // An expand-only resize never deletes chunks, so it must not reach here.
    const DimensionIndex grid_rank = state->cache->grid().chunk_shape.size();
    assert(!state->resize_parameters.expand_only);
    Box<dynamic_rank(internal::kNumInlinedDims)> bounds(grid_rank);
    DimensionSet implicit_lower_bounds;
    DimensionSet implicit_upper_bounds;
    state->cache->GetChunkGridBounds(new_metadata.get(), bounds,
                                     implicit_lower_bounds,
                                     implicit_upper_bounds);
    if (auto status = ValidateResizeConstraints(
            bounds, state->resize_parameters.new_inclusive_min,
            state->resize_parameters.new_exclusive_max,
            state->resize_parameters.inclusive_min_constraint,
            state->resize_parameters.exclusive_max_constraint,
            false,
            state->resize_parameters.shrink_only);
        !status.ok()) {
      promise.SetResult(std::move(status));
      return;
    }
    // `state_ptr` stays valid: the continuation takes ownership of `state`.
    auto* state_ptr = state.get();
    LinkValue(
        WithExecutor(state_ptr->cache->executor(),
                     DeleteChunksForResizeContinuation{std::move(state)}),
        std::move(promise),
        DeleteChunksForResize(state_ptr->cache, bounds,
                              state_ptr->resize_parameters.new_inclusive_min,
                              state_ptr->resize_parameters.new_exclusive_max,
                              state_ptr->transaction));
  }
};
}
// Applies `transform` to the chunk layout derived from the open-time
// metadata snapshot.
Result<ChunkLayout> KvsChunkedDriverBase::GetChunkLayout(
    IndexTransformView<> transform) {
  auto* cache = this->cache();
  return cache->GetChunkLayoutFromMetadata(cache->initial_metadata().get(),
                                           component_index()) |
         transform;
}
// Resizes the chunked store.
//
// Computes the resize parameters from the requested bounds; a kAborted
// result means "no-op resize", which degenerates to a bounds resolution.
// Metadata-only or expand-only resizes go straight to the atomic metadata
// update; otherwise the current metadata is re-read first so that
// out-of-bounds chunks can be deleted before the metadata shrinks.
Future<IndexTransform<>> KvsChunkedDriverBase::Resize(
    internal::Driver::ResizeRequest request) {
  if (assumed_metadata_time_ == absl::InfiniteFuture()) {
    return absl::InvalidArgumentError(
        "Resize not supported because assume_metadata was specified");
  }
  auto* cache = this->cache();
  auto resize_parameters = GetResizeParameters(
      cache, cache->initial_metadata_.get(), component_index(),
      request.transform, request.inclusive_min, request.exclusive_max,
      request.options,
      request.transaction ? request.transaction->mode()
                          : TransactionMode::no_transaction_mode);
  if (!resize_parameters) {
    // kAborted: the requested resize is a no-op; just resolve bounds.
    if (resize_parameters.status().code() == absl::StatusCode::kAborted) {
      return ResolveBounds(
          {std::move(request.transaction), std::move(request.transform)},
          {});
    }
    return resize_parameters.status();
  }
  auto pair = PromiseFuturePair<IndexTransform<>>::Make();
  ResizeState resize_state{
      internal::IntrusivePtr<KvsChunkedDriverBase>(this),
      ChunkedDataCacheBase::Ptr(cache),
      std::move(request.transaction),
      component_index(),
      std::move(request.transform),
      *std::move(resize_parameters),
  };
  if ((request.options.mode & resize_metadata_only) == resize_metadata_only ||
      (request.options.mode & expand_only) == expand_only) {
    SubmitResizeRequest(std::move(pair.promise), std::move(resize_state));
  } else {
    LinkValue(WithExecutor(
                  cache->executor(),
                  ResolveBoundsForDeleteAndResizeContinuation{
                      std::make_unique<ResizeState>(std::move(resize_state))}),
              std::move(pair.promise),
              cache->metadata_cache_entry_->Read({absl::Now()}));
  }
  return std::move(pair.future);
}
// Fills in `spec` so it round-trips to this open driver (kvstore, resources,
// staleness, schema, and driver-specific metadata fields), and returns
// `transform_view` composed with the inverse of the external->internal
// transform so the spec's transform is expressed in external coordinates.
Result<IndexTransform<>> KvsMetadataDriverBase::GetBoundSpecData(
    internal::OpenTransactionPtr transaction, KvsDriverSpec& spec,
    IndexTransformView<> transform_view) {
  auto* cache = this->cache();
  auto* metadata_cache = cache->metadata_cache();
  TENSORSTORE_ASSIGN_OR_RETURN(spec.store.driver,
                               metadata_cache->base_store()->GetBoundSpec());
  spec.store.path = cache->GetBaseKvstorePath();
  spec.data_copy_concurrency = metadata_cache->data_copy_concurrency_;
  spec.cache_pool = cache->cache_pool_;
  // Only record a separate metadata cache pool when it differs.
  if (spec.cache_pool != metadata_cache->metadata_cache_pool_) {
    spec.metadata_cache_pool = metadata_cache->metadata_cache_pool_;
  }
  // An already-open driver round-trips as a plain "open" spec.
  spec.delete_existing = false;
  spec.open = true;
  spec.create = false;
  spec.assume_metadata = assumed_metadata_time_ == absl::InfiniteFuture();
  spec.staleness.metadata = this->metadata_staleness_bound();
  spec.staleness.data = this->data_staleness_bound();
  spec.schema.Set(RankConstraint{this->rank()}).IgnoreError();
  spec.schema.Set(this->dtype()).IgnoreError();
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto validated_metadata,
      ValidateNewMetadata(*this, std::move(transaction)));
  TENSORSTORE_RETURN_IF_ERROR(cache->GetBoundSpecData(
      spec, validated_metadata.get(), this->component_index()));
  IndexTransform<> transform(transform_view);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto external_to_internal_transform,
      cache->GetExternalToInternalTransform(validated_metadata.get(),
                                            component_index()));
  if (external_to_internal_transform.valid()) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto internal_to_external_transform,
        InverseTransform(external_to_internal_transform));
    TENSORSTORE_ASSIGN_OR_RETURN(
        transform,
        ComposeTransforms(internal_to_external_transform, transform));
  }
  return transform;
}
// Merges spec options: staleness bounds and kvstore are handled here (a
// second kvstore is an error); schema options go to `schema`, and the
// remaining open-mode options go to the OpenModeSpec base.
absl::Status KvsDriverSpec::ApplyOptions(SpecOptions&& options) {
  if (options.recheck_cached_data.specified()) {
    staleness.data = StalenessBound(options.recheck_cached_data);
  }
  if (options.recheck_cached_metadata.specified()) {
    staleness.metadata = StalenessBound(options.recheck_cached_metadata);
  }
  if (options.kvstore.valid()) {
    if (store.valid()) {
      return absl::InvalidArgumentError("\"kvstore\" is already specified");
    }
    store = std::move(options.kvstore);
  }
  TENSORSTORE_RETURN_IF_ERROR(schema.Set(static_cast<Schema&&>(options)));
  return OpenModeSpec::ApplyOptions(options);
}
// Returns the effective open mode; when the base spec leaves it unset
// (default-constructed OpenMode), `open` is assumed.
OpenMode KvsDriverSpec::open_mode() const {
  const OpenMode base_mode = this->OpenModeSpec::open_mode();
  if (base_mode == OpenMode{}) return OpenMode::open;
  return base_mode;
}
kvstore::Spec KvsDriverSpec::GetKvstore() const { return store; }
// Returns the base kvstore (with the data cache's base path) bound to
// `transaction`.
KvStore KvsMetadataDriverBase::GetKvstore(const Transaction& transaction) {
  auto* cache = this->cache();
  auto* metadata_cache = cache->metadata_cache();
  return KvStore{kvstore::DriverPtr(metadata_cache->base_store()),
                 cache->GetBaseKvstorePath(), transaction};
}
namespace {
// Checks that metadata exists (NotFound otherwise) and resolves which
// component of the metadata the open request refers to.
Result<size_t> ValidateOpenRequest(OpenState* state, const void* metadata) {
  auto& base = *(PrivateOpenState*)state;
  if (!metadata) {
    return absl::NotFoundError(
        GetMetadataMissingErrorMessage(base.metadata_cache_entry_.get()));
  }
  return state->GetComponentIndex(metadata, base.spec_->open_mode());
}
// Builds the final driver handle once metadata and the component index are
// known: obtains (or creates) the data cache, computes the initial
// transform, and wires up staleness/assumed-metadata state.  For a
// transactional open that did not assume metadata, also registers a
// consistency check that aborts the transaction if the metadata changes
// incompatibly before commit.
Result<internal::Driver::Handle> CreateTensorStoreFromMetadata(
    OpenState::Ptr state, std::shared_ptr<const void> metadata,
    size_t component_index) {
  ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
      << "CreateTensorStoreFromMetadata: state=" << state.get();
  auto& base = *(PrivateOpenState*)state.get();
  auto read_write_mode = state->GetReadWriteMode(metadata.get());
  if (base.read_write_mode_ != ReadWriteMode::dynamic) {
    // An explicitly requested mode must be supported by the driver.
    TENSORSTORE_RETURN_IF_ERROR(internal::ValidateSupportsModes(
        read_write_mode, base.read_write_mode_));
    read_write_mode = base.read_write_mode_;
  }
  std::string chunk_cache_identifier;
  bool data_cache_uses_metadata_cache_pool =
      state->DataCacheUsesMetadataCachePool(metadata.get());
  // An empty identifier disables sharing of the data cache across opens.
  if (!base.metadata_cache_key_.empty()) {
    auto data_cache_key = state->GetDataCacheKey(metadata.get());
    if (!data_cache_key.empty()) {
      internal::EncodeCacheKey(&chunk_cache_identifier, data_cache_key,
                               base.metadata_cache_entry_.get(),
                               state->cache_pool()->get());
    }
  }
  absl::Status data_key_value_store_status;
  const auto& state_ref = *state;
  auto data_cache = internal::GetCacheWithExplicitTypeInfo<DataCacheBase>(
      (data_cache_uses_metadata_cache_pool
           ? GetOwningCache(*base.metadata_cache_entry_).pool()
           : state->cache_pool()->get()),
      typeid(state_ref), chunk_cache_identifier,
      [&]() -> std::unique_ptr<DataCacheBase> {
        // Factory runs only on a cache miss.  It cannot fail directly, so
        // errors are reported via `data_key_value_store_status`.
        auto store_result = state->GetDataKeyValueStore(
            GetOwningCache(*base.metadata_cache_entry_).base_store_,
            metadata.get());
        if (!store_result) {
          data_key_value_store_status = std::move(store_result).status();
          return nullptr;
        }
        DataCacheInitializer initializer;
        initializer.store = *std::move(store_result);
        initializer.metadata_cache_entry = base.metadata_cache_entry_;
        initializer.metadata = metadata;
        initializer.cache_pool = state->cache_pool();
        return state->GetDataCache(std::move(initializer));
      });
  TENSORSTORE_RETURN_IF_ERROR(data_key_value_store_status);
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto new_transform,
      GetInitialTransform(data_cache.get(), metadata.get(), component_index));
  if (base.transaction_ &&
      !(base.spec_->assume_metadata || base.spec_->assume_cached_metadata)) {
    // Require that the metadata (and hence the initial transform) is still
    // consistent at transaction commit time.
    data_cache->metadata_cache_entry_
        ->RequestAtomicUpdate(
            base.transaction_,
            [data_cache = data_cache, transform = new_transform,
             component_index](
                const MetadataCache::MetadataPtr& existing_metadata)
                -> Result<MetadataCache::MetadataPtr> {
              TENSORSTORE_RETURN_IF_ERROR(ValidateNewMetadata(
                  data_cache.get(), existing_metadata.get()));
              TENSORSTORE_ASSIGN_OR_RETURN(
                  auto new_transform,
                  GetInitialTransform(data_cache.get(), existing_metadata.get(),
                                      component_index));
              if (transform != new_transform) {
                return absl::AbortedError("Metadata is inconsistent");
              }
              return existing_metadata;
            },
            AtomicUpdateConstraint::kRequireExisting)
        .IgnoreFuture();
  }
  DriverInitializer initializer;
  initializer.cache = std::move(data_cache);
  initializer.component_index = component_index;
  initializer.data_staleness_bound =
      base.spec_->staleness.data.BoundAtOpen(base.request_time_);
  internal::ReadWritePtr<KvsMetadataDriverBase> driver(
      state->AllocateDriver(std::move(initializer)), read_write_mode);
  driver->metadata_staleness_bound_ =
      base.spec_->staleness.metadata.BoundAtOpen(base.request_time_);
  if (base.spec_->assume_metadata || base.spec_->assume_cached_metadata) {
    driver->assumed_metadata_ = metadata;
    // assume_cached_metadata allows later refresh past the open time;
    // assume_metadata makes the assumed metadata authoritative forever.
    driver->assumed_metadata_time_ = base.spec_->assume_cached_metadata
                                         ? base.request_time_
                                         : absl::InfiniteFuture();
  }
  return internal::Driver::Handle{
      std::move(driver), std::move(new_transform),
      internal::TransactionState::ToTransaction(std::move(base.transaction_))};
}
// Continuation invoked after attempting to create the metadata.
// kAlreadyExists is tolerated when the spec also allows opening an existing
// store; any other error propagates.  Otherwise the now-cached metadata is
// used to build the driver handle.
struct HandleWroteMetadata {
  MetadataOpenState::Ptr state;
  void operator()(Promise<internal::Driver::Handle> promise,
                  ReadyFuture<const void> future) {
    auto& base = *(PrivateOpenState*)state.get();
    auto& result = future.result();
    ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
        << "HandleWroteMetadata: state=" << state.get()
        << ", status=" << result.status();
    if (!result) {
      // "Already exists" falls through to an open when `open` is set.
      if (result.status().code() != absl::StatusCode::kAlreadyExists ||
          !base.spec_->open) {
        promise.SetResult(result.status());
        return;
      }
    }
    promise.SetResult([&]() -> Result<internal::Driver::Handle> {
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto metadata,
          base.metadata_cache_entry_->GetMetadata(base.transaction_));
      return state->CreateDriverHandleFromMetadata(std::move(metadata));
    }());
  }
};
// Requests atomic creation of the metadata and chains HandleWroteMetadata
// onto the result.  `state_copy` keeps a reference for the update callback
// while `state` itself is moved into the continuation; `state_ptr` remains
// valid because both references pin the same object.
void CreateMetadata(MetadataOpenState::Ptr state,
                    Promise<internal::Driver::Handle> promise) {
  ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
      << "CreateMetadata: state=" << state.get();
  auto state_ptr = state.get();
  auto& base = *(PrivateOpenState*)state.get();
  internal::OpenTransactionPtr transaction = base.transaction_;
  auto state_copy = state;
  Link(WithExecutor(state_ptr->executor(),
                    HandleWroteMetadata{std::move(state)}),
       std::move(promise),
       base.metadata_cache_entry_->RequestAtomicUpdate(
           transaction,
           [state = std::move(state_copy)](
               const MetadataCache::MetadataPtr& existing_metadata)
               -> Result<MetadataCache::MetadataPtr> {
             return state->Create(existing_metadata.get(), {});
           },
           state_ptr->GetCreateConstraint(), base.request_time_));
}
// Callback invoked after the existing metadata has been read.  Attempts to
// build a driver handle from it; if that fails with `kNotFound` and the spec
// allows creation, falls back to `CreateMetadata`; otherwise propagates the
// error.
struct HandleReadMetadata {
  MetadataOpenState::Ptr state;
  void operator()(Promise<internal::Driver::Handle> promise,
                  ReadyFuture<const void> metadata_future) {
    auto& base = *(PrivateOpenState*)state.get();
    std::shared_ptr<const void> metadata;
    if (auto result =
            base.metadata_cache_entry_->GetMetadata(base.transaction_);
        result.ok()) {
      metadata = *std::move(result);
    } else {
      promise.SetResult(std::move(result).status());
      return;
    }
    auto handle_result = state->CreateDriverHandleFromMetadata(metadata);
    if (handle_result) {
      promise.SetResult(std::move(handle_result));
      return;
    }
    // Metadata missing: create it if the open mode permits.
    if (handle_result.status().code() == absl::StatusCode::kNotFound) {
      if (base.spec_->create) {
        CreateMetadata(std::move(state), std::move(promise));
        return;
      }
    }
    promise.SetResult(std::move(handle_result).status());
  }
};
// Obtains the metadata needed to open the driver.  With `assume_metadata` /
// `assume_cached_metadata`, synthesizes metadata directly from the spec;
// otherwise reads it from the metadata cache (chaining `HandleReadMetadata`),
// or creates it outright when only `create` is specified.
struct GetMetadataForOpen {
  MetadataOpenState::Ptr state;
  void operator()(Promise<internal::Driver::Handle> promise) {
    ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
        << "GetMetadataForOpen: state=" << state.get();
    auto& base = *(PrivateOpenState*)state.get();
    auto state_ptr = state.get();
    auto batch = std::move(base.batch_);
    if (base.spec_->open) {
      if (base.spec_->assume_metadata || base.spec_->assume_cached_metadata) {
        // Skip the kvstore read entirely; trust the spec-provided metadata.
        TENSORSTORE_ASSIGN_OR_RETURN(
            auto metadata, state->Create(nullptr, {true}),
            static_cast<void>(promise.SetResult(_)));
        promise.SetResult(
            state->CreateDriverHandleFromMetadata(std::move(metadata)));
        return;
      }
      LinkValue(
          WithExecutor(state_ptr->executor(),
                       HandleReadMetadata{std::move(state)}),
          std::move(promise),
          base.metadata_cache_entry_->Read(
              {base.spec_->staleness.metadata.BoundAtOpen(base.request_time_)
                   .time,
               batch}));
      return;
    }
    // `open` not permitted: `create` must have been specified.
    assert(base.spec_->create);
    CreateMetadata(std::move(state), std::move(promise));
  }
};
// Callback invoked once the metadata kvstore has been opened.  Handles
// `delete_existing` by deleting the key range for the store (directly, or
// transactionally followed by a barrier), then proceeds to
// `GetMetadataForOpen`.
struct HandleKeyValueStoreReady {
  MetadataOpenState::Ptr state;
  void operator()(Promise<internal::Driver::Handle> promise,
                  ReadyFuture<const void> store) {
    ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
        << "Metadata kvstore ready: state=" << state.get();
    auto& base = *(PrivateOpenState*)state.get();
    auto* state_ptr = state.get();
    if (base.spec_->delete_existing) {
      KeyRange range_to_delete =
          KeyRange::Prefix(state->GetPrefixForDeleteExisting());
      auto* kvstore =
          GetOwningCache(*base.metadata_cache_entry_).base_store_.get();
      if (!base.transaction_) {
        // Non-transactional: delete asynchronously, then continue the open.
        LinkValue(std::bind(WithExecutor(state_ptr->executor(),
                                         GetMetadataForOpen{std::move(state)}),
                            std::placeholders::_1),
                  std::move(promise),
                  kvstore->DeleteRange(std::move(range_to_delete)));
        return;
      }
      if (auto status = kvstore->TransactionalDeleteRange(
              base.transaction_, std::move(range_to_delete));
          !status.ok()) {
        promise.SetResult(status);
        return;
      }
      // Ensure the delete is ordered before subsequent metadata writes.
      base.transaction_->Barrier();
    }
    GetMetadataForOpen{std::move(state)}(std::move(promise));
  }
};
}
// Queues `update` as a pending atomic modification of this metadata entry.
// For an implicit transaction, attaches a per-write promise whose error is
// linked to the transaction future; for an explicit transaction, the write is
// simply recorded.  If `read_time` is given, a read at that time is issued so
// the update is validated against fresh metadata.
Future<const void> MetadataCache::Entry::RequestAtomicUpdate(
    const internal::OpenTransactionPtr& transaction, UpdateFunction update,
    AtomicUpdateConstraint update_constraint,
    std::optional<absl::Time> read_time) {
  TENSORSTORE_ASSIGN_OR_RETURN(
      auto node, GetWriteLockedTransactionNode(*this, transaction));
  // Invalidate any cached "updated metadata" so it is recomputed with the
  // new pending write included.
  node->updated_metadata_base_state_ =
      internal::UnownedToShared(&invalid_metadata);
  node->updated_metadata_ = nullptr;
  if (node->transaction()->implicit_transaction()) {
    auto [promise, future] = PromiseFuturePair<void>::Make();
    node->AddPendingWrite(
        PendingWrite{std::move(update), update_constraint, promise});
    LinkError(std::move(promise), node.unlock()->transaction()->future());
    return std::move(future);
  }
  node->AddPendingWrite(PendingWrite{std::move(update), update_constraint});
  if (read_time) {
    return node->Read({*read_time});
  }
  return MakeReadyFuture();
}
// Returns the metadata as seen through `transaction` (i.e. with any pending
// transactional updates applied).  With no transaction, returns the plain
// cached metadata.  Errors are annotated with this entry's context.
Result<MetadataCache::MetadataPtr> MetadataCache::Entry::GetMetadata(
    internal::OpenTransactionPtr transaction) {
  if (!transaction) return GetMetadata();
  TENSORSTORE_ASSIGN_OR_RETURN(auto node,
                               GetTransactionNode(*this, transaction));
  TENSORSTORE_ASSIGN_OR_RETURN(auto metadata, node->GetUpdatedMetadata(),
                               this->AnnotateError(_, false));
  return metadata;
}
// Applies all pending writes on top of `metadata` and returns the result.
// The result is memoized: if `metadata` equals the base state of the last
// computation, the cached `updated_metadata_` is returned directly.
//
// Per-write constraint violations: a failed update that carries a promise
// records its (annotated) error on that promise and processing continues;
// a failed update without a promise aborts and caches the error.
Result<MetadataCache::MetadataPtr>
MetadataCache::TransactionNode::GetUpdatedMetadata(MetadataPtr metadata) {
  UniqueWriterLock lock(*this);
  if (this->updated_metadata_base_state_ == metadata) {
    return this->updated_metadata_;
  }
  this->updated_metadata_base_state_ = metadata;
  for (const auto& request : this->pending_writes) {
    auto result = request.update(metadata);
    if (result) {
      assert(*result);
      // The update function must honor its declared constraint.
      assert(request.update_constraint !=
                 AtomicUpdateConstraint::kRequireMissing ||
             metadata == nullptr);
      assert(request.update_constraint !=
                 AtomicUpdateConstraint::kRequireExisting ||
             metadata != nullptr);
      metadata = std::move(*result);
      if (!request.promise.null()) {
        request.promise.raw_result() = MakeResult();
      }
    } else {
      if (!request.promise.null()) {
        request.promise.raw_result() = GetOwningEntry(*this).AnnotateError(
            result.status(), false);
      } else {
        this->updated_metadata_ = result.status();
        return std::move(result).status();
      }
    }
  }
  this->updated_metadata_ = metadata;
  return metadata;
}
// Convenience overload: applies pending writes on top of the current cached
// read state.  The read lock is released before calling the locked overload
// (which acquires a writer lock), avoiding holding both locks at once.
Result<MetadataCache::MetadataPtr>
MetadataCache::TransactionNode::GetUpdatedMetadata() {
  auto metadata = ReadLock<void>(*this).shared_data();
  return GetUpdatedMetadata(std::move(metadata));
}
// Decodes the raw metadata read from the kvstore on the cache executor.  A
// missing value decodes to a null metadata pointer.  Decode failures are
// reported as `kFailedPrecondition` (an invalid stored value is a state
// problem, not a caller-argument problem).
void MetadataCache::Entry::DoDecode(std::optional<absl::Cord> value,
                                    DecodeReceiver receiver) {
  GetOwningCache(*this).executor()([this, value = std::move(value),
                                    receiver = std::move(receiver)]() mutable {
    MetadataPtr new_metadata;
    if (value) {
      if (auto result = GetOwningCache(*this).DecodeMetadata(this->key(),
                                                             *std::move(value));
          result.ok()) {
        new_metadata = *std::move(result);
      } else {
        execution::set_error(
            receiver, internal::ConvertInvalidArgumentToFailedPrecondition(
                          std::move(result).status()));
        return;
      }
    }
    execution::set_value(receiver, std::move(new_metadata));
  });
}
// Maps this metadata cache entry to its storage key in the underlying
// kvstore.
std::string MetadataCache::Entry::GetKeyValueStoreKey() {
  auto& cache = GetOwningCache(*this);
  return cache.GetMetadataStorageKey(this->key());
}
// Commits the transactional metadata state: reads the current metadata (at
// the requested staleness bound), applies the pending writes, and emits the
// resulting read state.  With no pending writes (and unless the caller asked
// for an explicit "unchanged" state) an unconditional empty state is emitted.
void MetadataCache::TransactionNode::DoApply(ApplyOptions options,
                                             ApplyReceiver receiver) {
  if (this->pending_writes.empty() &&
      options.apply_mode != ApplyOptions::kSpecifyUnchanged) {
    execution::set_value(
        receiver, ReadState{{}, TimestampedStorageGeneration::Unconditional()});
    return;
  }
  auto continuation = [this, receiver = std::move(receiver)](
                          ReadyFuture<const void> future) mutable {
    if (!future.result().ok()) {
      return execution::set_error(receiver, future.result().status());
    }
    ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
        << *this << "Apply metadata";
    auto read_state = AsyncCache::ReadLock<void>(*this).read_state();
    std::shared_ptr<const void> new_data;
    if (auto result = this->GetUpdatedMetadata(read_state.data); result.ok()) {
      new_data = *std::move(result);
    } else {
      execution::set_error(receiver, std::move(result).status());
      return;
    }
    // Only mark the generation dirty if the metadata actually changed.
    if (new_data != read_state.data) {
      read_state.stamp.generation.MarkDirty();
      read_state.data = std::move(new_data);
    }
    execution::set_value(receiver, std::move(read_state));
  };
  this->Read({options.staleness_bound})
      .ExecuteWhenReady(WithExecutor(GetOwningCache(*this).executor(),
                                     std::move(continuation)));
}
// Invalidates the cached read state, and additionally resets the memoized
// "metadata with pending writes applied" so it is recomputed against the
// next read state.
void MetadataCache::TransactionNode::InvalidateReadState() {
  Base::TransactionNode::InvalidateReadState();
  this->updated_metadata_base_state_ =
      internal::UnownedToShared(&invalid_metadata);
  this->updated_metadata_ = nullptr;
}
// Encodes the in-memory metadata into its serialized kvstore representation
// and forwards the encoded value (or the encoding error) to `receiver`.
void MetadataCache::Entry::DoEncode(std::shared_ptr<const void> data,
                                    EncodeReceiver receiver) {
  ABSL_LOG_IF(INFO, TENSORSTORE_ASYNC_CACHE_DEBUG)
      << *this << "Encoding metadata";
  auto& entry = GetOwningEntry(*this);
  auto& cache = GetOwningCache(entry);
  auto encoded_result = cache.EncodeMetadata(entry.key(), data.get());
  if (!encoded_result.ok()) {
    execution::set_error(receiver, std::move(encoded_result).status());
    return;
  }
  execution::set_value(receiver, *std::move(encoded_result));
}
// Deletes the chunk identified by `grid_cell_indices`; simply forwards to the
// base `ChunkCache` implementation.
Future<const void> DataCache::DeleteCell(
    span<const Index> grid_cell_indices,
    internal::OpenTransactionPtr transaction) {
  auto txn = std::move(transaction);
  return internal::ChunkCache::DeleteCell(grid_cell_indices, std::move(txn));
}
namespace {
internal::CachePtr<MetadataCache> GetOrCreateMetadataCache(
MetadataOpenState* state) {
auto& base = *(PrivateOpenState*)state;
auto& spec = *base.spec_;
internal::EncodeCacheKey(&base.metadata_cache_key_, spec.store.driver,
typeid(*state), state->GetMetadataCacheKey());
return internal::GetOrCreateAsyncInitializedCache<MetadataCache>(
state->metadata_cache_pool()->get(), base.metadata_cache_key_,
[&] {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "Creating metadata cache: open_state=" << state;
return state->GetMetadataCache(
{base.spec_->data_copy_concurrency, state->metadata_cache_pool()});
},
[&](Promise<void> initialized,
internal::CachePtr<MetadataCache> metadata_cache) {
ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
<< "Opening metadata kvstore: open_state=" << state;
LinkValue(
[state = MetadataOpenState::Ptr(state),
metadata_cache = std::move(metadata_cache)](
Promise<void> metadata_cache_promise,
ReadyFuture<kvstore::DriverPtr> future) {
metadata_cache->base_store_ = *future.result();
if (auto result = state->GetMetadataKeyValueStore(
metadata_cache->base_store_);
result.ok()) {
metadata_cache->SetKvStoreDriver(*std::move(result));
} else {
metadata_cache_promise.SetResult(std::move(result).status());
}
},
initialized, kvstore::Open(spec.store.driver));
});
}
}
// Validates the open request against `metadata` (resolving the component
// index) and constructs the TensorStore driver handle from it.
Result<internal::Driver::Handle> OpenState::CreateDriverHandleFromMetadata(
    std::shared_ptr<const void> metadata) {
  TENSORSTORE_ASSIGN_OR_RETURN(size_t component_index,
                               ValidateOpenRequest(this, metadata.get()));
  return CreateTensorStoreFromMetadata(OpenState::Ptr(this),
                                       std::move(metadata), component_index);
}
// Entry point for opening a kvstore-backed chunk driver: validates the open
// mode, obtains the shared metadata cache and its entry, then continues in
// `HandleKeyValueStoreReady` once the metadata kvstore is initialized.
Future<internal::Driver::Handle> OpenDriver(MetadataOpenState::Ptr state) {
  ABSL_LOG_IF(INFO, TENSORSTORE_KVS_DRIVER_DEBUG)
      << "OpenDriver: open_state=" << state.get();
  auto& base = *(PrivateOpenState*)state.get();
  auto& spec = *base.spec_;
  TENSORSTORE_RETURN_IF_ERROR(
      spec.OpenModeSpec::Validate(base.read_write_mode_));
  if (!spec.store.valid()) {
    return absl::InvalidArgumentError("\"kvstore\" must be specified");
  }
  auto* state_ptr = state.get();
  auto metadata_cache = GetOrCreateMetadataCache(state_ptr);
  base.metadata_cache_entry_ =
      GetCacheEntry(metadata_cache, state->GetMetadataCacheEntryKey());
  return PromiseFuturePair<internal::Driver::Handle>::LinkValue(
             HandleKeyValueStoreReady{std::move(state)},
             metadata_cache->initialized_)
      .future;
}
// Propagates the component bounds recorded in `new_metadata` into
// `transform`.  With `fix_resizable_bounds`, implicit (resizable) bounds are
// treated as explicit so subsequent operations cannot resize them.
Result<IndexTransform<>> ResolveBoundsFromMetadata(
    DataCacheBase* data_cache, const void* new_metadata, size_t component_index,
    IndexTransform<> transform, ResolveBoundsOptions options) {
  DimensionSet base_implicit_lower_bounds;
  DimensionSet base_implicit_upper_bounds;
  Box<dynamic_rank(kMaxRank)> base_bounds;
  data_cache->GetComponentBounds(new_metadata, component_index, base_bounds,
                                 base_implicit_lower_bounds,
                                 base_implicit_upper_bounds);
  if ((options.mode & fix_resizable_bounds) == fix_resizable_bounds) {
    base_implicit_lower_bounds = false;
    base_implicit_upper_bounds = false;
  }
  return PropagateBoundsToTransform(
      BoxView<>(base_bounds), base_implicit_lower_bounds,
      base_implicit_upper_bounds, std::move(transform));
}
// Validates that a resize from `current_domain` to the bounds given by
// `new_inclusive_min` / `new_exclusive_max` satisfies both the caller's
// per-dimension constraints and the `expand_only` / `shrink_only` flags.
absl::Status ValidateResizeConstraints(
    BoxView<> current_domain, span<const Index> new_inclusive_min,
    span<const Index> new_exclusive_max,
    span<const Index> inclusive_min_constraint,
    span<const Index> exclusive_max_constraint, bool expand_only,
    bool shrink_only) {
  // The current bounds must match the caller-supplied constraints.
  if (auto status = ValidateResizeDomainConstraint(
          current_domain, inclusive_min_constraint, exclusive_max_constraint);
      !status.ok()) {
    return status;
  }
  // The new bounds must honor the expand-only / shrink-only restrictions.
  if (auto status = ValidateExpandShrinkConstraints(
          current_domain, new_inclusive_min, new_exclusive_max, expand_only,
          shrink_only);
      !status.ok()) {
    return status;
  }
  return absl::OkStatus();
}
// Translates a resize request expressed on the input domain of `transform`
// into grid-dimension bounds and constraints for the metadata update.
//
// Returns `kAborted` with an empty message if the resize is a no-op, and
// `kFailedPrecondition` if the request conflicts with explicit (fixed)
// bounds or would affect other fields without `resize_tied_bounds`.
Result<ResizeParameters> GetResizeParameters(
    ChunkedDataCacheBase* data_cache, const void* metadata,
    size_t component_index, IndexTransformView<> transform,
    span<const Index> inclusive_min, span<const Index> exclusive_max,
    ResizeOptions options, TransactionMode transaction_mode) {
  assert(transform.input_rank() == inclusive_min.size());
  assert(transform.input_rank() == exclusive_max.size());
  const DimensionIndex output_rank = transform.output_rank();
  DimensionSet base_implicit_lower_bounds;
  DimensionSet base_implicit_upper_bounds;
  Box<dynamic_rank(kMaxRank)> base_bounds;
  data_cache->GetComponentBounds(metadata, component_index, base_bounds,
                                 base_implicit_lower_bounds,
                                 base_implicit_upper_bounds);
  const auto& grid = data_cache->grid();
  const DimensionIndex grid_rank = grid.grid_rank();
  // Propagate the input-domain resize through `transform` to per-output-
  // dimension new bounds and constraints.
  Index new_output_inclusive_min[kMaxRank];
  Index new_output_exclusive_max[kMaxRank];
  Index output_inclusive_min_constraint[kMaxRank];
  Index output_exclusive_max_constraint[kMaxRank];
  bool is_noop;
  TENSORSTORE_RETURN_IF_ERROR(PropagateInputDomainResizeToOutput(
      transform, inclusive_min, exclusive_max,
      (options.mode & resize_tied_bounds) ==
          resize_tied_bounds,
      {&output_inclusive_min_constraint[0], output_rank},
      {&output_exclusive_max_constraint[0], output_rank},
      {&new_output_inclusive_min[0], output_rank},
      {&new_output_exclusive_max[0], output_rank}, &is_noop));
  if (is_noop) return absl::AbortedError("");
  if (grid.components.size() != 1 && !(options.mode & resize_tied_bounds)) {
    return absl::FailedPreconditionError(
        "Resize operation would affect other fields but "
        "`resize_tied_bounds` was not specified");
  }
  // Check each output dimension's request against explicit bounds.
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const IndexInterval dim_bounds = base_bounds[output_dim];
    if (!base_implicit_lower_bounds[output_dim]) {
      const Index min_constraint = output_inclusive_min_constraint[output_dim];
      if (!ImplicitOrEqual(min_constraint, dim_bounds.inclusive_min())) {
        return ShapeConstraintError(output_dim, dim_bounds.inclusive_min(),
                                    min_constraint);
      }
      const Index new_inclusive_min = new_output_inclusive_min[output_dim];
      if (!ImplicitOrEqual(new_inclusive_min, dim_bounds.inclusive_min())) {
        return absl::FailedPreconditionError(tensorstore::StrCat(
            "Cannot change inclusive lower bound of output dimension ",
            output_dim, ", which is fixed at ", dim_bounds.inclusive_min(),
            ", to ", new_inclusive_min));
      }
    }
    if (!base_implicit_upper_bounds[output_dim]) {
      const Index max_constraint = output_exclusive_max_constraint[output_dim];
      if (!ImplicitOrEqual(max_constraint, dim_bounds.exclusive_max())) {
        return ShapeConstraintError(output_dim, max_constraint,
                                    dim_bounds.exclusive_max());
      }
      const Index new_exclusive_max = new_output_exclusive_max[output_dim];
      if (!ImplicitOrEqual(new_exclusive_max, dim_bounds.exclusive_max())) {
        return absl::FailedPreconditionError(tensorstore::StrCat(
            "Cannot change exclusive upper bound of output dimension ",
            output_dim, ", which is fixed at ", dim_bounds.exclusive_max(),
            ", to ", new_exclusive_max));
      }
    }
    // Atomic isolated transactions (without metadata-only / expand-only)
    // pin the current bounds as constraints to detect concurrent resizes.
    if (transaction_mode == TransactionMode::atomic_isolated &&
        !(options.mode & resize_metadata_only) &&
        !(options.mode & expand_only)) {
      output_inclusive_min_constraint[output_dim] = dim_bounds.inclusive_min();
      output_exclusive_max_constraint[output_dim] = dim_bounds.exclusive_max();
    }
  }
  // Map output dimensions to grid (chunked) dimensions.
  span<const DimensionIndex> chunked_to_cell_dimensions =
      grid.components[component_index].chunked_to_cell_dimensions;
  std::vector<Index> new_grid_inclusive_min(grid_rank);
  std::vector<Index> new_grid_exclusive_max(grid_rank);
  std::vector<Index> grid_inclusive_min_constraint(grid_rank);
  std::vector<Index> grid_exclusive_max_constraint(grid_rank);
  for (DimensionIndex i = 0; i < grid_rank; ++i) {
    const DimensionIndex j = chunked_to_cell_dimensions[i];
    new_grid_inclusive_min[i] = new_output_inclusive_min[j];
    new_grid_exclusive_max[i] = new_output_exclusive_max[j];
    grid_inclusive_min_constraint[i] = output_inclusive_min_constraint[j];
    grid_exclusive_max_constraint[i] = output_exclusive_max_constraint[j];
  }
  return ResizeParameters{
      new_grid_inclusive_min,
      new_grid_exclusive_max,
      grid_inclusive_min_constraint,
      grid_exclusive_max_constraint,
      (options.mode & expand_only) == expand_only,
      (options.mode & shrink_only) == shrink_only};
}
// Garbage-collection support: the only external resource reachable from a
// driver is its underlying base kvstore, visited via the metadata cache.
void KvsMetadataDriverBase::GarbageCollectionBase::Visit(
    garbage_collection::GarbageCollectionVisitor& visitor,
    const KvsMetadataDriverBase& value) {
  auto* cache = value.cache();
  auto* metadata_cache = cache->metadata_cache();
  garbage_collection::GarbageCollectionVisit(visitor,
                                             *metadata_cache->base_store());
}
namespace jb = tensorstore::internal_json_binding;
// JSON binder for the members shared by all kvstore-backed driver specs:
// context resources, the base kvstore spec/path (normalized to a directory
// path), staleness bounds ("recheck_cached_metadata" defaults to being
// bounded by open time), and the open-mode flags.
TENSORSTORE_DEFINE_JSON_BINDER(
    SpecJsonBinder,
    jb::Sequence(
        jb::Member(internal::DataCopyConcurrencyResource::id,
                   jb::Projection<&KvsDriverSpec::data_copy_concurrency>()),
        jb::Member(internal::CachePoolResource::id,
                   jb::Projection<&KvsDriverSpec::cache_pool>()),
        jb::Member("metadata_cache_pool",
                   jb::Projection<&KvsDriverSpec::metadata_cache_pool>()),
        jb::Projection<&KvsDriverSpec::store>(jb::KvStoreSpecAndPathJsonBinder),
        jb::Initialize([](auto* obj) {
          internal::EnsureDirectoryPath(obj->store.path);
          return absl::OkStatus();
        }),
        jb::Projection<&KvsDriverSpec::staleness>(jb::Sequence(
            jb::Member("recheck_cached_metadata",
                       jb::Projection(&StalenessBounds::metadata,
                                      jb::DefaultValue([](auto* obj) {
                                        obj->bounded_by_open_time = true;
                                      }))),
            jb::Member("recheck_cached_data",
                       jb::Projection(&StalenessBounds::data,
                                      jb::DefaultInitializedValue())))),
        internal::OpenModeSpecJsonBinder));
}
} | #include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::Index;
using ::tensorstore::kImplicit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_kvs_backed_chunk_driver::
ValidateResizeConstraints;
using ISpan = ::tensorstore::span<const Index>;
// Cases where the resize request is compatible with the current domain and
// the expand_only/shrink_only flags, so validation succeeds.
TEST(ValidateResizeConstraintsTest, Success) {
  EXPECT_EQ(absl::OkStatus(),
            ValidateResizeConstraints(
                Box({0, 0}, {4, 5}),
                ISpan({kImplicit, kImplicit}),
                ISpan({kImplicit, 6}),
                ISpan({0, 0}),
                ISpan({kImplicit, kImplicit}),
                false,
                false));
  EXPECT_EQ(absl::OkStatus(),
            ValidateResizeConstraints(
                Box({0, 0}, {4, 5}),
                ISpan({kImplicit, kImplicit}),
                ISpan({4, 6}),
                ISpan({0, 0}),
                ISpan({4, kImplicit}),
                false,
                false));
  // Growing dimension 1 is allowed under expand_only.
  EXPECT_EQ(absl::OkStatus(),
            ValidateResizeConstraints(
                Box({0, 0}, {4, 5}),
                ISpan({kImplicit, kImplicit}),
                ISpan({kImplicit, 6}),
                ISpan({0, 0}),
                ISpan({kImplicit, kImplicit}),
                true,
                false));
  // Shrinking dimension 1 is allowed under shrink_only.
  EXPECT_EQ(absl::OkStatus(),
            ValidateResizeConstraints(
                Box({0, 0}, {4, 5}),
                ISpan({kImplicit, kImplicit}),
                ISpan({kImplicit, 3}),
                ISpan({0, 0}),
                ISpan({kImplicit, kImplicit}),
                false,
                true));
  // A no-op resize satisfies expand_only and shrink_only simultaneously.
  EXPECT_EQ(absl::OkStatus(),
            ValidateResizeConstraints(
                Box({0, 0}, {4, 5}),
                ISpan({kImplicit, kImplicit}),
                ISpan({kImplicit, 5}),
                ISpan({0, 0}),
                ISpan({kImplicit, kImplicit}),
                true,
                true));
  EXPECT_EQ(absl::OkStatus(),
            ValidateResizeConstraints(
                Box({0, 0}, {4, 5}),
                ISpan({kImplicit, kImplicit}),
                ISpan({kImplicit, 5}),
                ISpan({0, 0}),
                ISpan({kImplicit, kImplicit}),
                true,
                true));
}
// Cases where validation must fail: out-of-bounds constraints, resizes that
// would affect other dimensions, and expand_only/shrink_only violations.
TEST(ValidateResizeConstraintsTest, Failure) {
  EXPECT_THAT(
      ValidateResizeConstraints(
          Box({0, 0}, {4, 5}),
          ISpan({kImplicit, kImplicit}),
          ISpan({kImplicit, 6}),
          ISpan({0, 0}),
          ISpan({5, kImplicit}),
          false,
          false),
      MatchesStatus(absl::StatusCode::kFailedPrecondition,
                    "Resize operation would also affect output dimension 0 "
                    "over the out-of-bounds interval \\[4, 5\\)"));
  EXPECT_THAT(
      ValidateResizeConstraints(
          Box({0, 0}, {4, 5}),
          ISpan({kImplicit, kImplicit}),
          ISpan({kImplicit, 6}),
          ISpan({0, 0}),
          ISpan({3, kImplicit}),
          false,
          false),
      MatchesStatus(
          absl::StatusCode::kFailedPrecondition,
          "Resize operation would also affect output dimension 0 over the "
          "interval \\[3, 4\\) but `resize_tied_bounds` was not specified"));
  // Expanding while shrink_only is set.
  EXPECT_THAT(
      ValidateResizeConstraints(
          Box({0, 0}, {4, 5}),
          ISpan({kImplicit, kImplicit}),
          ISpan({kImplicit, 6}),
          ISpan({0, 0}),
          ISpan({kImplicit, kImplicit}),
          false,
          true),
      MatchesStatus(
          absl::StatusCode::kFailedPrecondition,
          "Resize operation would expand output dimension 1 from "
          "\\[0, 5\\) to \\[0, 6\\) but `shrink_only` was specified"));
  // Shrinking while expand_only is set.
  EXPECT_THAT(
      ValidateResizeConstraints(
          Box({0, 0}, {4, 5}),
          ISpan({kImplicit, kImplicit}),
          ISpan({kImplicit, 4}),
          ISpan({0, 0}),
          ISpan({kImplicit, kImplicit}),
          true,
          false),
      MatchesStatus(
          absl::StatusCode::kFailedPrecondition,
          "Resize operation would shrink output dimension 1 from "
          "\\[0, 5\\) to \\[0, 4\\) but `expand_only` was specified"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/kvs_backed_chunk_driver.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/kvs_backed_chunk_driver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
aacfa9f6-1622-4f49-9337-a55a4bb65fda | cpp | tensorflow/tensorflow | sort | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/sort.cc | third_party/xla/xla/tests/sort_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/sort.h"
#include <cstdint>
#include "llvm/ADT/ilist.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
using OpListType = llvm::iplist<Operation>;
// Returns true if `comparator` is the comparator region produced by a
// descending top-k sort: a single block containing exactly an `mhlo.compare`
// (direction GT, comparing block arguments 0 and 1 in that order) followed
// by a return of that comparison result.
template <typename ReturnOpType>
bool MatchTopKComparator(Region& comparator) {
  if (!comparator.hasOneBlock()) return false;
  Block& comparator_blk = comparator.front();
  OpListType& operations = comparator_blk.getOperations();
  if (operations.size() != 2) return false;
  auto compare_op =
      llvm::dyn_cast_or_null<mhlo::CompareOp>(&operations.front());
  auto return_op = llvm::dyn_cast_or_null<ReturnOpType>(&operations.back());
  if (!compare_op || !return_op) return false;
  if (compare_op.getComparisonDirection() != mhlo::ComparisonDirection::GT) {
    return false;
  }
  // The comparison must be over the block's own arguments, in order.
  if (compare_op.getOperands()[0] != comparator_blk.getArgument(0) ||
      compare_op.getOperands()[1] != comparator_blk.getArgument(1)) {
    return false;
  }
  return return_op.getOperands().front() == compare_op.getResult();
}
// Returns true if `op` is NOT a recognizable top-k pattern (the polarity is
// inverted so this predicate doubles as the dynamic-legality check: a sort
// that is not top-k stays legal and is left alone).
//
// The pattern requires: exactly two operands (static-shaped int/float keys
// and static-shaped i32 indices), sorting along the last dimension, indices
// that are an iota along that dimension, and a GT comparator.
bool IsSortOpNotTopK(mhlo::SortOp op) {
  if (op->getNumOperands() != 2) {
    return true;
  }
  auto keys_opr = op.getInputs().front();
  auto keys_type = llvm::cast<ShapedType>(keys_opr.getType());
  if (!keys_type.hasStaticShape() ||
      !keys_type.getElementType().isIntOrFloat()) {
    return true;
  }
  auto indices_opr = op.getInputs().back();
  auto indices_type = llvm::cast<ShapedType>(indices_opr.getType());
  if (!indices_type.hasStaticShape() ||
      !indices_type.getElementType().isInteger(32)) {
    return true;
  }
  const int64_t sort_dim = op.getDimension();
  // `k` is the extent of the sort dimension in the indices operand.
  const auto k = indices_type.getDimSize(sort_dim);
  const auto rank = keys_type.getRank();
  if (sort_dim != rank - 1 || k < 1) {
    return true;
  }
  OpBuilder b(op->getContext());
  if (!MatchIota(b.getI64TensorAttr({sort_dim}), indices_opr)) {
    return true;
  }
  if (!MatchTopKComparator<mhlo::ReturnOp>(op.getComparator())) {
    return true;
  }
  return false;
}
// Conversion pattern that rewrites an mhlo.sort matching the top-k pattern
// into a tfl.topk_v2 op.
class LegalizeSortOp : public OpConversionPattern<mhlo::SortOp> {
 public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult matchAndRewrite(
      mhlo::SortOp sort_op, OpAdaptor adaptor,
      ConversionPatternRewriter& rewriter) const final;
};
// Replaces a top-k-shaped mhlo.sort with tfl.topk_v2, materializing `k` (the
// last-dimension extent of the indices operand) as a scalar i32 constant.
LogicalResult LegalizeSortOp::matchAndRewrite(
    mhlo::SortOp op, OpAdaptor adaptor,
    ConversionPatternRewriter& rewriter) const {
  if (IsSortOpNotTopK(op)) {
    return failure();
  }
  auto keys = op.getInputs().front();
  auto indices = op.getInputs().back();
  auto indices_type = llvm::cast<ShapedType>(indices.getType());
  const int32_t k = indices_type.getShape().back();
  auto k_cst_attr = DenseIntElementsAttr::get(
      RankedTensorType::get({}, rewriter.getI32Type()), k);
  auto k_cst = rewriter.create<arith::ConstantOp>(op->getLoc(), k_cst_attr);
  rewriter.replaceOpWithNewOp<TFL::TopKV2Op>(op, keys.getType(),
                                             indices.getType(), keys, k_cst);
  return success();
}
}
// Registers the sort-to-topk pattern and marks mhlo.sort dynamically legal
// exactly when it is not a top-k pattern (so only top-k sorts are converted).
void PopulateSortPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
                          ConversionTarget& target) {
  patterns.add<LegalizeSortOp>(ctx);
  target.addDynamicallyLegalOp<mhlo::SortOp>(IsSortOpNotTopK);
}
} | #include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "xla/error_spec.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
// Test fixture for XLA sort; inherits HLO text running/comparison helpers.
class SortTest : public HloTestBase {};
// Sorts a 2-D array along dimension 0 and compares against the reference
// backend (exact match: ErrorSpec{0, 0}).
XLA_TEST_F(SortTest, SortDim0) {
  std::string_view hlo_text_module = R"(
    HloModule sort

    compare {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT lt = pred[] compare(p0, p1), direction=LT
    }

    ENTRY e {
      x = f32[32,64] parameter(0)
      ROOT sort = f32[32,64] sort(x), dimensions={0}, to_apply=compare
    }
  )";
  EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
// Same as SortDim0 but sorting along dimension 1.
XLA_TEST_F(SortTest, SortDim1) {
  std::string_view hlo_text_module = R"(
    HloModule sort

    compare {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT lt = pred[] compare(p0, p1), direction=LT
    }

    ENTRY e {
      x = f32[32,64] parameter(0)
      ROOT sort = f32[32,64] sort(x), dimensions={1}, to_apply=compare
    }
  )";
  EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
// Two sort ops sharing one comparator computation in the same module;
// exercises comparator reuse across different operand shapes/dimensions.
XLA_TEST_F(SortTest, SortTwiceWithSameComparator) {
  std::string_view hlo_text_module = R"(
    HloModule sort

    compare {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT lt = pred[] compare(p0, p1), direction=LT
    }

    ENTRY e {
      x = f32[32,64] parameter(0)
      y = f32[64,32] parameter(1)
      sort_x = f32[32,64] sort(x), dimensions={0}, to_apply=compare
      sort_y = f32[64,32] sort(y), dimensions={1}, to_apply=compare
      ROOT tuple = (f32[32,64], f32[64,32]) tuple(sort_x, sort_y)
    }
  )";
  EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
// Parameterized fixture: the parameter is the number of sort inputs; `Name`
// produces readable test-instance names like "Sort17Inputs".
class SortManyInputsTest : public SortTest,
                           public ::testing::WithParamInterface<int> {
 public:
  static std::string Name(const ::testing::TestParamInfo<int>& info) {
    auto num_inputs = info.param;
    return absl::StrFormat("Sort%dInputs", num_inputs);
  }
};
// Builds an HLO module with N sort inputs from a text template (declaring N
// parameters, the variadic sort, and 2*N comparator parameters) and compares
// against the reference backend.
XLA_TEST_P(SortManyInputsTest, SortManyInputs) {
  int num_inputs = GetParam();
  std::string_view hlo_text_module_template = R"(
    HloModule sort

    compare {
      ${COMPARE_DECLARATIONS}
      ROOT lt = pred[] compare(p0, p1), direction=LT
    }

    ENTRY e {
      ${SORT_DECLARATIONS}
      ROOT sort = (${SORT_SHAPE}) sort(${SORT_PARAMS}), dimensions={0},
        to_apply=compare
    }
  )";

  // Sort parameter declarations and their names.
  std::string sort_decls = "";
  std::vector<std::string> param_names;
  param_names.reserve(num_inputs * 2);
  for (int i = 0; i < num_inputs; ++i) {
    sort_decls += absl::StrFormat("p%d = f32[32,64] parameter(%d)\n", i, i);
    param_names.emplace_back(absl::StrCat("p", i));
  }
  std::string sort_params = absl::StrJoin(param_names, ", ");
  std::string sort_shape =
      absl::StrJoin(std::vector<std::string>(num_inputs, "f32[32,64]"), ",");

  // Comparator gets two scalar parameters per sort input.
  std::string compare_decls = "";
  for (int i = 0; i < num_inputs * 2; ++i) {
    compare_decls += absl::StrFormat("p%d = f32[] parameter(%d)\n", i, i);
  }
  std::string compare_params = absl::StrJoin(param_names, ", ");

  // Finalize the HLO text
  std::string hlo_text_module = absl::StrReplaceAll(
      hlo_text_module_template, {{"${SORT_DECLARATIONS}", sort_decls},
                                 {"${SORT_SHAPE}", sort_shape},
                                 {"${SORT_PARAMS}", sort_params},
                                 {"${COMPARE_DECLARATIONS}", compare_decls}});
  EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
INSTANTIATE_TEST_SUITE_P(ManyInputs, SortManyInputsTest,
::testing::Values(17, 20), SortManyInputsTest::Name);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/sort.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/sort_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f058373b-e476-464a-856c-33de8c061930 | cpp | tensorflow/tensorflow | activations | tensorflow/lite/kernels/activations.cc | tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc | #include <stddef.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/leaky_relu.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/gelu.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h"
#include "tensorflow/lite/kernels/internal/reference/logistic.h"
#include "tensorflow/lite/kernels/internal/reference/prelu.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/softmax.h"
#include "tensorflow/lite/kernels/internal/reference/tanh.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace activations {
enum KernelType {
kReference,
kGenericOptimized,
kFixedPointOptimized,
};
// Per-op quantization state shared by several activation kernels.  The union
// holds a lookup table in exactly one of the element types, selected per op.
struct OpData {
  int32_t input_multiplier = 0;
  int input_left_shift = 0;
  int32_t input_range_radius = 0;
  int diff_min = 0;
  union {
    uint8_t lut_uint8[LUTSize<uint8_t>()];
    int8_t lut_int8[LUTSize<int8_t>()];
    int16_t lut_int16[LUTSize<int16_t>()];
  };
};
// Softmax-specific state: softmax params plus precomputed lookup tables
// (float exp table, optional uint8 tables, and int16 LUTs for exp and
// 1/(1+x)).
struct SoftmaxOpData {
  struct SoftmaxParams params = {};
  float table[256];
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  uint8_t uint8_table1[256];
  uint8_t uint8_table2[256];
#endif
  static constexpr int kInt16LUTArraySize = LUTSize<int16_t>();
  int16_t exp_lut[kInt16LUTArraySize];
  int16_t one_over_one_plus_x_lut[kInt16LUTArraySize];
};
// LogSoftmax state: reverse-scaling quantization factors on top of OpData.
struct LogSoftmaxOpData : public OpData {
  int32_t reverse_scaling_divisor = 0;
  int32_t reverse_scaling_right_shift = 0;
  struct SoftmaxParams params = {};
  float f_table[256];
};
// LeakyRelu state: separate requantization factors for the alpha-scaled
// (negative) branch and the identity (positive) branch.
struct LeakyReluOpData : public OpData {
  int32_t output_multiplier_alpha = 0;
  int32_t output_shift_alpha = 0;
  int32_t output_multiplier_identity = 0;
  int32_t output_shift_identity = 0;
};
// PRelu state: two requantization factor pairs plus whether the alpha tensor
// must be broadcast against the input shape.
struct PreluOpData : public OpData {
  int32_t output_multiplier_1 = 0;
  int32_t output_shift_1 = 0;
  int32_t output_multiplier_2 = 0;
  int32_t output_shift_2 = 0;
  bool requires_broadcast;
};
// HardSwish state: just the kernel parameters.
struct HardSwishData {
  HardSwishParams params;
};
// Relu state: output requantization multiplier/shift on top of OpData.
struct ReluOpData : public OpData {
  int32_t output_multiplier = 0;
  int output_shift = 0;
};
namespace {
// Quantized ReluX for element type T: converts the float activation range
// [act_min, act_max] into quantized clamp bounds (clipped to T's range; an
// infinite act_max maps to T's max) and runs the optimized clamp kernel.
template <typename T>
void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input,
                    TfLiteTensor* output, const ReluOpData* data) {
  ReluParams params;
  params.quantized_activation_min =
      std::max(static_cast<int32_t>(std::numeric_limits<T>::min()),
               output->params.zero_point +
                   static_cast<int32>(roundf(act_min / output->params.scale)));
  params.quantized_activation_max =
      act_max == std::numeric_limits<float>::infinity()
          ? static_cast<int32_t>(std::numeric_limits<T>::max())
          : std::min(
                static_cast<int32_t>(std::numeric_limits<T>::max()),
                output->params.zero_point +
                    static_cast<int32>(roundf(act_max / output->params.scale)));
  params.input_offset = input->params.zero_point;
  params.output_offset = output->params.zero_point;
  params.output_multiplier = data->output_multiplier;
  params.output_shift = data->output_shift;
  optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input),
                       GetTensorShape(output), GetTensorData<T>(output));
}
}
// Allocates the per-node OpData; paired with Free() below.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new OpData;
  return op_data;
}
// Allocates the per-node SoftmaxOpData; paired with SoftmaxFree().
void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new SoftmaxOpData;
  return op_data;
}
// Releases the SoftmaxOpData allocated by SoftmaxInit().
void SoftmaxFree(TfLiteContext* context, void* buffer) {
  // static_cast is the correct named cast for void* -> T*; reinterpret_cast
  // was unnecessary (and inconsistent with HardSwishFree below).
  delete static_cast<SoftmaxOpData*>(buffer);
}
// Allocates the per-node LogSoftmaxOpData; paired with LogSoftmaxFree().
void* LogSoftmaxInit(TfLiteContext* context, const char* buffer,
                     size_t length) {
  auto* op_data = new LogSoftmaxOpData;
  return op_data;
}
// Allocates the per-node PreluOpData; paired with PreluFree().
void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new PreluOpData;
  return op_data;
}
// Releases the OpData allocated by Init().
void Free(TfLiteContext* context, void* buffer) {
  // static_cast is the correct named cast for void* -> T*; reinterpret_cast
  // was unnecessary (and inconsistent with HardSwishFree below).
  delete static_cast<OpData*>(buffer);
}
// Releases the LogSoftmaxOpData allocated by LogSoftmaxInit().
void LogSoftmaxFree(TfLiteContext* context, void* buffer) {
  // static_cast is the correct named cast for void* -> T*.
  delete static_cast<LogSoftmaxOpData*>(buffer);
}
// Releases the PreluOpData allocated by PreluInit().
void PreluFree(TfLiteContext* context, void* buffer) {
  // static_cast is the correct named cast for void* -> T*.
  delete static_cast<PreluOpData*>(buffer);
}
// Allocates the per-node HardSwishData; paired with HardSwishFree().
void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new HardSwishData;
  return op_data;
}
// Generic Prepare: requires exactly one input and one output of the same
// type, and resizes the output to the input's shape.
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = nullptr;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output = nullptr;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  // The output shape mirrors the input shape exactly.
  TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
  return context->ResizeTensor(context, output, output_shape);
}
// Allocates the per-node ReluOpData; paired with ReluFree().
void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new ReluOpData;
  return op_data;
}
// Releases the ReluOpData allocated by ReluInit().
void ReluFree(TfLiteContext* context, void* buffer) {
  // static_cast is the correct named cast for void* -> T*.
  delete static_cast<ReluOpData*>(buffer);
}
// Prepare for RELU: validates 1-in/1-out of matching type and, for quantized
// types, precomputes the input->output rescaling multiplier.
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    // Fold the scale ratio into a fixed-point multiplier + shift pair.
    double real_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }
  if (input->type == kTfLiteInt16) {
    // int16 activations must be symmetrically quantized (zero_point == 0).
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
// Allocates the per-node LeakyReluOpData; paired with LeakyReluFree().
void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new LeakyReluOpData;
  return op_data;
}
// Releases the LeakyReluOpData allocated by LeakyReluInit().
void LeakyReluFree(TfLiteContext* context, void* buffer) {
  // static_cast is the correct named cast for void* -> T*.
  delete static_cast<LeakyReluOpData*>(buffer);
}
// Releases the HardSwishData allocated by HardSwishInit().
void HardSwishFree(TfLiteContext* context, void* buffer) {
  auto* op_data = static_cast<HardSwishData*>(buffer);
  delete op_data;
}
// Prepare for HARD_SWISH: generic shape/type checks, plus precomputation of
// fixed-point rescaling parameters for the quantized uint8/int8 paths.
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_STATUS(GenericPrepare(context, node));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
    HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
    HardSwishParams* params = &data->params;
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
    params->input_zero_point = input->params.zero_point;
    params->output_zero_point = output->params.zero_point;
    const float input_scale = input->params.scale;
    // "hires" = input rescaled into a higher-resolution intermediate domain.
    const float hires_input_scale = (1.0f / 128.0f) * input_scale;
    // Scale of the clamped "relu-ish" intermediate, covering [0, 3].
    const float reluish_scale = 3.0f / 32768.0f;
    const float output_scale = output->params.scale;
    // Multiplier taking the hires intermediate to the output domain.
    const float output_multiplier = hires_input_scale / output_scale;
    int32_t output_multiplier_fixedpoint_int32;
    QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
                       &params->output_multiplier_exponent);
    DownScaleInt32ToInt16Multiplier(
        output_multiplier_fixedpoint_int32,
        &params->output_multiplier_fixedpoint_int16);
    // The kernel assumes the output rescale only ever shifts right.
    TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);
    // Multiplier taking the hires intermediate to the relu-ish domain.
    const float reluish_multiplier = hires_input_scale / reluish_scale;
    int32_t reluish_multiplier_fixedpoint_int32;
    QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32,
                       &params->reluish_multiplier_exponent);
    DownScaleInt32ToInt16Multiplier(
        reluish_multiplier_fixedpoint_int32,
        &params->reluish_multiplier_fixedpoint_int16);
  }
  return kTfLiteOk;
}
// Prepare for LEAKY_RELU: validates shapes/types and precomputes fixed-point
// multipliers for both linear segments of the quantized kernel.
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);
  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    const auto* params =
        reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
    // Segment for x < 0: rescale by alpha as well as the scale ratio.
    double alpha_multiplier =
        input->params.scale * params->alpha / output->params.scale;
    QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
                       &data->output_shift_alpha);
    // Segment for x >= 0: plain input->output rescale.
    double identity_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
                       &data->output_shift_identity);
  }
  if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
    // int16 activations must be symmetrically quantized (zero_point == 0).
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
// Prepare for TANH: validates shapes/types and precomputes the fixed-point
// parameters or lookup tables required by the selected kernel_type.
template <KernelType kernel_type>
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      // Derive a Q-format multiplier/shift pair from the input scale so the
      // kernel can map real input values into its fixed-point domain.
      static constexpr int kInputIntegerBits = 4;
      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));
      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1LL << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);
      // Inputs beyond this radius saturate to the extreme output values.
      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }
  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    // LUT-based paths: tabulate tanh over the quantized input domain.
    if (input->type == kTfLiteUInt8) {
      LUTPopulate<uint8_t>(
          input->params.scale, input->params.zero_point, output->params.scale,
          output->params.zero_point,
          [](float value) { return std::tanh(value); }, data->lut_uint8);
    } else if (input->type == kTfLiteInt8) {
      LUTPopulate<int8_t>(
          input->params.scale, input->params.zero_point, output->params.scale,
          output->params.zero_point,
          [](float value) { return std::tanh(value); }, data->lut_int8);
    }
  }
  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;
    // int16 activations must be symmetrically quantized (zero_point == 0).
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);
    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &=
        (data->input_left_shift == 0 || data->input_left_shift == 1);
    if (!param_scale_pot) {
      // Non power-of-two scale: pick a multiplier/shift pair instead, keeping
      // the scaled multiplier within int16 headroom.
      double multiplier = input->params.scale * 4096.0 * 3.0;
      data->input_left_shift = 0;
      while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) {
        data->input_left_shift++;
        multiplier = multiplier * 2.0;
      }
      data->input_multiplier = static_cast<int32_t>(multiplier);
    }
    int output_scale_log2_rounded;
    // The output scale must be exactly 2^-15 (Q0.15).
    TF_LITE_ENSURE(
        context, CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
// Prepare for LOGISTIC (sigmoid): validates shapes/types, checks the fixed
// output quantization the kernels assume, and precomputes fixed-point
// parameters or lookup tables for the selected kernel_type.
template <KernelType kernel_type>
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      // The fixed-point kernel requires the output zero point to sit at the
      // type's minimum (sigmoid range [0, 1] maps onto the full type range).
      if (input->type == kTfLiteUInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<uint8_t>::min());
      }
      if (input->type == kTfLiteInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<int8_t>::min());
      }
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      // Derive a Q-format multiplier/shift pair from the input scale.
      static constexpr int kInputIntegerBits = 4;
      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));
      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1LL << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);
      // Inputs beyond this radius saturate to the extreme output values.
      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }
  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    // LUT-based paths: tabulate sigmoid over the quantized input domain.
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      LUTPopulate<uint8_t>(
          input->params.scale, input->params.zero_point, output->params.scale,
          output->params.zero_point,
          [](float value) { return 1.0f / (1.0f + std::exp(-value)); },
          data->lut_uint8);
    } else if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      LUTPopulate<int8_t>(
          input->params.scale, input->params.zero_point, output->params.scale,
          output->params.zero_point,
          [](float value) { return 1.0f / (1.0f + std::exp(-value)); },
          data->lut_int8);
    } else if (input->type == kTfLiteInt16) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 32768);
      TF_LITE_ENSURE(context, output->params.zero_point == 0);
    }
  }
  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;
    // int16 activations must be symmetrically quantized (zero_point == 0).
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);
    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &= (data->input_left_shift == 0);
    if (!param_scale_pot) {
      // Non power-of-two scale: pick a multiplier/shift pair instead, keeping
      // the scaled multiplier within int16 headroom.
      double multiplier = input->params.scale * 4096.0 * 3.0;
      data->input_left_shift = 0;
      while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) {
        data->input_left_shift++;
        multiplier = multiplier * 2.0;
      }
      data->input_multiplier = static_cast<int32_t>(multiplier);
    }
    int output_scale_log2_rounded;
    // The output scale must be exactly 2^-15 (Q0.15).
    TF_LITE_ENSURE(
        context, CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
// Prepare for SOFTMAX: validates the fixed output quantization the kernels
// assume and precomputes either fixed-point scaling (reference path) or
// lookup tables (optimized and int16 paths).
template <KernelType kernel_type>
TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
  SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
  // Softmax outputs lie in [0, 1], so the kernels require a fixed output
  // quantization: scale 1/256 at zero_point -128 for int8, scale 1/32768 at
  // zero_point 0 for int16.
  if (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128);
    TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 256,
                        (0.001f * 1.f / 256));
  } else if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
    TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768,
                        (0.001f * 1.f / 32768));
  }
  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
    if (kernel_type == kReference) {
      // Reference path: fixed-point input scaling (beta folded in).
      const int kScaledDiffIntegerBits = 5;
      int input_left_shift;
      tflite::PreprocessSoftmaxScaling(
          static_cast<double>(params->beta),
          static_cast<double>(input->params.scale), kScaledDiffIntegerBits,
          &data->params.input_multiplier, &input_left_shift);
      data->params.input_left_shift = input_left_shift;
      data->params.diff_min =
          -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
                                              input_left_shift);
    } else {
      // Optimized path: exp() lookup tables.
      switch (output->type) {
        case kTfLiteUInt8:
        case kTfLiteInt8:
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
          data->params.uint8_table1 = data->uint8_table1;
          data->params.uint8_table2 = data->uint8_table2;
          optimized_ops::PopulateSoftmaxUInt8LookupTable(
              &data->params, input->params.scale, params->beta);
          break;
#endif
          // Without the uint16-LUT build flag, uint8/int8 intentionally fall
          // through to the generic float lookup table below.
        case kTfLiteInt16:
        default:
          data->params.table = data->table;
          optimized_ops::PopulateSoftmaxLookupTable(
              &data->params, input->params.scale, params->beta);
      }
      data->params.zero_point = output->params.zero_point;
      data->params.scale = output->params.scale;
    }
  } else if (input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
    const int32_t range = std::numeric_limits<int16_t>::max() -
                          std::numeric_limits<int16_t>::min();
    // int16 path: LUTs for exp(x) over [-10, 0] and 1/(1+x) over [0, 1].
    data->params.exp_lut = data->exp_lut;
    LUTPopulate<int16_t>(
        10.0 / range, std::numeric_limits<int16_t>::max(), 2.0 / range, 0,
        [](double value) { return std::exp(value); }, data->params.exp_lut);
    data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut;
    LUTPopulate<int16_t>(
        1.0 / range, std::numeric_limits<int16_t>::min(), 2.0 / range, 0,
        [](double value) { return 1.0 / (1.0 + value); },
        data->params.one_over_one_plus_x_lut);
    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;
    // Rescale input differences into the exp LUT's input domain.
    double input_scale_beta_rescale =
        input->params.scale * params->beta /
        (10.0 / 65535.0);
    QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier,
                       &data->params.input_left_shift);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
// Prepare for LOG_SOFTMAX: validates the fixed output quantization the
// kernels assume and precomputes scaling parameters or lookup tables.
template <KernelType kernel_type>
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
    // log-softmax outputs lie in (-inf, 0]; the kernels require scale 16/256
    // with the zero point at the type's maximum representable value.
    TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
    static const double kBeta = 1.0;
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
    }
    if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
    }
    if (kernel_type == kReference) {
      // Reference path: fixed-point input scaling plus reverse scaling for
      // the log(sum(exp)) correction term.
      const int kScaledDiffIntegerBits = 5;
      int input_left_shift;
      int reverse_scaling_right_shift;
      tflite::PreprocessLogSoftmaxScalingExp(
          kBeta, static_cast<double>(input->params.scale),
          kScaledDiffIntegerBits, &data->params.input_multiplier,
          &input_left_shift, &data->params.reverse_scaling_divisor,
          &reverse_scaling_right_shift);
      reverse_scaling_right_shift *= -1;
      data->params.input_left_shift = input_left_shift;
      data->params.reverse_scaling_right_shift = reverse_scaling_right_shift;
      data->params.diff_min = -tflite::CalculateInputRadius(
          kScaledDiffIntegerBits, input_left_shift);
    } else {
      // Optimized path: exp() lookup table.
      data->params.table = data->f_table;
      optimized_ops::PopulateSoftmaxLookupTable(&data->params,
                                                input->params.scale, kBeta);
      data->params.zero_point = output->params.zero_point;
      data->params.scale = output->params.scale;
    }
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
// Prepare for PRELU (inputs: data tensor and alpha tensor): validates types,
// precomputes quantized multipliers for both segments, and sizes the output
// to the broadcast shape of input and alpha.
TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* alpha;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
  PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
  output->type = input->type;
  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
    // Segment for x >= 0: plain input->output rescale.
    double real_multiplier_1 = input->params.scale / output->params.scale;
    // Segment for x < 0: input scaled by alpha, then rescaled to output.
    double real_multiplier_2 =
        input->params.scale * alpha->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1,
                       &data->output_shift_1);
    QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2,
                       &data->output_shift_2);
  }
  data->requires_broadcast = !HaveSameShapes(input, alpha);
  TfLiteIntArray* output_size = nullptr;
  TF_LITE_ENSURE_OK(
      context, CalculateShapeForBroadcast(context, input, alpha, &output_size));
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_size));
  // Only alpha may broadcast: the output must match the input's shape.
  TF_LITE_ENSURE(context, HaveSameShapes(input, output));
  return kTfLiteOk;
}
// Eval for RELU: max(0, x), dispatched by tensor type. Quantized types use
// the clamped-ReLU helper with an open upper bound (+inf).
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
                          GetTensorShape(output), GetTensorData<float>(output));
    } break;
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
                              input, output, data);
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
                             input, output, data);
    } break;
    case kTfLiteInt16: {
      QuantizedReluX<int16_t>(0.0f, std::numeric_limits<float>::infinity(),
                              input, output, data);
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 and int16 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
// Eval for RELU_N1_TO_1: clamp(x, -1, 1), dispatched by tensor type.
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
                           GetTensorShape(output),
                           GetTensorData<float>(output));
      return kTfLiteOk;
    }
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    }
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(-1, 1, input, output, data);
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Eval for HARD_SWISH: x * relu6(x + 3) / 6, dispatched by tensor type and
// kernel_type (reference vs optimized). Quantized paths use the parameters
// precomputed in HardSwishPrepare.
template <KernelType kernel_type>
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
  HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Eval for RELU_0_TO_1: clamp(x, 0, 1), dispatched by tensor type.
TfLiteStatus Relu0to1Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu0To1(
          GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    }
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(0.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    }
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0, 1, input, output, data);
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Eval for RELU6: clamp(x, 0, 6), dispatched by tensor type.
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  // const-qualified for consistency with ReluEval/Relu1Eval/Relu0to1Eval;
  // the data is only read here.
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      // Float path: direct elementwise clamp.
      size_t elements = input->bytes / sizeof(float);
      const float* in = GetTensorData<float>(input);
      const float* in_end = in + elements;
      float* out = GetTensorData<float>(output);
      for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    }
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    }
    case kTfLiteInt16: {
      QuantizedReluX<int16_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 and int16 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Eval for TANH, dispatched by tensor type and kernel_type: float kernels,
// int16 fixed-point, and uint8/int8 via either 16-bit fixed point or a
// lookup table populated in TanhPrepare.
template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      } else {
        optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      TanhParams params;
      params.input_left_shift = data->input_left_shift;
      // A positive input_multiplier means TanhPrepare chose the
      // multiplier/shift path (non power-of-two scale), which only the
      // integer reference kernel supports.
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        reference_integer_ops::Tanh(
            data->input_multiplier, data->input_left_shift,
            GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Tanh(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        // LUT path: table was populated in TanhPrepare.
        optimized_integer_ops::LookupTable(
            GetTensorData<uint8_t>(input),
            MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
            data->lut_uint8, GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        // LUT path: table was populated in TanhPrepare.
        optimized_integer_ops::LookupTable(
            GetTensorData<int8_t>(input),
            MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
            data->lut_int8, GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Eval for LOGISTIC (sigmoid), dispatched by tensor type and kernel_type:
// float kernels, int16 fixed-point, and uint8/int8 via either 16-bit fixed
// point or a lookup table populated in SigmoidPrepare.
template <KernelType kernel_type>
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Logistic(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::Logistic(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      break;
    }
    case kTfLiteInt16: {
      LogisticParams params;
      // A positive input_multiplier means SigmoidPrepare chose the
      // multiplier/shift path (non power-of-two scale), which only the
      // integer reference kernel supports.
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        const int size =
            MatchingFlatSize(GetTensorShape(input), GetTensorShape(output));
        reference_integer_ops::Logistic(
            data->input_multiplier, data->input_left_shift, size,
            GetTensorData<int16_t>(input), GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Logistic(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      }
      break;
    }
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        LogisticParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Logistic16bitPrecision(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        // LUT path: table was populated in SigmoidPrepare.
        optimized_integer_ops::LookupTable(
            GetTensorData<uint8_t>(input),
            MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
            data->lut_uint8, GetTensorData<uint8_t>(output));
      }
      break;
    }
    case kTfLiteInt8: {
      if (kernel_type == kFixedPointOptimized) {
        LogisticParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Logistic16bitPrecision(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        // LUT path: table was populated in SigmoidPrepare.
        optimized_integer_ops::LookupTable(
            GetTensorData<int8_t>(input),
            MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
            data->lut_int8, GetTensorData<int8_t>(output));
      }
      break;
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
// Float softmax: dispatches to the reference or multithreaded optimized
// kernel depending on kernel_type.
TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input,
                          TfLiteTensor* output, TfLiteSoftmaxParams* params,
                          KernelType kernel_type = kGenericOptimized) {
  SoftmaxParams op_params;
  op_params.beta = params->beta;
  if (kernel_type == kReference) {
    reference_ops::Softmax(op_params, GetTensorShape(input),
                           GetTensorData<float>(input), GetTensorShape(output),
                           GetTensorData<float>(output));
    return kTfLiteOk;
  }
  // Optimized path can parallelize across the CPU backend's thread pool.
  optimized_ops::Softmax(op_params, GetTensorShape(input),
                         GetTensorData<float>(input), GetTensorShape(output),
                         GetTensorData<float>(output),
                         CpuBackendContext::GetFromContext(context));
  return kTfLiteOk;
}
// Quantized softmax, generic over input/output element types; dispatches to
// the reference or optimized kernel depending on kernel_type.
template <typename In, typename Out>
TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input,
                              TfLiteTensor* output, SoftmaxOpData* data,
                              KernelType kernel_type = kGenericOptimized) {
  if (kernel_type == kReference) {
    reference_ops::Softmax(data->params, GetTensorShape(input),
                           GetTensorData<In>(input), GetTensorShape(output),
                           GetTensorData<Out>(output));
    return kTfLiteOk;
  }
  optimized_ops::Softmax(data->params, GetTensorShape(input),
                         GetTensorData<In>(input), GetTensorShape(output),
                         GetTensorData<Out>(output));
  return kTfLiteOk;
}
// int8 -> int8 specialization: the optimized path can use the uint16-LUT
// implementation when the build enables it.
template <>
TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              TfLiteTensor* output,
                                              SoftmaxOpData* data,
                                              KernelType kernel_type) {
  if (kernel_type == kReference) {
    reference_ops::Softmax(data->params, GetTensorShape(input),
                           GetTensorData<int8_t>(input), GetTensorShape(output),
                           GetTensorData<int8_t>(output));
  } else {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    // Tables were populated in SoftmaxPrepare.
    optimized_ops::SoftmaxInt8LUT(
        data->params, GetTensorShape(input), GetTensorData<int8_t>(input),
        GetTensorShape(output), GetTensorData<int8_t>(output));
#else
    optimized_ops::Softmax(data->params, GetTensorShape(input),
                           GetTensorData<int8_t>(input), GetTensorShape(output),
                           GetTensorData<int8_t>(output));
#endif
  }
  return kTfLiteOk;
}
// Full specialization for uint8 input with uint8 output. Mirrors the int8
// specialization above: the optimized path uses the uint16 LUT kernel when
// TFLITE_SOFTMAX_USE_UINT16_LUT is defined, the plain optimized kernel
// otherwise.
template <>
TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context,
                                                const TfLiteTensor* input,
                                                TfLiteTensor* output,
                                                SoftmaxOpData* data,
                                                KernelType kernel_type) {
  if (kernel_type == kReference) {
    reference_ops::Softmax(
        data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(output), GetTensorData<uint8_t>(output));
  } else {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    optimized_ops::SoftmaxInt8LUT(
        data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(output), GetTensorData<uint8_t>(output));
#else
    optimized_ops::Softmax(
        data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
        GetTensorShape(output), GetTensorData<uint8_t>(output));
#endif
  }
  return kTfLiteOk;
}
// Full specialization for int16 input with int16 output.
//
// Consistency fix: the specialization previously used the legacy `int16`
// typedef in its template arguments while the rest of this file (including
// the call sites in SoftmaxEval) uses the fixed-width `int16_t`. The two name
// the same type, so behavior is unchanged; this just removes the mixed usage.
//
// Only a reference implementation exists for this combination, so
// `kernel_type` is accepted (to match the primary template's signature) but
// not consulted. Returns kTfLiteError for tensors outside 1-4 dimensions.
template <>
TfLiteStatus SoftmaxQuantized<int16_t, int16_t>(TfLiteContext* context,
                                                const TfLiteTensor* input,
                                                TfLiteTensor* output,
                                                SoftmaxOpData* data,
                                                KernelType kernel_type) {
  if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) {
    reference_ops::SoftmaxInt16(
        data->params, GetTensorShape(input), GetTensorData<int16_t>(input),
        GetTensorShape(output), GetTensorData<int16_t>(output));
    return kTfLiteOk;
  } else {
    TF_LITE_KERNEL_LOG(context,
                       "Only 1D, 2D, 3D and 4D tensors supported for int16 "
                       "input with int16 output, got %dD.",
                       NumDimensions(input));
    return kTfLiteError;
  }
}
// Softmax Eval entry point: dispatches on input/output tensor types to the
// typed helpers above.
// Supported combinations:
//   float32            -> float32
//   uint8              -> uint8 or int16
//   int8               -> int8 or int16
//   int16              -> int16
// Anything else logs an error and returns kTfLiteError.
template <KernelType kernel_type>
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
  SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      return SoftmaxFloat(context, input, output, params, kernel_type);
    }
    case kTfLiteUInt8: {
      switch (output->type) {
        case kTfLiteUInt8:
          return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output,
                                                    data, kernel_type);
        case kTfLiteInt16:
          return SoftmaxQuantized<uint8_t, int16_t>(context, input, output,
                                                    data, kernel_type);
        default:
          TF_LITE_KERNEL_LOG(context,
                             "Only uint8_t and int16_t outputs are supported "
                             "with uint8_t inputs currently, got %s.",
                             TfLiteTypeGetName(output->type));
          return kTfLiteError;
      }
    }
    case kTfLiteInt8: {
      switch (output->type) {
        case kTfLiteInt8:
          return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data,
                                                  kernel_type);
        case kTfLiteInt16:
          return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data,
                                                   kernel_type);
        default:
          TF_LITE_KERNEL_LOG(context,
                             "Only int8_t and int16_t outputs are supported "
                             "with int8_t inputs currently, got %s.",
                             TfLiteTypeGetName(output->type));
          return kTfLiteError;
      }
    }
    case kTfLiteInt16: {
      return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data,
                                                kernel_type);
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8_t, Int8_t, Int16_t are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// LogSoftmax Eval: supports float32, uint8 and int8 inputs (output type
// matches input). The quantized paths read pre-computed parameters from
// node->user_data; the float path builds an empty SoftmaxParams on the fly.
// For int8 with the reference kernel, the tensor is flattened to
// (outer_size, depth) and handed to the integer reference implementation,
// which takes the individual quantization parameters rather than the struct.
template <KernelType kernel_type>
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
  const LogSoftmaxOpData* data =
      reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      SoftmaxParams op_params;
      if (kernel_type == kGenericOptimized) {
        optimized_ops::LogSoftmax(
            op_params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        reference_ops::LogSoftmax(
            op_params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      return kTfLiteOk;
    }
    case kTfLiteUInt8: {
      const SoftmaxParams& op_params = data->params;
      if (kernel_type == kGenericOptimized) {
        // The optimized uint8 kernel additionally needs the input scale.
        optimized_ops::LogSoftmax(
            op_params, input->params.scale, GetTensorShape(input),
            GetTensorData<uint8_t>(input), GetTensorShape(output),
            GetTensorData<uint8_t>(output));
      } else {
        reference_ops::LogSoftmax(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    }
    case kTfLiteInt8: {
      const SoftmaxParams& op_params = data->params;
      if (kernel_type == kGenericOptimized) {
        optimized_ops::LogSoftmax(
            op_params, input->params.scale, GetTensorShape(input),
            GetTensorData<int8_t>(input), GetTensorShape(output),
            GetTensorData<int8_t>(output));
      } else {
        // Flatten to (outer_size, depth) where depth is the trailing
        // dimension; the integer reference kernel works on 2D data.
        const auto input_shape = GetTensorShape(input);
        const auto output_shape = GetTensorShape(output);
        const int trailing_dim = input_shape.DimensionsCount() - 1;
        const int outer_size =
            MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
        const int depth =
            MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
        reference_integer_ops::LogSoftmax(
            op_params.input_multiplier, op_params.input_left_shift,
            op_params.reverse_scaling_divisor,
            op_params.reverse_scaling_right_shift, op_params.diff_min,
            outer_size, depth, GetTensorData<int8_t>(input),
            GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Element-wise PReLU: identity for non-negative inputs, `input * alpha`
// otherwise. Passed as a scalar functor to the broadcast helpers in PreluEval.
template <typename T>
T ApplyPrelu(T input, T alpha) {
  if (input >= 0.0) {
    return input;
  }
  return input * alpha;
}
// PReLU Eval: output[i] = input[i] if input[i] >= 0 else input[i] * alpha[i],
// with `alpha` broadcast against `input` when shapes differ.
// Supported types: float32, uint8, int8.
// Float path: the optimized kernel picks broadcast vs. element-wise at
// runtime via ProcessBroadcastShapes; the reference kernel uses the
// `requires_broadcast` flag computed at Prepare time.
// Quantized paths: always reference kernels; the two multiplier/shift pairs
// rescale the identity branch (1) and the alpha-product branch (2).
template <KernelType kernel_type>
TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const TfLiteTensor* alpha;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kGenericOptimized) {
        tflite::ArithmeticParams op_params;
        bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
            GetTensorShape(input), GetTensorShape(alpha), &op_params);
        if (need_broadcast) {
          optimized_ops::BroadcastPReluDispatch(
              op_params, GetTensorShape(input), GetTensorData<float>(input),
              GetTensorShape(alpha), GetTensorData<float>(alpha),
              GetTensorShape(output), GetTensorData<float>(output),
              ApplyPrelu<float>);
        } else {
          const int flat_size =
              MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha),
                                   GetTensorShape(output));
          optimized_ops::PReluElementWise(
              flat_size, op_params, GetTensorData<float>(alpha),
              GetTensorData<float>(input), GetTensorData<float>(output));
        }
      } else {
        if (data->requires_broadcast) {
          reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>(
              GetTensorShape(input), GetTensorData<float>(input),
              GetTensorShape(alpha), GetTensorData<float>(alpha),
              GetTensorShape(output), GetTensorData<float>(output),
              ApplyPrelu<float>);
        } else {
          reference_ops::BinaryFunction<float, float, float>(
              GetTensorShape(input), GetTensorData<float>(input),
              GetTensorShape(alpha), GetTensorData<float>(alpha),
              GetTensorShape(output), GetTensorData<float>(output),
              ApplyPrelu<float>);
        }
      }
      return kTfLiteOk;
    }
    case kTfLiteUInt8: {
      PreluParams op_params;
      // Offsets are negated zero-points: the kernels add them to re-center
      // the quantized values (output offset is applied un-negated).
      op_params.input_offset = -input->params.zero_point;
      op_params.alpha_offset = -alpha->params.zero_point;
      op_params.output_offset = output->params.zero_point;
      op_params.output_multiplier_1 = data->output_multiplier_1;
      op_params.output_shift_1 = data->output_shift_1;
      op_params.output_multiplier_2 = data->output_multiplier_2;
      op_params.output_shift_2 = data->output_shift_2;
      if (data->requires_broadcast) {
        reference_ops::BroadcastPrelu4DSlow(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(alpha), GetTensorData<uint8_t>(alpha),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        reference_ops::Prelu(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(alpha), GetTensorData<uint8_t>(alpha),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    }
    case kTfLiteInt8: {
      // Same as the uint8 path but with int8 tensor data.
      PreluParams op_params;
      op_params.input_offset = -input->params.zero_point;
      op_params.alpha_offset = -alpha->params.zero_point;
      op_params.output_offset = output->params.zero_point;
      op_params.output_multiplier_1 = data->output_multiplier_1;
      op_params.output_shift_1 = data->output_shift_1;
      op_params.output_multiplier_2 = data->output_multiplier_2;
      op_params.output_shift_2 = data->output_shift_2;
      if (data->requires_broadcast) {
        reference_ops::BroadcastPrelu4DSlow(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        reference_ops::Prelu(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32 and uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// Quantized LeakyRelu helper for element type T. Copies the pre-computed
// rescaling parameters (separate multiplier/shift for the alpha branch and
// the identity branch) from `data` into the kernel params.
// An optimized kernel exists only for int16 input, so that combination is
// used for any non-reference kernel type; everything else (and kReference)
// goes through the templated reference kernel.
template <KernelType kernel_type, typename T>
void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output,
                       const LeakyReluOpData* data) {
  LeakyReluParams op_params;
  op_params.input_offset = input->params.zero_point;
  op_params.output_offset = output->params.zero_point;
  op_params.output_multiplier_alpha = data->output_multiplier_alpha;
  op_params.output_shift_alpha = data->output_shift_alpha;
  op_params.output_multiplier_identity = data->output_multiplier_identity;
  op_params.output_shift_identity = data->output_shift_identity;
  if (kernel_type != KernelType::kReference && input->type == kTfLiteInt16) {
    optimized_integer_ops::QuantizeLeakyRelu(
        op_params, GetTensorShape(input), GetTensorData<int16>(input),
        GetTensorShape(output), GetTensorData<int16>(output));
  } else {
    reference_ops::QuantizeLeakyRelu(
        op_params, GetTensorShape(input), GetTensorData<T>(input),
        GetTensorShape(output), GetTensorData<T>(output));
  }
}
// LeakyRelu Eval: float32 uses the optimized float kernel directly with the
// user-provided alpha; uint8/int8/int16 use the quantized helper above with
// parameters pre-computed at Prepare time. Other types are rejected.
template <KernelType kernel_type>
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const auto* params =
      reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
  const LeakyReluOpData* data =
      reinterpret_cast<LeakyReluOpData*>(node->user_data);
  LeakyReluParams op_params;
  switch (input->type) {
    case kTfLiteFloat32: {
      op_params.alpha = params->alpha;
      optimized_ops::LeakyRelu(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    }
    case kTfLiteUInt8: {
      QuantizeLeakyRelu<kernel_type, uint8_t>(input, output, data);
      return kTfLiteOk;
    }
    case kTfLiteInt8: {
      QuantizeLeakyRelu<kernel_type, int8_t>(input, output, data);
      return kTfLiteOk;
    }
    case kTfLiteInt16: {
      QuantizeLeakyRelu<kernel_type, int16_t>(input, output, data);
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, int8, int16 and uint8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// ELU Prepare: for int8 tensors, pre-computes a 256-entry lookup table
// mapping each quantized input value to the quantized ELU result
// (expm1(x) for x < 0, identity otherwise), then defers shape handling to
// GenericPrepare. Float inputs need no precomputation.
TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  if (input->type == kTfLiteInt8) {
    LUTPopulate<int8_t>(
        input->params.scale, input->params.zero_point, output->params.scale,
        output->params.zero_point,
        [](float value) { return value < 0.0f ? std::expm1(value) : value; },
        data->lut_int8);
  }
  return GenericPrepare(context, node);
}
// ELU Eval: float32 uses the optimized float kernel; int8 is a straight
// table lookup through the LUT built in EluPrepare. Other types are
// rejected with an error.
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
                         GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    }
    case kTfLiteInt8: {
      OpData* data = reinterpret_cast<OpData*>(node->user_data);
      optimized_integer_ops::LookupTable(
          GetTensorData<int8_t>(input),
          MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
          data->lut_int8, GetTensorData<int8_t>(output));
      return kTfLiteOk;
    }
    default:
      TF_LITE_KERNEL_LOG(
          context, "Only float32 and int8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
// GELU Prepare: for int8/uint8 tensors, pre-computes the quantized lookup
// table using either the tanh-based approximation or the exact (erf-based)
// GELU transform, selected by params->approximate. Shape handling is then
// delegated to GenericPrepare.
TfLiteStatus GeluPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  auto* params = reinterpret_cast<TfLiteGeluParams*>(node->builtin_data);
  if (input->type == kTfLiteInt8) {
    LUTPopulate<int8_t>(input->params.scale, input->params.zero_point,
                        output->params.scale, output->params.zero_point,
                        params->approximate
                            ? reference_ops::GeluTransformApproximate
                            : reference_ops::GeluTransform,
                        data->lut_int8);
  } else if (input->type == kTfLiteUInt8) {
    LUTPopulate<uint8_t>(input->params.scale, input->params.zero_point,
                         output->params.scale, output->params.zero_point,
                         params->approximate
                             ? reference_ops::GeluTransformApproximate
                             : reference_ops::GeluTransform,
                         data->lut_uint8);
  }
  return GenericPrepare(context, node);
}
// GELU Eval: float32 computes via the reference kernel (honoring
// params->approximate); int8/uint8 are table lookups through the LUTs built
// in GeluPrepare. Other types are rejected.
TfLiteStatus GeluEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  auto* params = reinterpret_cast<TfLiteGeluParams*>(node->builtin_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32:
      reference_ops::Gelu(GetTensorShape(input), GetTensorData<float>(input),
                          params->approximate, GetTensorShape(output),
                          GetTensorData<float>(output));
      return kTfLiteOk;
    case kTfLiteUInt8:
      optimized_integer_ops::LookupTable(
          GetTensorData<uint8_t>(input),
          MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
          data->lut_uint8, GetTensorData<uint8_t>(output));
      return kTfLiteOk;
    case kTfLiteInt8:
      optimized_integer_ops::LookupTable(
          GetTensorData<int8_t>(input),
          MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)),
          data->lut_int8, GetTensorData<int8_t>(output));
      return kTfLiteOk;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Only float32, int8 and uint8 supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
}
// --- Op registrations. Each returns a pointer to a function-local static
// TfLiteRegistration wiring the lifecycle callbacks for the op. ---
// ELU uses the LUT-based prepare/eval defined above.
TfLiteRegistration* Register_ELU() {
  static TfLiteRegistration r = {activations::Init, activations::Free,
                                 activations::EluPrepare, activations::EluEval};
  return &r;
}
TfLiteRegistration* Register_RELU() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::ReluEval};
  return &r;
}
// The RELU variants share init/free/prepare and differ only in the eval fn.
TfLiteRegistration* Register_RELU_N1_TO_1() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::Relu1Eval};
  return &r;
}
TfLiteRegistration* Register_RELU6() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::Relu6Eval};
  return &r;
}
TfLiteRegistration* Register_RELU_0_TO_1() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::Relu0to1Eval};
  return &r;
}
// TANH and LOGISTIC registrations: one per kernel flavor (reference,
// generic-optimized, fixed-point-optimized), selected via the KernelType
// template argument of the shared Prepare/Eval.
TfLiteRegistration* Register_TANH_REF() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::TanhPrepare<activations::kReference>,
      activations::TanhEval<activations::kReference>};
  return &r;
}
TfLiteRegistration* Register_TANH_GENERIC_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::TanhPrepare<activations::kGenericOptimized>,
      activations::TanhEval<activations::kGenericOptimized>};
  return &r;
}
TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::TanhPrepare<activations::kFixedPointOptimized>,
      activations::TanhEval<activations::kFixedPointOptimized>};
  return &r;
}
// Default TANH is the generic-optimized flavor.
TfLiteRegistration* Register_TANH() {
  return Register_TANH_GENERIC_OPT();
}
TfLiteRegistration* Register_LOGISTIC_REF() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::SigmoidPrepare<activations::kReference>,
      activations::SigmoidEval<activations::kReference>};
  return &r;
}
TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::SigmoidPrepare<activations::kGenericOptimized>,
      activations::SigmoidEval<activations::kGenericOptimized>};
  return &r;
}
TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::SigmoidPrepare<activations::kFixedPointOptimized>,
      activations::SigmoidEval<activations::kFixedPointOptimized>};
  return &r;
}
// Default LOGISTIC is the generic-optimized flavor.
TfLiteRegistration* Register_LOGISTIC() {
  return Register_LOGISTIC_GENERIC_OPT();
}
// SOFTMAX registrations use the long-form initializer so the trailing
// inplace_operator field can be set: kTfLiteInplaceOpInput0Shared lets the
// runtime share the input-0 buffer with the output.
TfLiteRegistration* Register_SOFTMAX_REF() {
  static TfLiteRegistration r = {
      activations::SoftmaxInit,
      activations::SoftmaxFree,
      activations::SoftmaxPrepare<activations::kReference>,
      activations::SoftmaxEval<activations::kReference>,
      nullptr,
      0,
      nullptr,
      0,
      nullptr,
      nullptr,
      kTfLiteInplaceOpInput0Shared};
  return &r;
}
TfLiteRegistration* Register_SOFTMAX() {
  static TfLiteRegistration r = {
      activations::SoftmaxInit,
      activations::SoftmaxFree,
      activations::SoftmaxPrepare<activations::kGenericOptimized>,
      activations::SoftmaxEval<activations::kGenericOptimized>,
      nullptr,
      0,
      nullptr,
      0,
      nullptr,
      nullptr,
      kTfLiteInplaceOpInput0Shared};
  return &r;
}
TfLiteRegistration* Register_LOG_SOFTMAX_REF() {
  static TfLiteRegistration r = {
      activations::LogSoftmaxInit, activations::LogSoftmaxFree,
      activations::LogSoftmaxPrepare<activations::kReference>,
      activations::LogSoftmaxEval<activations::kReference>};
  return &r;
}
TfLiteRegistration* Register_LOG_SOFTMAX() {
  static TfLiteRegistration r = {
      activations::LogSoftmaxInit, activations::LogSoftmaxFree,
      activations::LogSoftmaxPrepare<activations::kGenericOptimized>,
      activations::LogSoftmaxEval<activations::kGenericOptimized>};
  return &r;
}
// PRELU / LEAKY_RELU / HARD_SWISH come in reference and generic-optimized
// flavors; GELU has a single registration (flavor is chosen inside Eval by
// tensor type).
TfLiteRegistration* Register_PRELU_REF() {
  static TfLiteRegistration r = {
      activations::PreluInit, activations::PreluFree, activations::PreluPrepare,
      activations::PreluEval<activations::kReference>};
  return &r;
}
TfLiteRegistration* Register_PRELU() {
  static TfLiteRegistration r = {
      activations::PreluInit, activations::PreluFree, activations::PreluPrepare,
      activations::PreluEval<activations::kGenericOptimized>};
  return &r;
}
TfLiteRegistration* Register_LEAKY_RELU_REF() {
  static TfLiteRegistration r = {
      activations::LeakyReluInit, activations::LeakyReluFree,
      activations::LeakyReluPrepare,
      activations::LeakyReluEval<activations::kReference>};
  return &r;
}
TfLiteRegistration* Register_LEAKY_RELU() {
  static TfLiteRegistration r = {
      activations::LeakyReluInit, activations::LeakyReluFree,
      activations::LeakyReluPrepare,
      activations::LeakyReluEval<activations::kGenericOptimized>};
  return &r;
}
TfLiteRegistration* Register_HARD_SWISH() {
  static TfLiteRegistration r = {
      activations::HardSwishInit, activations::HardSwishFree,
      activations::HardSwishPrepare,
      activations::HardSwishEval<activations::kGenericOptimized>};
  return &r;
}
TfLiteRegistration* Register_HARD_SWISH_REF() {
  static TfLiteRegistration r = {
      activations::HardSwishInit, activations::HardSwishFree,
      activations::HardSwishPrepare,
      activations::HardSwishEval<activations::kReference>};
  return &r;
}
TfLiteRegistration* Register_GELU() {
  static TfLiteRegistration r = {activations::Init, activations::Free,
                                 activations::GeluPrepare,
                                 activations::GeluEval};
  return &r;
}
}
}
} | #include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <limits>
#include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
namespace {
// Fills `result` with `size` pseudo-random floats drawn (approximately)
// uniformly from [min, max], using raw draws from `random_engine` scaled by
// the engine's modulus. Deterministic for a given engine state.
void GenerateUniformRandomVector(int size, float min, float max,
                                 std::minstd_rand* random_engine,
                                 std::vector<float>* result) {
  result->resize(size);
  // Reciprocal of the engine's modulus maps a raw draw into [0, 1).
  const float inv_modulus =
      1.0f / static_cast<float>(std::minstd_rand::modulus);
  for (int i = 0; i < size; ++i) {
    const float unit_sample = (*random_engine)() * inv_modulus;
    (*result)[i] = min + (max - min) * unit_sample;
  }
}
}
// Single-op model used by these tests: builds one activation op (`type`)
// with a single input and output tensor, to be run through the Hexagon
// delegate harness provided by SingleOpModelWithHexagon.
class ActivationOpModel : public SingleOpModelWithHexagon {
 public:
  explicit ActivationOpModel(BuiltinOperator type, const TensorData& input,
                             const TensorData& output) {
    input_ = AddInput(input);
    output_ = AddOutput(output);
    // None of the activations tested here carries a builtin-options payload.
    SetBuiltinOp(type, BuiltinOptions_NONE, 0);
    BuildInterpreter({GetShape(input_)});
  }
  // Quantizes `data` with the input tensor's scale/zero-point and writes it.
  template <typename T>
  void SetInput(const std::vector<float>& data) {
    QuantizeAndPopulate<T>(input_, data);
  }
  // Reads the quantized output and converts it back to float.
  template <typename T>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
                         GetZeroPoint(output_));
  }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
 protected:
  // NOTE(review): op_code_ is never assigned or read in this class — looks
  // unused; confirm before removing.
  BuiltinOperator op_code_;
  int input_;
  int output_;
};
// RELU on [-6, 6] quantized range; the input 7 is clipped by the range to 6,
// so the expected output is {1, 5, 6} within quantization tolerance.
template <typename integer_type, TensorType tensor_dtype>
void ReluTestImpl() {
  const float kMin = -6;
  const float kMax = 6;
  ActivationOpModel model(BuiltinOperator_RELU,
                          {tensor_dtype, {1, 3}, kMin, kMax},
                          {tensor_dtype, {1, 3}, kMin, kMax});
  model.SetInput<integer_type>({1, 5, 7});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
              ElementsAreArray(
                  ArrayFloatNear({1.0, 5.0, 6.0}, 0.03)));
}
// RELU6 on [-8, 8] quantized range: negatives clamp to 0 and values above 6
// clamp to 6, giving {4, 0, 6}.
template <typename integer_type, TensorType tensor_dtype>
void Relu6TestImpl() {
  const float kMin = -8;
  const float kMax = 8;
  ActivationOpModel model(BuiltinOperator_RELU6,
                          {tensor_dtype, {1, 3}, kMin, kMax},
                          {tensor_dtype, {1, 3}, kMin, kMax});
  model.SetInput<integer_type>({4, -1.0, 8});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
              ElementsAreArray(
                  ArrayFloatNear({4.0, 0.0, 6.0}, 0.03)));
}
// TANH: input quantized over [-8, 8*127/128], output over the tanh range
// [-1, 127/128]. Expected values are the quantized/clamped tanh results
// (the 1.00392 entries reflect the output quantization grid, not exact tanh).
template <typename integer_type, TensorType tensor_dtype>
void TanhTestImpl() {
  const float kMin = -1;
  const float kMax = 127.f / 128.f;
  ActivationOpModel model(BuiltinOperator_TANH,
                          {tensor_dtype, {1, 3}, 8 * kMin, 8 * kMax},
                          {tensor_dtype, {1, 3}, kMin, kMax});
  model.SetInput<integer_type>({4, -1.0, 8});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
              ElementsAreArray(ArrayFloatNear({1.00392, -0.752941, 1.00392},
                                              0.03)));
}
// LOGISTIC (sigmoid): output uses the fixed 1/256 scale (zero-point 0 for
// uint8, -128 for int8) required for quantized sigmoid outputs.
template <typename integer_type, TensorType tensor_dtype>
void SigmoidTestImpl() {
  const float kMin = -8;
  const float kMax = 8;
  TensorData output;
  if (tensor_dtype == TensorType_UINT8) {
    output = {tensor_dtype, {}, 0, 0, 1. / 256};
  } else if (tensor_dtype == TensorType_INT8) {
    output = {tensor_dtype, {}, 0, 0, 1. / 256, -128};
  }
  ActivationOpModel model(BuiltinOperator_LOGISTIC,
                          {tensor_dtype, {1, 3}, kMin, kMax},
                          output);
  model.SetInput<integer_type>({4, -1.0, 8});
  model.ApplyDelegateAndInvoke();
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 3}));
  EXPECT_THAT(model.GetDequantizedOutput<integer_type>(),
              ElementsAreArray(ArrayFloatNear({0.977, 0.266, 0.996},
                                              0.03)));
}
// Instantiate each activation test for both uint8 and int8 quantization.
TEST(ActivationOpModel, ReluOutput_UInt8) {
  ReluTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, ReluOutput_Int8) {
  ReluTestImpl<int8_t, TensorType_INT8>();
}
TEST(ActivationOpModel, Relu6Output_UInt8) {
  Relu6TestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, Relu6Output_Int8) {
  Relu6TestImpl<int8_t, TensorType_INT8>();
}
TEST(ActivationOpModel, SigmoidOutput_UInt8) {
  SigmoidTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, SigmoidOutput_Int8) {
  SigmoidTestImpl<int8_t, TensorType_INT8>();
}
TEST(ActivationOpModel, TanhOutput_UInt8) {
  TanhTestImpl<uint8_t, TensorType_UINT8>();
}
TEST(ActivationOpModel, TanhOutput_Int8) {
  TanhTestImpl<int8_t, TensorType_INT8>();
}
// Float reference for hard-swish: out = x * relu6(x + 3) / 6. Used as the
// golden value generator for the quantized tests below.
void EvalTestReferenceHardSwish(int size, const std::vector<float>& input,
                                std::vector<float>* result) {
  result->resize(size);
  for (int i = 0; i < size; ++i) {
    const float x = input[i];
    const float relu6_of_shifted = std::min(6.0f, std::max(0.0f, x + 3));
    (*result)[i] = x * relu6_of_shifted * (1.0f / 6.0f);
  }
}
// Runs one quantized hard-swish case: random inputs in [input_min,
// input_max], float reference outputs clamped to the output range, then
// compares against the delegate result with a tolerance derived from the
// wider of the two quantization ranges (floored at 0.03).
template <TensorType Tensor_Type, typename input_type>
void TestQuantizedHardSwish(int size, float input_min, float input_max,
                            float output_min, float output_max,
                            std::minstd_rand* random_engine) {
  std::vector<float> float_input_values;
  GenerateUniformRandomVector(size, input_min, input_max, random_engine,
                              &float_input_values);
  std::vector<float> float_ref_output_values;
  EvalTestReferenceHardSwish(size, float_input_values,
                             &float_ref_output_values);
  // Clamp the reference to what the quantized output range can represent.
  for (float& val : float_ref_output_values) {
    val = std::min(output_max, std::max(output_min, val));
  }
  ActivationOpModel m(
      BuiltinOperator_HARD_SWISH,
      {Tensor_Type, {1, 1, 1, size}, input_min, input_max},
      {Tensor_Type, {1, 1, 1, size}, output_min, output_max});
  m.SetInput<input_type>(float_input_values);
  m.ApplyDelegateAndInvoke();
  const std::vector<float> dequantized_output =
      m.GetDequantizedOutput<input_type>();
  const float quant_recommended_tolerance =
      std::max(input_max - input_min, output_max - output_min) * (1.5f / 256.f);
  const float kTolerance = std::max(0.03f, quant_recommended_tolerance);
  EXPECT_THAT(dequantized_output, ElementsAreArray(ArrayFloatNear(
                                      float_ref_output_values, kTolerance)));
}
// Sweeps hard-swish over all combinations of two input ranges, two output
// ranges and three tensor sizes.
template <TensorType Tensor_Type, typename input_type>
void HardSwishTestImpl() {
  std::minstd_rand random_engine;
  std::vector<std::pair<float, float>> minmax_pairs{{0.f, 1.f}, {-5.f, 10.f}};
  for (const auto& input_minmax : minmax_pairs) {
    for (const auto& output_minmax : minmax_pairs) {
      float input_min = input_minmax.first;
      float input_max = input_minmax.second;
      float output_min = output_minmax.first;
      float output_max = output_minmax.second;
      for (int size : {1, 3, 40}) {
        TestQuantizedHardSwish<Tensor_Type, input_type>(
            size, input_min, input_max, output_min, output_max, &random_engine);
      }
    }
  }
}
// Hard-swish sweep for both quantized element types.
TEST(ActivationOpModel, HardSwishTestUInt8) {
  HardSwishTestImpl<TensorType_UINT8, uint8_t>();
}
TEST(ActivationOpModel, HardSwishTestInt8) {
  HardSwishTestImpl<TensorType_INT8, int8_t>();
}
// Bias test for quantized hard-swish: evaluates every quantized input value
// between -3 and +3 (where the op's nonlinearity lives), and checks that the
// mean signed error, expressed in units of the larger quantization step, is
// small — i.e. the implementation is not systematically biased up or down.
template <TensorType Tensor_Type, typename input_type>
void HardSwishBiasTestImpl() {
  float input_min = -11.654928f;
  float input_max = 25.036512f;
  float output_min = -0.3905796f;
  float output_max = 24.50887f;
  float tolerated_bias = 0.035;
  const float quantized_type_range =
      static_cast<float>(std::numeric_limits<int8_t>::max()) -
      static_cast<float>(std::numeric_limits<int8_t>::min());
  const float input_scale = (input_max - input_min) / quantized_type_range;
  const float output_scale = (output_max - output_min) / quantized_type_range;
  const float max_scale = std::max(output_scale, input_scale);
  // The interesting region [-3, 3] must be inside the input range.
  ASSERT_LE(input_min, -3.0f);
  ASSERT_GE(input_max, 3.0f);
  const int quantized_input_negative_three =
      std::round(std::numeric_limits<input_type>::min() +
                 (-3.0f - input_min) / input_scale);
  const int quantized_input_positive_three =
      std::round(std::numeric_limits<input_type>::min() +
                 (3.0f - input_min) / input_scale);
  // One float input per quantized level in [-3, 3].
  std::vector<float> float_input_values;
  for (int i = quantized_input_negative_three;
       i <= quantized_input_positive_three; i++) {
    float_input_values.push_back(
        input_min + (i - std::numeric_limits<int8_t>::min()) * input_scale);
  }
  const int size = float_input_values.size();
  std::vector<float> float_ref_output_values;
  EvalTestReferenceHardSwish(size, float_input_values,
                             &float_ref_output_values);
  for (float& val : float_ref_output_values) {
    val = std::min(output_max, std::max(output_min, val));
  }
  ActivationOpModel m(
      BuiltinOperator_HARD_SWISH,
      {Tensor_Type, {1, 1, 1, size}, input_min, input_max},
      {Tensor_Type, {1, 1, 1, size}, output_min, output_max});
  m.SetInput<input_type>(float_input_values);
  m.ApplyDelegateAndInvoke();
  const std::vector<float> dequantized_output =
      m.GetDequantizedOutput<input_type>();
  // Average signed error, normalized by the coarser quantization step.
  float sum_diff = 0;
  for (int i = 0; i < size; i++) {
    sum_diff += dequantized_output[i] - float_ref_output_values[i];
  }
  const float bias = sum_diff / (size * max_scale);
  EXPECT_LE(std::abs(bias), tolerated_bias);
}
// Bias check for both quantized element types. (The first test name lacks a
// UInt8 suffix but exercises the uint8 instantiation.)
TEST(ActivationOpModel, HardSwishBiasTest) {
  HardSwishBiasTestImpl<TensorType_UINT8, uint8_t>();
}
TEST(ActivationOpModel, HardSwishBiasTestInt8) {
  HardSwishBiasTestImpl<TensorType_INT8, int8_t>();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/activations.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/activations_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4de958a3-e1a5-4cae-a5d6-151e97e19ad6 | cpp | google/tensorstore | meta | tensorstore/internal/meta.h | tensorstore/internal/meta_test.cc | #ifndef TENSORSTORE_INTERNAL_META_H_
#define TENSORSTORE_INTERNAL_META_H_
namespace tensorstore {
namespace internal {
// Perfectly forwards and returns its first argument; any remaining arguments
// are accepted but ignored. Handy in pack expansions where only the first
// element of a pack is needed.
template <typename T, typename... Ts>
constexpr T&& GetFirstArgument(T&& first, Ts&&... ignored) {
  return static_cast<T&&>(first);
}
// Non-constexpr helper: calling it inside a constant-expression context
// forces a compile-time error, which is how the macro below turns a false
// condition into a constexpr failure (at runtime it just returns 0).
inline int constexpr_assert_failed() noexcept { return 0; }
// Assertion usable inside constexpr functions: evaluates to void; a false
// condition reaches constexpr_assert_failed(), making the enclosing constant
// evaluation ill-formed.
#define TENSORSTORE_CONSTEXPR_ASSERT(...)                        \
  (static_cast<void>(                                            \
      (__VA_ARGS__) ? 0                                          \
                    : tensorstore::internal::constexpr_assert_failed()))
}
}
#endif | #include "tensorstore/internal/meta.h"
#include <type_traits>
namespace {
using ::tensorstore::internal::GetFirstArgument;
// Compile-time checks that GetFirstArgument preserves the value category and
// const-ness of its first argument: lvalue ref stays lvalue ref, const ref
// stays const ref, and a prvalue is forwarded as an rvalue reference.
static_assert(
    std::is_same_v<int&, decltype(GetFirstArgument(std::declval<int&>(),
                                                   std::declval<float&>()))>);
static_assert(std::is_same_v<
              const int&, decltype(GetFirstArgument(std::declval<const int&>(),
                                                    std::declval<float&>()))>);
static_assert(
    std::is_same_v<int&&, decltype(GetFirstArgument(std::declval<int>(),
                                                    std::declval<float&>()))>);
// And that the returned value is indeed the first argument.
static_assert(GetFirstArgument(3, 4) == 3);
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/meta.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/meta_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8ca2d24a-803e-4bbb-8f49-b99dbb2bd95d | cpp | tensorflow/tensorflow | compilation_environments | third_party/xla/xla/service/compilation_environments.cc | third_party/xla/xla/service/compilation_environments_test.cc | #include "xla/service/compilation_environments.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Process-wide registry mapping a proto Descriptor to the function that
// post-processes a newly added environment of that type. Lazily allocated
// (starts null) and guarded by the mutex; never freed.
ABSL_CONST_INIT absl::Mutex process_new_env_fns_mu(absl::kConstInit);
absl::flat_hash_map<const tsl::protobuf::Descriptor*,
                    CompilationEnvironments::ProcessNewEnvFn>*
    process_new_env_fns ABSL_GUARDED_BY(process_new_env_fns_mu) = nullptr;
// Process-wide usage statistics for CompilationEnvironments, keyed by
// environment type name. The counters are purely diagnostic; they are only
// ever surfaced via VLOG(1) logging below.
class GlobalCompEnvStats {
 public:
  // Returns the singleton instance. The instance is heap-allocated and
  // intentionally leaked so it remains valid during program shutdown.
  static GlobalCompEnvStats& GetSingleton() {
    static GlobalCompEnvStats* singleton = new GlobalCompEnvStats();
    return *singleton;
  }
  // Records that CompilationEnvironments lazily created a default
  // environment of type `env_type`.
  void DefaultEnvCreatedByCompilationEnvironments(std::string_view env_type)
      ABSL_LOCKS_EXCLUDED(mu_) {
    {
      // Inner scope releases mu_ before ToString(), which re-acquires it as
      // a reader for the log line below.
      absl::MutexLock l(&mu_);
      ++stats_[std::string(env_type)]
            .default_env_created_by_compilation_environments;
    }
    VLOG(1) << "New GlobalCompEnvStats value: " << ToString();
  }
  // Records that an environment of type `env_type` was explicitly added to a
  // CompilationEnvironments instance.
  void EnvAdded(std::string_view env_type) ABSL_LOCKS_EXCLUDED(mu_) {
    {
      // Inner scope releases mu_ before ToString() re-acquires it.
      absl::MutexLock l(&mu_);
      ++stats_[std::string(env_type)].env_added;
    }
    VLOG(1) << "New GlobalCompEnvStats value: " << ToString();
  }
  // Renders all per-type statistics as a single human-readable string,
  // e.g. "type1: { ... }; type2: { ... }".
  std::string ToString() const ABSL_LOCKS_EXCLUDED(mu_) {
    absl::ReaderMutexLock l(&mu_);
    return absl::StrJoin(
        stats_, "; ",
        [](std::string* out, const StatMap::value_type& env_stats_pair) {
          absl::StrAppend(out, env_stats_pair.first, ": { ",
                          env_stats_pair.second.ToString(), " }");
        });
  }
 private:
  // Counters tracked for a single environment type.
  struct PerEnvStats {
    std::string ToString() const {
      return absl::StrCat(
          "# default envs created by CompilationEnvironments: ",
          default_env_created_by_compilation_environments, " ",
          "# envs added to CompilationEnvironments: ", env_added);
    }
    // Number of default environments lazily created for this type.
    unsigned default_env_created_by_compilation_environments = 0;
    // Number of environments of this type explicitly added.
    unsigned env_added = 0;
  };
  using StatMap = absl::flat_hash_map<std::string, PerEnvStats>;
  GlobalCompEnvStats() = default;
  // Neither copyable nor movable: this is a process-wide singleton.
  GlobalCompEnvStats(const GlobalCompEnvStats&) = delete;
  GlobalCompEnvStats& operator=(const GlobalCompEnvStats&) = delete;
  GlobalCompEnvStats(GlobalCompEnvStats&&) = delete;
  GlobalCompEnvStats& operator=(GlobalCompEnvStats&&) = delete;
  mutable absl::Mutex mu_;
  StatMap stats_ ABSL_GUARDED_BY(mu_);
};
}
// Deep-copies every environment from `rhs` into this object, replacing any
// environments currently held. Each stored proto is cloned via New() +
// CopyFrom() so the two CompilationEnvironments never share message
// instances.
CompilationEnvironments& CompilationEnvironments::operator=(
    const CompilationEnvironments& rhs) {
  // Guard against self-assignment: Clear() below would otherwise destroy the
  // very environments we are about to copy from, silently emptying *this.
  if (this == &rhs) {
    return *this;
  }
  Clear();
  for (const auto& descriptor_message_pair : rhs.environments_) {
    auto env = absl::WrapUnique(descriptor_message_pair.second->New());
    env->CopyFrom(*descriptor_message_pair.second);
    environments_.insert({descriptor_message_pair.first, std::move(env)});
  }
  return *this;
}
// Deserializes a CompilationEnvironments from `proto`. Each environment is
// stored as a google.protobuf.Any; its type URL is resolved against the
// generated descriptor pool, a message of that type is instantiated and
// unpacked, and the result is added via AddEnv() (which also runs the
// registered ProcessNewEnvFn for the type). Returns DataLoss for malformed
// type URLs, unknown types, or unpack failures, and Internal if no prototype
// exists for a known descriptor.
absl::StatusOr<std::unique_ptr<CompilationEnvironments>>
CompilationEnvironments::CreateFromProto(
    const CompilationEnvironmentsProto& proto) {
  auto envs = std::make_unique<CompilationEnvironments>();
  const tsl::protobuf::DescriptorPool* const pool =
      tsl::protobuf::DescriptorPool::generated_pool();
  for (const auto& env_proto : proto.environments()) {
    std::string fullname;
    // Extract the fully qualified message type name from the Any type URL.
    if (!google::protobuf::Any::ParseAnyTypeUrl(env_proto.type_url(),
                                                &fullname)) {
      return tsl::errors::DataLoss(
          "Invalid CompilationEnvironment message type url: %s",
          env_proto.type_url());
    }
    const tsl::protobuf::Descriptor* const descriptor =
        pool->FindMessageTypeByName(fullname);
    if (descriptor == nullptr) {
      return tsl::errors::DataLoss(
          "Unknown CompilationEnvironment message type: %s", fullname);
    }
    const tsl::protobuf::Message* const prototype =
        tsl::protobuf::MessageFactory::generated_factory()->GetPrototype(
            descriptor);
    if (prototype == nullptr) {
      return tsl::errors::Internal(
          "Unsupported CompilationEnvironment message type: %s", fullname);
    }
    // New() on the prototype yields a default-constructed message of the
    // concrete generated type.
    std::unique_ptr<tsl::protobuf::Message> env(prototype->New());
    if (!env_proto.UnpackTo(env.get())) {
      return tsl::errors::DataLoss(
          "Unable to unpack CompilationEnvironment message of type '%s'",
          fullname);
    }
    TF_RETURN_IF_ERROR(envs->AddEnv(std::move(env)));
  }
  return envs;
}
// Registers `process_new_env` as the factory/normalizer for environments of
// the type identified by `descriptor`. Each type may be registered at most
// once; a duplicate registration is a fatal error.
void CompilationEnvironments::RegisterProcessNewEnvFn(
    const tsl::protobuf::Descriptor* descriptor,
    ProcessNewEnvFn process_new_env) {
  absl::MutexLock l(&process_new_env_fns_mu);
  // Lazily create the global registry on first registration.
  if (process_new_env_fns == nullptr) {
    process_new_env_fns =
        new absl::flat_hash_map<const tsl::protobuf::Descriptor*,
                                CompilationEnvironments::ProcessNewEnvFn>();
  }
  const auto insertion_result =
      process_new_env_fns->try_emplace(descriptor, std::move(process_new_env));
  CHECK(insertion_result.second)
      << "ProcessNewEnvFn for XLA compilation environment '"
      << descriptor->full_name() << "' has already been registered";
}
// Takes ownership of `env` and adds it to this set of environments after
// running the registered ProcessNewEnvFn for its type. Rejects null input.
absl::Status CompilationEnvironments::AddEnv(
    std::unique_ptr<tsl::protobuf::Message> env) {
  if (env == nullptr) {
    return tsl::errors::InvalidArgument(
        "Can not add a null compilation environment.");
  }
  const tsl::protobuf::Descriptor* descriptor = env->GetDescriptor();
  return AddEnvImpl(*descriptor, std::move(env));
}
// Serializes all environments into a CompilationEnvironmentsProto. The
// environments are packed in a deterministic order (sorted by fully
// qualified type name) so repeated serialization is byte-stable.
CompilationEnvironmentsProto CompilationEnvironments::ToProto() const {
  std::vector<const tsl::protobuf::Descriptor*> sorted_descriptors;
  sorted_descriptors.reserve(environments_.size());
  for (const auto& entry : environments_) {
    sorted_descriptors.push_back(entry.first);
  }
  absl::c_sort(sorted_descriptors, [](const tsl::protobuf::Descriptor* a,
                                      const tsl::protobuf::Descriptor* b) {
    return a->full_name() < b->full_name();
  });
  CompilationEnvironmentsProto result;
  for (const tsl::protobuf::Descriptor* descriptor : sorted_descriptors) {
    result.add_environments()->PackFrom(*environments_.at(descriptor));
  }
  return result;
}
// Looks up the registered ProcessNewEnvFn for the environment type described
// by `descriptor`. Returns nullptr when no registry exists yet or the type
// was never registered.
CompilationEnvironments::ProcessNewEnvFn
CompilationEnvironments::GetProcessNewEnvFn(
    const tsl::protobuf::Descriptor& descriptor) {
  absl::MutexLock l(&process_new_env_fns_mu);
  if (process_new_env_fns != nullptr) {
    const auto entry = process_new_env_fns->find(&descriptor);
    if (entry != process_new_env_fns->end()) {
      return entry->second;
    }
  }
  return nullptr;
}
// Forwards to the process-wide stats singleton: records that a default
// environment of type `env_type` was lazily created by this class.
void CompilationEnvironments::DefaultEnvCreatedByCompilationEnvironments(
    std::string_view env_type) {
  GlobalCompEnvStats::GetSingleton().DefaultEnvCreatedByCompilationEnvironments(
      env_type);
}
// Forwards to the process-wide stats singleton: records that an environment
// of type `env_type` was added to some CompilationEnvironments instance.
void CompilationEnvironments::EnvAdded(std::string_view env_type) {
  GlobalCompEnvStats::GetSingleton().EnvAdded(env_type);
}
// Shared implementation for adding an environment of the type described by
// `descriptor`. Rejects duplicates and unregistered types, runs the type's
// registered ProcessNewEnvFn on the incoming message (which may replace or
// normalize it), warns about unknown fields (e.g. from a newer proto
// schema), stores the processed environment, and bumps the global stats.
absl::Status CompilationEnvironments::AddEnvImpl(
    const tsl::protobuf::Descriptor& descriptor,
    std::unique_ptr<tsl::protobuf::Message> env) {
  // Each environment type may be present at most once; replacing is an
  // explicit error rather than a silent overwrite.
  if (environments_.contains(&descriptor)) {
    return tsl::errors::InvalidArgument(
        "Replacing CompilationEnvironment of type %s.", descriptor.full_name());
  }
  ProcessNewEnvFn process_new_env = GetProcessNewEnvFn(descriptor);
  if (!process_new_env) {
    return tsl::errors::InvalidArgument(
        "Unknown compilation environment type: %s", descriptor.full_name());
  }
  TF_ASSIGN_OR_RETURN(std::unique_ptr<tsl::protobuf::Message> processed_env,
                      process_new_env(std::move(env)));
  // Collect the tag numbers of any unrecognized fields so they can be
  // surfaced in the warning below instead of being silently dropped.
  const tsl::protobuf::UnknownFieldSet& unknown_fields =
      processed_env->GetReflection()->GetUnknownFields(*processed_env);
  std::vector<int> unknown_tags;
  unknown_tags.reserve(unknown_fields.field_count());
  for (int i = 0; i < unknown_fields.field_count(); ++i) {
    const tsl::protobuf::UnknownField& field = unknown_fields.field(i);
    unknown_tags.push_back(field.number());
  }
  if (!unknown_tags.empty()) {
    LOG(WARNING) << "CompilationEnvironment " << descriptor.full_name()
                 << " contains unknown fields with tag numbers: "
                 << absl::StrJoin(unknown_tags, ", ");
  }
  environments_.insert({&descriptor, std::move(processed_env)});
  EnvAdded(descriptor.full_name());
  return absl::OkStatus();
}
} | #include "xla/service/compilation_environments.h"
#include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "xla/service/test_compilation_environment.pb.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/protobuf.h"
namespace xla {
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv1(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment1> env(
tensorflow::down_cast<test::TestCompilationEnvironment1*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment1>();
}
if (env->some_flag() == 0 || env->some_flag() == 1) {
env->set_some_flag(100);
}
return env;
}
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv2(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment2> env(
tensorflow::down_cast<test::TestCompilationEnvironment2*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment2>();
}
if (env->some_other_flag() == 0) {
env->set_some_other_flag(200);
}
return env;
}
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv3(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment3> env(
tensorflow::down_cast<test::TestCompilationEnvironment3*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment3>();
}
if (env->a_third_flag() == 0) {
env->set_a_third_flag(300);
}
return env;
}
namespace test {
namespace {
class CompilationEnvironmentsTest : public ::testing::Test {
protected:
static void SetUpTestSuite() {
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment1::descriptor(), ProcessNewEnv1);
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment2::descriptor(), ProcessNewEnv2);
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment3::descriptor(), ProcessNewEnv3);
}
};
TEST_F(CompilationEnvironmentsTest, GetDefaultEnv) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, GetDefaultMutableEnv) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, GetAddedEnvNotModifiedByProcessNewEnv) {
CompilationEnvironments envs;
auto env = std::make_unique<TestCompilationEnvironment1>();
env->set_some_flag(5);
TF_ASSERT_OK(envs.AddEnv(std::move(env)));
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 5);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 5);
}
TEST_F(CompilationEnvironmentsTest, GetAddedEnvModifiedByProcessNewEnv) {
CompilationEnvironments envs;
auto env = std::make_unique<TestCompilationEnvironment1>();
env->set_some_flag(1);
TF_ASSERT_OK(envs.AddEnv(std::move(env)));
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, MultipleEnvs) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment2>().some_other_flag(), 200);
EXPECT_EQ(envs.GetEnv<TestCompilationEnvironment1>().some_flag(), 100);
}
TEST_F(CompilationEnvironmentsTest, MultipleMutableEnvs) {
CompilationEnvironments envs;
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 100);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment2>().some_other_flag(),
200);
envs.GetMutableEnv<TestCompilationEnvironment1>().set_some_flag(101);
envs.GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(201);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment1>().some_flag(), 101);
EXPECT_EQ(envs.GetMutableEnv<TestCompilationEnvironment2>().some_other_flag(),
201);
}
TEST_F(CompilationEnvironmentsTest, CopyConstructor) {
auto envs = std::make_unique<CompilationEnvironments>();
auto env1 = std::make_unique<TestCompilationEnvironment1>();
env1->set_some_flag(10);
TF_ASSERT_OK(envs->AddEnv(std::move(env1)));
auto env2 = std::make_unique<TestCompilationEnvironment2>();
TF_ASSERT_OK(envs->AddEnv(std::move(env2)));
envs->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
auto envs_copy = std::make_unique<CompilationEnvironments>(*envs);
envs.reset();
EXPECT_EQ(envs_copy->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
EXPECT_EQ(envs_copy->GetEnv<TestCompilationEnvironment2>().some_other_flag(),
20);
}
TEST_F(CompilationEnvironmentsTest, CopyAssignment) {
auto envs1 = std::make_unique<CompilationEnvironments>();
auto env1 = std::make_unique<TestCompilationEnvironment1>();
env1->set_some_flag(10);
TF_ASSERT_OK(envs1->AddEnv(std::move(env1)));
auto env2 = std::make_unique<TestCompilationEnvironment2>();
TF_ASSERT_OK(envs1->AddEnv(std::move(env2)));
envs1->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
auto envs2 = std::make_unique<CompilationEnvironments>();
auto env3 = std::make_unique<TestCompilationEnvironment1>();
env3->set_some_flag(30);
TF_ASSERT_OK(envs2->AddEnv(std::move(env3)));
auto env4 = std::make_unique<TestCompilationEnvironment3>();
env4->set_a_third_flag(40);
TF_ASSERT_OK(envs2->AddEnv(std::move(env4)));
*envs2 = *envs1;
envs1.reset();
EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment2>().some_other_flag(), 20);
EXPECT_EQ(envs2->GetEnv<TestCompilationEnvironment3>().a_third_flag(), 300);
}
TEST_F(CompilationEnvironmentsTest, ProtoRoundTrip) {
auto envs = std::make_unique<CompilationEnvironments>();
auto env1 = std::make_unique<TestCompilationEnvironment1>();
env1->set_some_flag(10);
TF_ASSERT_OK(envs->AddEnv(std::move(env1)));
auto env2 = std::make_unique<TestCompilationEnvironment2>();
TF_ASSERT_OK(envs->AddEnv(std::move(env2)));
envs->GetMutableEnv<TestCompilationEnvironment2>().set_some_other_flag(20);
auto proto = envs->ToProto();
TF_ASSERT_OK_AND_ASSIGN(auto envs_deserialized,
CompilationEnvironments::CreateFromProto(proto));
EXPECT_EQ(
envs_deserialized->GetEnv<TestCompilationEnvironment1>().some_flag(), 10);
EXPECT_EQ(envs_deserialized->GetEnv<TestCompilationEnvironment2>()
.some_other_flag(),
20);
}
TEST_F(CompilationEnvironmentsTest, EnvTypePresenceCheck) {
CompilationEnvironments envs;
EXPECT_FALSE(envs.HasEnv<TestCompilationEnvironment1>());
envs.GetEnv<TestCompilationEnvironment1>();
EXPECT_TRUE(envs.HasEnv<TestCompilationEnvironment1>());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/compilation_environments.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/compilation_environments_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a64fe2e8-586b-45ce-a76d-0ae8985f6baf | cpp | abseil/abseil-cpp | test_instance_tracker | absl/container/internal/test_instance_tracker.cc | absl/container/internal/test_instance_tracker_test.cc | #include "absl/container/internal/test_instance_tracker.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace test_internal {
// Definitions of the static counters declared in BaseCountedInstance. They
// track, across all counted test instances, the total number of instances
// ever created, currently live instances, moves, copies, swaps, and
// comparisons performed.
int BaseCountedInstance::num_instances_ = 0;
int BaseCountedInstance::num_live_instances_ = 0;
int BaseCountedInstance::num_moves_ = 0;
int BaseCountedInstance::num_copies_ = 0;
int BaseCountedInstance::num_swaps_ = 0;
int BaseCountedInstance::num_comparisons_ = 0;
}  // namespace test_internal
ABSL_NAMESPACE_END
} | #include "absl/container/internal/test_instance_tracker.h"
#include "gtest/gtest.h"
namespace {
using absl::test_internal::CopyableMovableInstance;
using absl::test_internal::CopyableOnlyInstance;
using absl::test_internal::InstanceTracker;
using absl::test_internal::MovableOnlyInstance;
TEST(TestInstanceTracker, CopyableMovable) {
InstanceTracker tracker;
CopyableMovableInstance src(1);
EXPECT_EQ(1, src.value()) << src;
CopyableMovableInstance copy(src);
CopyableMovableInstance move(std::move(src));
EXPECT_EQ(1, tracker.copies());
EXPECT_EQ(1, tracker.moves());
EXPECT_EQ(0, tracker.swaps());
EXPECT_EQ(3, tracker.instances());
EXPECT_EQ(2, tracker.live_instances());
tracker.ResetCopiesMovesSwaps();
CopyableMovableInstance copy_assign(1);
copy_assign = copy;
CopyableMovableInstance move_assign(1);
move_assign = std::move(move);
EXPECT_EQ(1, tracker.copies());
EXPECT_EQ(1, tracker.moves());
EXPECT_EQ(0, tracker.swaps());
EXPECT_EQ(5, tracker.instances());
EXPECT_EQ(3, tracker.live_instances());
tracker.ResetCopiesMovesSwaps();
{
using std::swap;
swap(move_assign, copy);
swap(copy, move_assign);
EXPECT_EQ(2, tracker.swaps());
EXPECT_EQ(0, tracker.copies());
EXPECT_EQ(0, tracker.moves());
EXPECT_EQ(5, tracker.instances());
EXPECT_EQ(3, tracker.live_instances());
}
}
TEST(TestInstanceTracker, CopyableOnly) {
InstanceTracker tracker;
CopyableOnlyInstance src(1);
EXPECT_EQ(1, src.value()) << src;
CopyableOnlyInstance copy(src);
CopyableOnlyInstance copy2(std::move(src));
EXPECT_EQ(2, tracker.copies());
EXPECT_EQ(0, tracker.moves());
EXPECT_EQ(3, tracker.instances());
EXPECT_EQ(3, tracker.live_instances());
tracker.ResetCopiesMovesSwaps();
CopyableOnlyInstance copy_assign(1);
copy_assign = copy;
CopyableOnlyInstance copy_assign2(1);
copy_assign2 = std::move(copy2);
EXPECT_EQ(2, tracker.copies());
EXPECT_EQ(0, tracker.moves());
EXPECT_EQ(5, tracker.instances());
EXPECT_EQ(5, tracker.live_instances());
tracker.ResetCopiesMovesSwaps();
{
using std::swap;
swap(src, copy);
swap(copy, src);
EXPECT_EQ(2, tracker.swaps());
EXPECT_EQ(0, tracker.copies());
EXPECT_EQ(0, tracker.moves());
EXPECT_EQ(5, tracker.instances());
EXPECT_EQ(5, tracker.live_instances());
}
}
TEST(TestInstanceTracker, MovableOnly) {
InstanceTracker tracker;
MovableOnlyInstance src(1);
EXPECT_EQ(1, src.value()) << src;
MovableOnlyInstance move(std::move(src));
MovableOnlyInstance move_assign(2);
move_assign = std::move(move);
EXPECT_EQ(3, tracker.instances());
EXPECT_EQ(1, tracker.live_instances());
EXPECT_EQ(2, tracker.moves());
EXPECT_EQ(0, tracker.copies());
tracker.ResetCopiesMovesSwaps();
{
using std::swap;
MovableOnlyInstance other(2);
swap(move_assign, other);
swap(other, move_assign);
EXPECT_EQ(2, tracker.swaps());
EXPECT_EQ(0, tracker.copies());
EXPECT_EQ(0, tracker.moves());
EXPECT_EQ(4, tracker.instances());
EXPECT_EQ(2, tracker.live_instances());
}
}
TEST(TestInstanceTracker, ExistingInstances) {
CopyableMovableInstance uncounted_instance(1);
CopyableMovableInstance uncounted_live_instance(
std::move(uncounted_instance));
InstanceTracker tracker;
EXPECT_EQ(0, tracker.instances());
EXPECT_EQ(0, tracker.live_instances());
EXPECT_EQ(0, tracker.copies());
{
CopyableMovableInstance instance1(1);
EXPECT_EQ(1, tracker.instances());
EXPECT_EQ(1, tracker.live_instances());
EXPECT_EQ(0, tracker.copies());
EXPECT_EQ(0, tracker.moves());
{
InstanceTracker tracker2;
CopyableMovableInstance instance2(instance1);
CopyableMovableInstance instance3(std::move(instance2));
EXPECT_EQ(3, tracker.instances());
EXPECT_EQ(2, tracker.live_instances());
EXPECT_EQ(1, tracker.copies());
EXPECT_EQ(1, tracker.moves());
EXPECT_EQ(2, tracker2.instances());
EXPECT_EQ(1, tracker2.live_instances());
EXPECT_EQ(1, tracker2.copies());
EXPECT_EQ(1, tracker2.moves());
}
EXPECT_EQ(1, tracker.instances());
EXPECT_EQ(1, tracker.live_instances());
EXPECT_EQ(1, tracker.copies());
EXPECT_EQ(1, tracker.moves());
}
EXPECT_EQ(0, tracker.instances());
EXPECT_EQ(0, tracker.live_instances());
EXPECT_EQ(1, tracker.copies());
EXPECT_EQ(1, tracker.moves());
}
TEST(TestInstanceTracker, Comparisons) {
InstanceTracker tracker;
MovableOnlyInstance one(1), two(2);
EXPECT_EQ(0, tracker.comparisons());
EXPECT_FALSE(one == two);
EXPECT_EQ(1, tracker.comparisons());
EXPECT_TRUE(one != two);
EXPECT_EQ(2, tracker.comparisons());
EXPECT_TRUE(one < two);
EXPECT_EQ(3, tracker.comparisons());
EXPECT_FALSE(one > two);
EXPECT_EQ(4, tracker.comparisons());
EXPECT_TRUE(one <= two);
EXPECT_EQ(5, tracker.comparisons());
EXPECT_FALSE(one >= two);
EXPECT_EQ(6, tracker.comparisons());
EXPECT_TRUE(one.compare(two) < 0);
EXPECT_EQ(7, tracker.comparisons());
tracker.ResetCopiesMovesSwaps();
EXPECT_EQ(0, tracker.comparisons());
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/test_instance_tracker.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/test_instance_tracker_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
20c2a41d-cae1-4ee1-b9da-0a950d5bb1dc | cpp | google/libphonenumber | geocoding_data | cpp/src/phonenumbers/geocoding/geocoding_data.h | cpp/test/phonenumbers/geocoding/geocoding_data_test.cc | #ifndef I18N_PHONENUMBERS_GEOCODING_DATA
#define I18N_PHONENUMBERS_GEOCODING_DATA
#include <cstdint>
namespace i18n {
namespace phonenumbers {
// The set of languages for which geocoding descriptions are available for
// one country calling code.
struct CountryLanguages {
  // Array of language codes (e.g. "de", "en"), sorted ascending.
  const char** available_languages;
  // Number of entries in available_languages.
  const int available_languages_size;
};
// Geocoding data for one prefix-language combination.
struct PrefixDescriptions {
  // Array of phone number prefixes, sorted ascending.
  const int32_t* prefixes;
  // Number of entries in prefixes (and in descriptions).
  const int prefixes_size;
  // Description strings corresponding one-to-one with prefixes.
  const char** descriptions;
  // Array of possible prefix digit counts, sorted ascending.
  const int32_t* possible_lengths;
  // Number of entries in possible_lengths.
  const int possible_lengths_size;
};
// Returns the sorted array of country calling codes that have geocoding
// data, and its size.
const int* get_country_calling_codes();
int get_country_calling_codes_size();
// Returns the languages available for the calling code at `index` in the
// array returned by get_country_calling_codes().
const CountryLanguages* get_country_languages(int index);
// Returns the sorted array of "<calling code>_<language>" pair keys, and
// its size.
const char** get_prefix_language_code_pairs();
int get_prefix_language_code_pairs_size();
// Returns the descriptions for the pair at `index` in the array returned by
// get_prefix_language_code_pairs().
const PrefixDescriptions* get_prefix_descriptions(int index);
}  // namespace phonenumbers
}  // namespace i18n
#endif | #include <cmath>
#include <set>
#include <string>
#include <gtest/gtest.h>
#include "phonenumbers/base/basictypes.h"
#include "phonenumbers/geocoding/geocoding_data.h"
#include "phonenumbers/geocoding/geocoding_test_data.h"
#include "absl/container/btree_set.h"
namespace i18n {
namespace phonenumbers {
using std::set;
using std::string;
namespace {
typedef const CountryLanguages* (*country_languages_getter)(int index);
typedef const PrefixDescriptions* (*prefix_descriptions_getter)(int index);
void TestCountryLanguages(const CountryLanguages* languages) {
EXPECT_GT(languages->available_languages_size, 0);
for (int i = 0; i < languages->available_languages_size; ++i) {
string language(languages->available_languages[i]);
EXPECT_GT(language.size(), 0);
if (i > 0) {
EXPECT_LT(string(languages->available_languages[i - 1]),
language);
}
}
}
void TestCountryCallingCodeLanguages(
const int* country_calling_codes, int country_calling_codes_size,
country_languages_getter get_country_languages) {
EXPECT_GT(country_calling_codes_size, 0);
for (int i = 0; i < country_calling_codes_size; ++i) {
int code = country_calling_codes[i];
EXPECT_GT(code, 0);
if (i > 0) {
EXPECT_LT(country_calling_codes[i-1], code);
}
TestCountryLanguages(get_country_languages(i));
}
}
void TestPrefixDescriptions(const PrefixDescriptions* descriptions) {
EXPECT_GT(descriptions->prefixes_size, 0);
absl::btree_set<int> possible_lengths;
for (int i = 0; i < descriptions->prefixes_size; ++i) {
int prefix = descriptions->prefixes[i];
EXPECT_GT(prefix, 0);
if (i > 0) {
EXPECT_LT(descriptions->prefixes[i - 1], prefix);
}
possible_lengths.insert(log10(prefix) + 1);
}
EXPECT_GT(descriptions->possible_lengths_size, 0);
for (int i = 0; i < descriptions->possible_lengths_size; ++i) {
int possible_length = descriptions->possible_lengths[i];
EXPECT_GT(possible_length, 0);
if (i > 0) {
EXPECT_LT(descriptions->possible_lengths[i - 1], possible_length);
}
EXPECT_TRUE(
possible_lengths.find(possible_length) != possible_lengths.end());
}
}
void TestAllPrefixDescriptions(
const char** prefix_language_code_pairs,
int prefix_language_code_pairs_size,
prefix_descriptions_getter get_prefix_descriptions) {
EXPECT_GT(prefix_language_code_pairs_size, 0);
for (int i = 0; i < prefix_language_code_pairs_size; ++i) {
string language_code_pair(prefix_language_code_pairs[i]);
EXPECT_GT(language_code_pair.size(), 0);
if (i > 0) {
EXPECT_LT(string(prefix_language_code_pairs[i - 1]),
language_code_pair);
}
TestPrefixDescriptions(get_prefix_descriptions(i));
}
}
}
TEST(GeocodingDataTest, TestCountryCallingCodeLanguages) {
TestCountryCallingCodeLanguages(get_country_calling_codes(),
get_country_calling_codes_size(),
get_country_languages);
}
TEST(GeocodingDataTest, TestTestCountryCallingCodeLanguages) {
TestCountryCallingCodeLanguages(get_test_country_calling_codes(),
get_test_country_calling_codes_size(),
get_test_country_languages);
}
TEST(GeocodingDataTest, TestPrefixDescriptions) {
TestAllPrefixDescriptions(get_prefix_language_code_pairs(),
get_prefix_language_code_pairs_size(),
get_prefix_descriptions);
}
TEST(GeocodingDataTest, TestTestPrefixDescriptions) {
TestAllPrefixDescriptions(get_test_prefix_language_code_pairs(),
get_test_prefix_language_code_pairs_size(),
get_test_prefix_descriptions);
}
TEST(GeocodingDataTest, TestTestGeocodingData) {
ASSERT_EQ(3, get_test_country_calling_codes_size());
const int* country_calling_codes = get_test_country_calling_codes();
const int expected_calling_codes[] = {1, 54, 82};
for (int i = 0; i < get_test_country_calling_codes_size(); ++i) {
EXPECT_EQ(expected_calling_codes[i], country_calling_codes[i]);
}
const CountryLanguages* langs_1 = get_test_country_languages(0);
ASSERT_EQ(2, langs_1->available_languages_size);
const char* expected_languages[] = {"de", "en"};
for (int i = 0; i < langs_1->available_languages_size; ++i) {
EXPECT_STREQ(expected_languages[i], langs_1->available_languages[i]);
}
ASSERT_EQ(5, get_test_prefix_language_code_pairs_size());
const char** language_code_pairs = get_test_prefix_language_code_pairs();
const char* expected_language_code_pairs[] = {
"1_de", "1_en", "54_en", "82_en", "82_ko",
};
for (int i = 0; i < get_test_prefix_language_code_pairs_size(); ++i) {
EXPECT_STREQ(expected_language_code_pairs[i], language_code_pairs[i]);
}
const PrefixDescriptions* desc_1_de = get_test_prefix_descriptions(0);
ASSERT_EQ(2, desc_1_de->prefixes_size);
const int32 expected_prefixes[] = {1201, 1650};
const char* expected_descriptions[] = {
"New Jersey",
"Kalifornien",
};
for (int i = 0; i < desc_1_de->prefixes_size; ++i) {
EXPECT_EQ(expected_prefixes[i], desc_1_de->prefixes[i]);
EXPECT_STREQ(expected_descriptions[i], desc_1_de->descriptions[i]);
}
ASSERT_EQ(1, desc_1_de->possible_lengths_size);
const int expected_lengths[] = {4};
for (int i = 0; i < desc_1_de->possible_lengths_size; ++i) {
EXPECT_EQ(expected_lengths[i], desc_1_de->possible_lengths[i]);
}
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/geocoding/geocoding_data.h | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/geocoding/geocoding_data_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
58fa6423-d526-43fc-b63e-1b2185a19835 | cpp | tensorflow/tensorflow | mfcc_mel_filterbank | tensorflow/lite/kernels/internal/mfcc_mel_filterbank.cc | tensorflow/core/kernels/mfcc_mel_filterbank_test.cc | #include "tensorflow/lite/kernels/internal/mfcc_mel_filterbank.h"
#include <math.h>
namespace tflite {
namespace internal {
// Starts uninitialized; Initialize() must succeed before Compute() is used.
MfccMelFilterbank::MfccMelFilterbank() : initialized_(false) {}
// Precomputes the triangular mel filterbank for an input spectrum of
// `input_length` bins sampled at `input_sample_rate`, producing
// `output_channel_count` mel channels spanning [lower_frequency_limit,
// upper_frequency_limit]. Returns false (leaving the object unusable)
// on invalid arguments.
bool MfccMelFilterbank::Initialize(int input_length, double input_sample_rate,
                                   int output_channel_count,
                                   double lower_frequency_limit,
                                   double upper_frequency_limit) {
  num_channels_ = output_channel_count;
  sample_rate_ = input_sample_rate;
  input_length_ = input_length;
  // Validate arguments before any real work.
  if (num_channels_ < 1) {
    return false;
  }
  if (sample_rate_ <= 0) {
    return false;
  }
  if (input_length < 2) {
    return false;
  }
  if (lower_frequency_limit < 0) {
    return false;
  }
  if (upper_frequency_limit <= lower_frequency_limit) {
    return false;
  }
  // Place num_channels_ + 1 center frequencies evenly on the mel scale
  // between the two limits; entry i is the upper edge of channel i.
  center_frequencies_.resize(num_channels_ + 1);
  const double mel_low = FreqToMel(lower_frequency_limit);
  const double mel_hi = FreqToMel(upper_frequency_limit);
  const double mel_span = mel_hi - mel_low;
  const double mel_spacing = mel_span / static_cast<double>(num_channels_ + 1);
  for (int i = 0; i < num_channels_ + 1; ++i) {
    center_frequencies_[i] = mel_low + (mel_spacing * (i + 1));
  }
  // Width in Hz of one spectrum bin; bins span 0..Nyquist over
  // input_length_ - 1 intervals.
  const double hz_per_sbin =
      0.5 * sample_rate_ / static_cast<double>(input_length_ - 1);
  start_index_ = static_cast<int>(1.5 + (lower_frequency_limit / hz_per_sbin));
  end_index_ = static_cast<int>(upper_frequency_limit / hz_per_sbin);
  // Map each spectrum bin to the mel channel whose lower slope covers it;
  // -2 marks bins outside [start_index_, end_index_].
  band_mapper_.resize(input_length_);
  int channel = 0;
  for (int i = 0; i < input_length_; ++i) {
    double melf = FreqToMel(i * hz_per_sbin);
    if ((i < start_index_) || (i > end_index_)) {
      band_mapper_[i] = -2;
    } else {
      // Advance to the first center frequency at or above this bin; bins
      // below the first center map to channel -1 (lower-edge ramp only).
      while ((channel < num_channels_) &&
             (center_frequencies_[channel] < melf)) {
        ++channel;
      }
      band_mapper_[i] = channel - 1;
    }
  }
  // Weight of each bin toward its mapped channel; the complement
  // (1 - weight) falls into the next-higher channel during Compute().
  weights_.resize(input_length_);
  for (int i = 0; i < input_length_; ++i) {
    channel = band_mapper_[i];
    if ((i < start_index_) || (i > end_index_)) {
      weights_[i] = 0.0;
    } else {
      if (channel >= 0) {
        weights_[i] =
            (center_frequencies_[channel + 1] - FreqToMel(i * hz_per_sbin)) /
            (center_frequencies_[channel + 1] - center_frequencies_[channel]);
      } else {
        // Bins below the first center ramp up from mel_low.
        weights_[i] = (center_frequencies_[0] - FreqToMel(i * hz_per_sbin)) /
                      (center_frequencies_[0] - mel_low);
      }
    }
  }
  // Sanity check: flag channels receiving too little total weight
  // (undersampled by the FFT resolution).
  std::vector<int> bad_channels;
  for (int c = 0; c < num_channels_; ++c) {
    float band_weights_sum = 0.0;
    for (int i = 0; i < input_length_; ++i) {
      if (band_mapper_[i] == c - 1) {
        band_weights_sum += (1.0 - weights_[i]);
      } else if (band_mapper_[i] == c) {
        band_weights_sum += weights_[i];
      }
    }
    if (band_weights_sum < 0.5) {
      bad_channels.push_back(c);
    }
  }
  if (!bad_channels.empty()) {
    // NOTE(review): intentionally empty — presumably a warning log about
    // bad_channels was stripped here; confirm against upstream source.
  }
  initialized_ = true;
  return true;
}
void MfccMelFilterbank::Compute(const std::vector<double> &input,
std::vector<double> *output) const {
if (!initialized_) {
return;
}
if (input.size() <= end_index_) {
return;
}
output->assign(num_channels_, 0.0);
for (int i = start_index_; i <= end_index_; i++) {
double spec_val = sqrt(input[i]);
double weighted = spec_val * weights_[i];
int channel = band_mapper_[i];
if (channel >= 0)
(*output)[channel] += weighted;
channel++;
if (channel < num_channels_)
(*output)[channel] += spec_val - weighted;
}
}
// Converts a frequency in Hz to the mel scale via 1127 * ln(1 + f/700).
double MfccMelFilterbank::FreqToMel(double freq) const {
  const double ratio = freq / 700.0;
  return 1127.0 * log1p(ratio);
}
}
} | #include "tensorflow/core/kernels/mfcc_mel_filterbank.h"
#include <limits>
#include <vector>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(MfccMelFilterbankTest, AgreesWithPythonGoldenValues) {
MfccMelFilterbank filterbank;
std::vector<double> input;
const int kSampleCount = 513;
input.reserve(kSampleCount);
for (int i = 0; i < kSampleCount; ++i) {
input.push_back(i + 1);
}
const int kChannelCount = 20;
filterbank.Initialize(
input.size(), 22050 , kChannelCount ,
20.0 , 4000.0 );
std::vector<double> output;
filterbank.Compute(input, &output);
std::vector<double> expected = {
7.38894574, 10.30330648, 13.72703292, 17.24158686, 21.35253118,
25.77781089, 31.30624108, 37.05877236, 43.9436536, 51.80306637,
60.79867148, 71.14363376, 82.90910141, 96.50069158, 112.08428368,
129.96721968, 150.4277597, 173.74997634, 200.86037462, 231.59802942};
ASSERT_EQ(output.size(), kChannelCount);
for (int i = 0; i < kChannelCount; ++i) {
EXPECT_NEAR(output[i], expected[i], 1e-04);
}
}
TEST(MfccMelFilterbankTest, IgnoresExistingContentOfOutputVector) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::vector<double> input;
std::vector<double> output;
filterbank.Initialize(kSampleCount, 22050 ,
20 , 20.0 ,
4000.0 );
input.assign(kSampleCount, 1.0);
filterbank.Compute(input, &output);
for (const double value : output) {
EXPECT_LE(0.0, value);
}
input.assign(kSampleCount, 0.0);
filterbank.Compute(input, &output);
for (const double value : output) {
EXPECT_EQ(0.0, value);
}
}
TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxIntValue) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::size_t num_channels = std::numeric_limits<int>::max();
bool initialized = filterbank.Initialize(
kSampleCount, 2 , num_channels ,
1.0 , 5.0 );
EXPECT_FALSE(initialized);
}
TEST(MfccMelFilterbankTest, FailsWhenChannelsGreaterThanMaxSize) {
MfccMelFilterbank filterbank;
const int kSampleCount = 513;
std::size_t num_channels = std::vector<double>().max_size() + 1;
bool initialized = filterbank.Initialize(
kSampleCount, 2 , num_channels ,
1.0 , 5.0 );
EXPECT_FALSE(initialized);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/mfcc_mel_filterbank.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mfcc_mel_filterbank_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb0c36ad-0dad-4007-b2bb-20fde4559604 | cpp | tensorflow/tensorflow | tuple_util | third_party/xla/xla/service/tuple_util.cc | third_party/xla/xla/service/tuple_util_test.cc | #include "xla/service/tuple_util.h"
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
HloInstruction* TupleUtil::ExtractPrefix(HloInstruction* input_tuple,
int64_t elements,
absl::string_view name) {
CHECK(input_tuple->shape().IsTuple());
HloComputation* computation = input_tuple->parent();
const Shape& input_shape = input_tuple->shape();
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(elements);
for (int i = 0; i < elements; i++) {
std::string element_name;
if (!name.empty()) {
element_name = absl::StrCat(name, ".element.", i);
}
tuple_elements.push_back(computation->AddInstruction(
HloInstruction::CreateGetTupleElement(input_shape.tuple_shapes(i),
input_tuple, i),
element_name));
}
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements), name);
}
HloInstruction* TupleUtil::AppendSuffix(
HloInstruction* input_tuple,
absl::Span<HloInstruction* const> trailing_values) {
CHECK(input_tuple->shape().IsTuple());
HloComputation* computation = input_tuple->parent();
const Shape& input_shape = input_tuple->shape();
std::vector<HloInstruction*> tuple_elements;
tuple_elements.reserve(input_shape.tuple_shapes_size());
for (int i = 0; i < input_shape.tuple_shapes_size(); i++) {
tuple_elements.push_back(
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
input_shape.tuple_shapes(i), input_tuple, i)));
}
tuple_elements.insert(tuple_elements.end(), trailing_values.begin(),
trailing_values.end());
return computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
}
absl::StatusOr<HloInstruction*> TupleUtil::ReplaceTupleWith(
HloInstruction* new_instruction, HloInstruction* tuple,
ShapeIndex shape_index, bool insert_bitcast_if_different_shape) {
const Shape& tuple_shape = tuple->shape();
CHECK(tuple->shape().IsTuple())
<< "ReplaceTupleWith was called for a non-tuple. Tuple = "
<< tuple->ToString()
<< ", new_instruction = " << new_instruction->ToString()
<< ", shape_index = " << shape_index.ToString();
const HloInstruction* instruction = new_instruction;
bool equivalent = true;
for (int i = shape_index.size() - 1; i >= 0; --i) {
int index = shape_index[i];
if (instruction->opcode() != HloOpcode::kGetTupleElement ||
instruction->tuple_index() != index) {
equivalent = false;
break;
}
instruction = instruction->operand(0);
}
if (equivalent && instruction == tuple) {
VLOG(4) << "Instruction " << new_instruction->ToShortString()
<< " already exists at index " << shape_index.ToString() << " of "
<< tuple->ToShortString();
return tuple;
}
HloComputation* computation = new_instruction->parent();
std::vector<HloInstruction*> tuple_args(tuple_shape.tuple_shapes_size());
CHECK_GE(tuple_shape.tuple_shapes_size(), shape_index[0]);
for (int i = 0; i < tuple_shape.tuple_shapes_size(); ++i) {
const Shape& subshape = tuple_shape.tuple_shapes(i);
auto get_operand = [&]() {
if (tuple->opcode() == HloOpcode::kTuple) {
return tuple->mutable_operand(i);
} else {
return computation->AddInstruction(
HloInstruction::CreateGetTupleElement(subshape, tuple, i));
}
};
if (i == shape_index[0]) {
if (subshape.IsTuple()) {
TF_ASSIGN_OR_RETURN(tuple_args[i],
ReplaceTupleWith(new_instruction, get_operand(),
ShapeIndex(shape_index.begin() + 1,
shape_index.end())));
} else {
if (subshape != new_instruction->shape() &&
insert_bitcast_if_different_shape) {
VLOG(4) << "Old shape = " << subshape.ToString()
<< ", new shape = " << new_instruction->shape().ToString()
<< "; inserting a bitcast.";
new_instruction = computation->AddInstruction(
HloInstruction::CreateBitcast(subshape, new_instruction));
} else if (tuple->opcode() == HloOpcode::kTuple &&
tuple->operand(i) == new_instruction) {
VLOG(4) << "Tuple already contains the new instruction = "
<< new_instruction->ToShortString()
<< " tuple = " << tuple->ToShortString();
return tuple;
}
tuple_args[i] = new_instruction;
}
} else {
tuple_args[i] = get_operand();
}
}
if (shape_index[0] == tuple_shape.tuple_shapes_size()) {
tuple_args.push_back(new_instruction);
}
return computation->AddInstruction(HloInstruction::CreateTuple(tuple_args));
}
HloInstruction* TupleUtil::AddGetTupleElements(
const HloPosition& position) {
HloInstruction* instruction = position.instruction;
HloComputation* computation = instruction->parent();
for (int64_t index : position.index) {
auto gte_it = absl::c_find_if(
instruction->users(), [index](const HloInstruction* use) {
return use != use->parent()->root_instruction() &&
use->opcode() == HloOpcode::kGetTupleElement &&
use->tuple_index() == index;
});
if (gte_it != instruction->users().end()) {
instruction = *gte_it;
} else {
instruction =
computation->AddInstruction(HloInstruction::CreateGetTupleElement(
instruction->shape().tuple_shapes(index), instruction, index));
}
}
return instruction;
}
ShapeTree<HloInstruction*> TupleUtil::DisassembleTupleInstruction(
HloInstruction* tuple) {
const Shape& shape = tuple->shape();
ShapeTree<HloInstruction*> result(shape);
result.ForEachMutableElement([&](ShapeIndexView index,
HloInstruction** element) {
if (index.empty()) {
*element = tuple;
} else {
ShapeIndexView parent_index = index.subspan(0, index.size() - 1);
HloInstruction* parent = result.element(parent_index);
std::string name = absl::StrCat(tuple->name(), ".disassembled.",
absl::StrJoin(index, "."));
*element = tuple->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(parent, index.back()), name);
}
});
return result;
}
HloInstruction* TupleUtil::AssembleTupleInstruction(
HloComputation* computation, ShapeTree<HloInstruction*> elements,
absl::string_view name) {
elements.ForEachMutableElementPostOrder(
[&](const ShapeIndex& index, HloInstruction** element) {
const Shape& subshape = ShapeUtil::GetSubshape(elements.shape(), index);
if (subshape.IsTuple()) {
absl::InlinedVector<HloInstruction*, 2> children;
ShapeIndex child_index = index;
for (int i = 0; i < subshape.tuple_shapes_size(); ++i) {
child_index.push_back(i);
children.push_back(elements.element(child_index));
child_index.pop_back();
}
std::string new_name;
if (!name.empty()) {
if (index.empty()) {
new_name = std::string(name);
} else {
new_name =
absl::StrCat(name, ".assembled.", absl::StrJoin(index, "."));
}
}
*element = computation->AddInstruction(
HloInstruction::CreateTuple(children), new_name);
}
});
return elements.element({});
}
} | #include "xla/service/tuple_util.h"
#include <memory>
#include <string>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using TupleUtilTest = HloTestBase;
TEST_F(TupleUtilTest, ExtractPrefix) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)
ROOT p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
HloInstruction* prefix = TupleUtil::ExtractPrefix(param0, 2);
EXPECT_THAT(prefix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(TupleUtilTest, AppendSuffix) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0},f32[32,32]{1,0},f32[32,32]{1,0}) parameter(0)
ROOT p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* param0 =
module->entry_computation()->parameter_instruction(0);
HloInstruction* param1 =
module->entry_computation()->parameter_instruction(1);
HloInstruction* with_suffix =
TupleUtil::AppendSuffix(param0, {param1, param1});
EXPECT_THAT(with_suffix, op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::GetTupleElement(op::Parameter(0), 1),
op::GetTupleElement(op::Parameter(0), 2),
op::Parameter(1), op::Parameter(1)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithTupleInst) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(p0, p1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* tuple = FindInstruction(module.get(), "tuple");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p0, tuple, {1}));
EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(0), op::Parameter(0)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInst) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
ROOT p0 = (f32[32,32]{1,0}, f32[32,32]{1,0}) parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* p1 = FindInstruction(module.get(), "p1");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p1, p0, {0}));
EXPECT_THAT(new_tuple, op::Tuple(op::Parameter(1),
op::GetTupleElement(op::Parameter(0), 1)));
}
TEST_F(TupleUtilTest, ReplaceTupleWithNonTupleInstNested) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
ROOT p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* p1 = FindInstruction(module.get(), "p1");
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * new_tuple,
TupleUtil::ReplaceTupleWith(p1, p0, {1, 0}));
EXPECT_THAT(
new_tuple,
op::Tuple(op::GetTupleElement(op::Parameter(0), 0),
op::Tuple(op::Parameter(1),
op::GetTupleElement(
op::GetTupleElement(op::Parameter(0), 1), 1))));
}
TEST_F(TupleUtilTest, AddGetTupleElements) {
const std::string hlo_string = R"(
HloModule Module
ENTRY entry {
p0 = (f32[32,32]{1,0}, (f32[32,32]{1,0}, f32[32,32]{1,0})) parameter(0)
gte = (f32[32,32]{1,0}, f32[32,32]{1,0}) get-tuple-element(p0), index=1
ROOT root = f32[32,32]{1,0} get-tuple-element(gte), index=1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* p0 = FindInstruction(module.get(), "p0");
HloInstruction* existing_gte = FindInstruction(module.get(), "gte");
HloInstruction* new_gte = TupleUtil::AddGetTupleElements({p0, {1, 0}});
EXPECT_THAT(new_gte, op::GetTupleElement(existing_gte, 0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b2254c0-2234-4559-85fd-63d91a12a555 | cpp | tensorflow/tensorflow | credentials_factory | tensorflow/core/data/service/credentials_factory.cc | tensorflow/core/data/service/credentials_factory_test.cc | #include "tensorflow/core/data/service/credentials_factory.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
namespace data {
namespace {
mutex* get_lock() {
static mutex lock(LINKER_INITIALIZED);
return &lock;
}
using CredentialsFactories =
std::unordered_map<std::string, CredentialsFactory*>;
CredentialsFactories& credentials_factories() {
static auto& factories = *new CredentialsFactories();
return factories;
}
}
void CredentialsFactory::Register(CredentialsFactory* factory) {
mutex_lock l(*get_lock());
if (!credentials_factories().insert({factory->Protocol(), factory}).second) {
LOG(ERROR)
<< "Two credentials factories are being registered with protocol "
<< factory->Protocol() << ". Which one gets used is undefined.";
}
}
Status CredentialsFactory::Get(absl::string_view protocol,
CredentialsFactory** out) {
mutex_lock l(*get_lock());
auto it = credentials_factories().find(std::string(protocol));
if (it != credentials_factories().end()) {
*out = it->second;
return absl::OkStatus();
}
std::vector<string> available_types;
for (const auto& factory : credentials_factories()) {
available_types.push_back(factory.first);
}
return errors::NotFound("No credentials factory has been registered for ",
"protocol ", protocol,
". The available types are: [ ",
absl::StrJoin(available_types, ", "), " ]");
}
Status CredentialsFactory::CreateServerCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ServerCredentials>* out) {
CredentialsFactory* factory;
TF_RETURN_IF_ERROR(CredentialsFactory::Get(protocol, &factory));
TF_RETURN_IF_ERROR(factory->CreateServerCredentials(out));
return absl::OkStatus();
}
Status CredentialsFactory::CreateClientCredentials(
absl::string_view protocol,
std::shared_ptr<::grpc::ChannelCredentials>* out) {
CredentialsFactory* factory;
TF_RETURN_IF_ERROR(CredentialsFactory::Get(protocol, &factory));
TF_RETURN_IF_ERROR(factory->CreateClientCredentials(out));
return absl::OkStatus();
}
bool CredentialsFactory::Exists(absl::string_view protocol) {
mutex_lock l(*get_lock());
return credentials_factories().find(std::string(protocol)) !=
credentials_factories().end();
}
class InsecureCredentialsFactory : public CredentialsFactory {
public:
std::string Protocol() override { return "grpc"; }
Status CreateServerCredentials(
std::shared_ptr<::grpc::ServerCredentials>* out) override {
*out = ::grpc::InsecureServerCredentials();
return absl::OkStatus();
}
Status CreateClientCredentials(
std::shared_ptr<::grpc::ChannelCredentials>* out) override {
*out = ::grpc::InsecureChannelCredentials();
return absl::OkStatus();
}
};
class InsecureCredentialsRegistrar {
public:
InsecureCredentialsRegistrar() {
auto factory = new InsecureCredentialsFactory();
CredentialsFactory::Register(factory);
}
};
static InsecureCredentialsRegistrar registrar;
}
} | #include "tensorflow/core/data/service/credentials_factory.h"
#include <memory>
#include <string>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kFailedToCreateServerCredentials[] =
"Failed to create server credentials.";
constexpr char kFailedToCreateClientCredentials[] =
"Failed to create client credentials.";
class TestCredentialsFactory : public CredentialsFactory {
public:
std::string Protocol() override { return "test"; }
Status CreateServerCredentials(
std::shared_ptr<grpc::ServerCredentials>* out) override {
return errors::Internal(kFailedToCreateServerCredentials);
}
Status CreateClientCredentials(
std::shared_ptr<grpc::ChannelCredentials>* out) override {
return errors::Internal(kFailedToCreateClientCredentials);
}
};
}
TEST(CredentialsFactory, Register) {
TestCredentialsFactory test_factory;
CredentialsFactory::Register(&test_factory);
std::shared_ptr<grpc::ServerCredentials> server_credentials;
ASSERT_EQ(errors::Internal(kFailedToCreateServerCredentials),
CredentialsFactory::CreateServerCredentials(test_factory.Protocol(),
&server_credentials));
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
ASSERT_EQ(errors::Internal(kFailedToCreateClientCredentials),
CredentialsFactory::CreateClientCredentials(test_factory.Protocol(),
&client_credentials));
}
TEST(CredentialsFactory, DefaultGrpcProtocol) {
std::shared_ptr<grpc::ServerCredentials> server_credentials;
TF_ASSERT_OK(
CredentialsFactory::CreateServerCredentials("grpc", &server_credentials));
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
TF_ASSERT_OK(
CredentialsFactory::CreateClientCredentials("grpc", &client_credentials));
}
TEST(CredentialsFactory, MissingServerProtocol) {
std::shared_ptr<grpc::ServerCredentials> server_credentials;
Status s = CredentialsFactory::CreateServerCredentials("unknown_protocol",
&server_credentials);
ASSERT_EQ(error::Code::NOT_FOUND, s.code());
ASSERT_TRUE(
absl::StrContains(s.ToString(),
"No credentials factory has been registered for "
"protocol unknown_protocol"));
}
TEST(CredentialsFactory, MissingClientProtocol) {
std::shared_ptr<grpc::ChannelCredentials> client_credentials;
Status s = CredentialsFactory::CreateClientCredentials("unknown_protocol",
&client_credentials);
ASSERT_EQ(error::Code::NOT_FOUND, s.code());
ASSERT_TRUE(
absl::StrContains(s.ToString(),
"No credentials factory has been registered for "
"protocol unknown_protocol"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/credentials_factory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/credentials_factory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
654bad89-09d6-46bb-b7c5-37040b507d71 | cpp | google/arolla | demangle | arolla/util/demangle.cc | arolla/util/demangle_test.cc | #include "arolla/util/demangle.h"
#include <cstdlib>
#include <string>
#include <typeinfo>
#include "arolla/util/bytes.h"
#if defined(__GXX_RTTI)
#define AROLLA_HAS_CXA_DEMANGLE
#endif
#ifdef AROLLA_HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace arolla {
std::string TypeName(const std::type_info& ti) {
if (ti == typeid(arolla::Bytes)) {
return "arolla::Bytes";
}
int status = 0;
char* demangled = nullptr;
#ifdef AROLLA_HAS_CXA_DEMANGLE
demangled = abi::__cxa_demangle(ti.name(), nullptr, nullptr, &status);
#endif
if (status == 0 && demangled != nullptr) {
std::string out = demangled;
free(demangled);
return out;
} else {
return ti.name();
}
}
} | #include "arolla/util/demangle.h"
#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace arolla {
namespace {
using ::testing::Eq;
using ::testing::MatchesRegex;
TEST(DemangleTest, TypeName) {
EXPECT_THAT(TypeName<int>(), Eq("int"));
EXPECT_THAT(TypeName<int32_t>(), Eq("int"));
EXPECT_THAT(TypeName<std::vector<int>>(), MatchesRegex("std::.*vector.*"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/demangle.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/demangle_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4152b9d6-a170-4c43-a583-3cc6b5fc12ef | cpp | tensorflow/tensorflow | flip_left_right | tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.cc | tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right_test.cc | #include <cstring>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace ml_adj {
namespace flip_left_right {
namespace {
using ::ml_adj::algo::Algo;
using ::ml_adj::algo::InputPack;
using ::ml_adj::algo::OutputPack;
using ::ml_adj::data::DataRef;
using ::ml_adj::data::MutableDataRef;
using ::ml_adj::data::TypeWidth;
void FlipLeftRight(dim_t batches, dim_t input_height, dim_t input_width,
const char* input_data, char* output_data,
dim_t chunk_size) {
const dim_t row_stride = input_width * chunk_size;
const dim_t batch_stride = row_stride * input_height;
for (int b = 0; b < batches; ++b) {
const char* src_data_prt = input_data + b * batch_stride;
char* dst_data_prt = output_data + b * batch_stride;
for (int y = 0; y < input_height; ++y) {
const char* src_ptr_row =
src_data_prt + y * row_stride + (input_width - 1) * chunk_size;
char* dst_ptr_row = dst_data_prt + y * row_stride;
for (int x = 0; x < input_width; ++x) {
std::memcpy(dst_ptr_row, src_ptr_row, chunk_size);
src_ptr_row -= chunk_size;
dst_ptr_row += chunk_size;
}
}
}
}
void ComputeFlipLeftRight(const InputPack& inputs, const OutputPack& outputs) {
TFLITE_DCHECK(inputs.size() == 1);
TFLITE_DCHECK(outputs.size() == 1);
const DataRef* img = inputs[0];
const char* img_data = reinterpret_cast<const char*>(img->Data());
const dim_t num_batches = img->Dims()[0];
const dim_t height = img->Dims()[1];
const dim_t width = img->Dims()[2];
const dim_t num_channels = img->Dims()[3];
const dim_t chunk_size = TypeWidth(img->Type()) * num_channels;
MutableDataRef* output = outputs[0];
output->Resize({num_batches, height, width, num_channels});
char* output_data = reinterpret_cast<char*>(output->Data());
FlipLeftRight(num_batches, height, width, img_data, output_data, chunk_size);
}
}
const Algo* Impl_FlipLeftRight() {
static const Algo flip_left_right = {&ComputeFlipLeftRight, nullptr};
return &flip_left_right;
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.h"
#include <cstring>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
using ::ml_adj::algo::Algo;
using ::ml_adj::data::OwningVectorRef;
namespace ml_adj {
namespace flip_left_right {
namespace {
struct FlipLeftRightTestParams {
const std::vector<dim_t> img_dims;
const std::vector<float> img_data;
const std::vector<float> expected_data;
const std::vector<dim_t> expected_shape;
};
class FlipLeftRightTest
: public ::testing::TestWithParam<FlipLeftRightTestParams> {};
TEST_P(FlipLeftRightTest, FloatPixelType) {
constexpr float kAbsError = 0.01f;
const FlipLeftRightTestParams& params = GetParam();
OwningVectorRef img(etype_t::f32);
img.Resize(dims_t(params.img_dims));
ASSERT_EQ(img.Bytes(), params.img_data.size() * sizeof(float));
std::memcpy(img.Data(), params.img_data.data(), img.Bytes());
OwningVectorRef output(etype_t::f32);
const Algo* flip_left_right = Impl_FlipLeftRight();
flip_left_right->process({&img}, {&output});
ASSERT_EQ(output.Bytes(), params.expected_data.size() * sizeof(float));
ASSERT_EQ(output.Dims(), params.expected_shape);
const float* out_data = reinterpret_cast<float*>(output.Data());
for (int i = 0; i < output.NumElements(); ++i) {
EXPECT_NEAR(out_data[i], params.expected_data[i], kAbsError)
<< "out_data[" << i << "] = " << out_data[i] << ", expected_data[" << i
<< "] = " << params.expected_data[i];
}
}
INSTANTIATE_TEST_SUITE_P(
FlipLeftRightTests, FlipLeftRightTest,
testing::ValuesIn({
FlipLeftRightTestParams{{1, 3, 3, 1},
{11, 12, 13,
21, 22, 23,
31, 32, 33},
{13, 12, 11,
23, 22, 21,
33, 32, 31},
{1, 3, 3, 1}},
FlipLeftRightTestParams{{1, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{1, 3, 3, 2}},
FlipLeftRightTestParams{{2, 3, 3, 2},
{11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6,
13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4},
{13, 4, 12, 3, 11, 2,
23, 5, 22, 4, 21, 3,
33, 6, 32, 5, 31, 4,
11, 2, 12, 3, 13, 4,
21, 3, 22, 4, 23, 5,
31, 4, 32, 5, 33, 6},
{2, 3, 3, 2}},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/algo/flip_left_right_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33f82346-9f4f-407e-9852-6d1bd1bf3883 | cpp | tensorflow/tensorflow | url | tensorflow/core/data/service/url.cc | tensorflow/core/data/service/url_test.cc | #include "tensorflow/core/data/service/url.h"
#include <string>
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/regexp.h"
namespace tensorflow {
namespace data {
URL::URL(absl::string_view url) { Parse(url); }
void URL::Parse(absl::string_view url) {
absl::string_view regexp = "(.*):([a-zA-Z0-9_]+|%port(_[a-zA-Z0-9_]+)?%)";
if (!RE2::FullMatch(url, regexp, &host_, &port_)) {
host_ = std::string(url);
port_ = "";
}
}
}
} | #include "tensorflow/core/data/service/url.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(URLTest, ParseUrl) {
URL url("localhost");
EXPECT_EQ(url.host(), "localhost");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseUrlWithProtocol) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseUrlWithPort) {
URL url("localhost:1234");
EXPECT_EQ(url.host(), "localhost");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "1234");
}
TEST(URLTest, ParseUrlWithProtocolAndPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "1234");
}
TEST(URLTest, ParseUrlWithProtocolAndDynamicPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port%");
}
TEST(URLTest, ParseBorgAddress) {
URL url("/worker/task/0");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseBorgAddressWithCustomProtocol) {
URL url("worker:/worker/task/0");
EXPECT_EQ(url.host(), "worker:/worker/task/0");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseBorgAddressWithNamedPort) {
URL url("/worker/task/0:worker");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "worker");
}
TEST(URLTest, ParseBorgAddressWithDynamicPort) {
URL url("/worker/task/0:%port%");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port%");
}
TEST(URLTest, ParseBorgAddressWithDynamicNamedPort) {
URL url("/worker/task/0:%port_worker%");
EXPECT_EQ(url.host(), "/worker/task/0");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port_worker%");
}
TEST(URLTest, ParseIPv4Address) {
URL url("127.0.0.1");
EXPECT_EQ(url.host(), "127.0.0.1");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv4AddressWithPort) {
URL url("127.0.0.1:8000");
EXPECT_EQ(url.host(), "127.0.0.1");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "8000");
}
TEST(URLTest, ParseIPv6Address) {
URL url("[::1]");
EXPECT_EQ(url.host(), "[::1]");
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv6AddressWithProtocol) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseIPv6AddressWithPort) {
URL url("[::1]:23456");
EXPECT_EQ(url.host(), "[::1]");
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "23456");
}
TEST(URLTest, ParseIPv6AddressWithProtocolAndPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "23456");
}
TEST(URLTest, ParseIPv6AddressWithProtocolAndDynamicPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "%port_name%");
}
TEST(URLTest, ParseNonLocalIPv6Address) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseNonLocalIPv6AddressWithNamedPort) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_TRUE(url.has_port());
EXPECT_EQ(url.port(), "worker");
}
TEST(URLTest, ParseEmptyIPv6Address) {
URL url("http:
EXPECT_EQ(url.host(), "http:
EXPECT_FALSE(url.has_port());
}
TEST(URLTest, ParseEmptyAddress) {
URL url("");
EXPECT_EQ(url.host(), "");
EXPECT_FALSE(url.has_port());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/url.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/url_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8323463a-09b1-420b-baaf-d77521ca8e27 | cpp | tensorflow/tensorflow | preemption_sync_manager | third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager.cc | third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager_test.cc | #include "xla/tsl/distributed_runtime/preemption/preemption_sync_manager.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include "xla/tsl/lib/monitoring/gauge.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedTask;
using tensorflow::KeyValueEntry;
constexpr int64_t kPreemptionSyncUnsetCounter = -1;
constexpr char kPreemptionNoticeKey[] = "RECEIVED_PREEMPTION_NOTICE";
constexpr char kPreemptionCounterDirKey[] = "PREEMPTION_CURRENT_COUNTER/";
constexpr char kPreemptionBarrier[] = "PREEMPTION_SYNC_BARRIER";
constexpr absl::Duration kPreemptionBarrierTimeout = absl::Minutes(3);
auto* sync_usage_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/reached_sync_point_usage",
"Records if preempt sync manager's ReachSyncPoint() was called at least "
"once.");
auto* notified_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/notified",
"Records receipt of preemption notification.");
auto* set_sync_point_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/set_sync_point",
"Records that sync point is set.");
auto* reached_sync_point_metric = monitoring::Gauge<bool, 0>::New(
"/coordination_service/preempt_manager/reached_sync_point",
"Records that sync point is reached.");
constexpr absl::Duration kProtocolDuration = absl::Minutes(15);
class PreemptionSyncManagerImpl : public PreemptionSyncManager {
public:
PreemptionSyncManagerImpl() = default;
~PreemptionSyncManagerImpl() override {
shutdown_.Notify();
}
absl::Status Initialize(CoordinationServiceAgent* agent) override;
absl::Status Initialize(CoordinationServiceAgent* agent,
const std::string& preemption_notifier_type) override;
absl::Status Initialize(
CoordinationServiceAgent* agent,
std::unique_ptr<PreemptionNotifier> notifier) override;
bool ReachedSyncPoint(int step_counter) override;
private:
void ComputeSyncCallCounter(absl::Time death_time);
void CancelPreemptionBarrier();
absl::Mutex mu_;
int64_t call_counter_ ABSL_GUARDED_BY(mu_) = 0;
int64_t preemption_sync_counter_ ABSL_GUARDED_BY(mu_) =
kPreemptionSyncUnsetCounter;
std::string current_call_counter_key_;
Env* env_;
CoordinationServiceAgent* agent_;
absl::Notification shutdown_;
std::unique_ptr<Thread> sync_protocol_thread_;
std::unique_ptr<PreemptionNotifier> preemption_notifier_;
std::shared_ptr<CallOptions> call_opts_;
};
absl::Status PreemptionSyncManagerImpl::Initialize(
CoordinationServiceAgent* agent) {
return Initialize(agent, "sigterm");
}
absl::Status PreemptionSyncManagerImpl::Initialize(
CoordinationServiceAgent* agent,
const std::string& preemption_notifier_type) {
TF_ASSIGN_OR_RETURN(Env * env, agent->GetEnv());
return Initialize(agent, PreemptionNotifier::CreatePreemptionNotifier(
preemption_notifier_type, env));
}
// Main initialization: wires up (1) a notifier callback that publishes this
// task's death time to the coordination service, and (2) a watch on that key
// so every task (including this one) kicks off the sync protocol when any
// task is preempted.
absl::Status PreemptionSyncManagerImpl::Initialize(
    CoordinationServiceAgent* agent,
    std::unique_ptr<PreemptionNotifier> notifier) {
  TF_ASSIGN_OR_RETURN(Env * env, agent->GetEnv());
  env_ = env;
  agent_ = agent;
  preemption_notifier_ = std::move(notifier);
  TF_ASSIGN_OR_RETURN(CoordinatedTask own_task, agent->GetOwnTask());
  const std::string task_name =
      absl::StrCat("/job:", own_task.job_name(), "/task:", own_task.task_id());
  current_call_counter_key_ = absl::StrCat(kPreemptionCounterDirKey, task_name);
  // (1) When *this* task is notified of its own preemption, broadcast the
  // death time to the whole cluster via the coordination service.
  preemption_notifier_->WillBePreemptedAtAsync(
      [agent = agent_, task_name](absl::StatusOr<absl::Time> death_time) {
        if (!death_time.ok()) {
          // Notifier destruction delivers Cancelled; anything else is a real
          // error.
          if (absl::IsCancelled(death_time.status())) {
            LOG(INFO) << "Preemption sync protocol cancelled by notifier: "
                      << death_time.status()
                      << ". This is expected during program shutdown.";
          } else {
            LOG(ERROR) << "Error from preemption notifier: "
                       << death_time.status();
          }
          return;
        }
        notified_metric->GetCell()->Set(true);
        const absl::Status s = agent->InsertKeyValue(
            kPreemptionNoticeKey, absl::FormatTime(*death_time));
        LOG(INFO) << "Notified coordination service that this task will "
                     "be preempted at "
                  << *death_time << ". absl::Status: " << s;
      });
  // (2) Every task watches the notice key; when it appears, start the sync
  // protocol thread. call_opts_ keeps the pending RPC cancellable.
  call_opts_ = agent_->GetKeyValueAsync(
      kPreemptionNoticeKey,
      [this, agent = agent_](absl::StatusOr<std::string> status_or_death_time) {
        if (absl::IsCancelled(status_or_death_time.status())) {
          LOG(INFO) << "Cancelled call to retrieve preemption notice. This is "
                       "expected upon program shutdown.";
          return;
        } else if (!status_or_death_time.ok()) {
          LOG(WARNING)
              << "Failed to retrieve preemption notice from "
                 "coordination service: "
              << status_or_death_time.status()
              << ". This is only expected if one of the tasks is unhealthy."
                 " Check the logs for the actual root cause.";
          // Unblock any tasks already waiting at the barrier.
          agent->CancelBarrierAsync(
              kPreemptionBarrier, [](const absl::Status& status) {
                if (!status.ok()) {
                  LOG(ERROR)
                      << "Failed to cancel preemption barrier: " << status;
                }
              });
          return;
        }
        std::string err;
        absl::Time death_time;
        if (absl::ParseTime(absl::RFC3339_full, *status_or_death_time,
                            &death_time, &err)) {
          LOG(INFO) << "Received preemption notice with death_time "
                    << death_time;
        } else {
          LOG(ERROR) << "Unable to parse preemption notice's death time: "
                     << err;
          CancelPreemptionBarrier();
          return;
        }
        // Run the (blocking) protocol off the RPC callback thread.
        sync_protocol_thread_ = absl::WrapUnique(env_->StartThread(
            {}, "PreemptionSyncManager_SyncProtocol",
            std::bind(&PreemptionSyncManagerImpl::ComputeSyncCallCounter, this,
                      death_time)));
      });
  return absl::OkStatus();
}
// Runs on sync_protocol_thread_. Sleeps until kProtocolDuration before
// `death_time`, then publishes this task's call counter, waits at the
// cluster-wide barrier, and derives the shared sync point from the maximum
// counter across all tasks.
void PreemptionSyncManagerImpl::ComputeSyncCallCounter(absl::Time death_time) {
  const absl::Duration remaining_time = death_time - absl::Now();
  if (remaining_time > kProtocolDuration) {
    LOG(INFO) << "Will begin preemption sync protocol in " << remaining_time;
    const absl::Duration sleep_time = remaining_time - kProtocolDuration;
    // Sleep, but wake immediately if the manager is being destroyed.
    if (shutdown_.WaitForNotificationWithTimeout(sleep_time)) {
      LOG(WARNING)
          << "Shutdown is triggered before preemption sync protocol has begun.";
      CancelPreemptionBarrier();
      return;
    }
  }
  // Holding mu_ for the rest of the protocol freezes call_counter_:
  // concurrent ReachedSyncPoint() calls block until the sync point is decided.
  absl::MutexLock l(&mu_);
  const absl::Status notified_status = agent_->InsertKeyValue(
      current_call_counter_key_, std::to_string(call_counter_));
  if (!notified_status.ok()) {
    LOG(ERROR) << "Preemption sync failed - could not inform service of "
                  "current call counter: "
               << notified_status;
    CancelPreemptionBarrier();
    return;
  }
  // Rendezvous: ensures every task has published its counter before reading.
  const absl::Status barrier_status =
      agent_->WaitAtBarrier(kPreemptionBarrier, kPreemptionBarrierTimeout, {});
  if (!barrier_status.ok()) {
    LOG(ERROR) << "Preemption sync barrier failed: " << barrier_status;
    return;
  }
  absl::StatusOr<std::vector<KeyValueEntry>> all_counters =
      agent_->GetKeyValueDir(kPreemptionCounterDirKey);
  if (!all_counters.ok()) {
    LOG(ERROR) << "Preemption sync failed - unable to retrieve call counters: "
               << all_counters.status();
    return;
  }
  int64_t max_counter = kPreemptionSyncUnsetCounter;
  for (const auto& kv : *all_counters) {
    int64_t call_counter;
    if (!absl::SimpleAtoi(kv.value(), &call_counter)) {
      LOG(ERROR) << "Preemption sync failed - failed to parse preemption call "
                    "counter: "
                 << kv.DebugString();
      return;
    }
    max_counter = std::max(max_counter, call_counter);
  }
  if (max_counter == kPreemptionSyncUnsetCounter) {
    LOG(ERROR) << "Preemption sync failed - no call counters found.";
    return;
  }
  // All tasks stop one step past the slowest task's current counter.
  preemption_sync_counter_ = max_counter + 1;
  LOG(INFO) << "Preemption sync counter is set: " << preemption_sync_counter_;
  set_sync_point_metric->GetCell()->Set(true);
}
void PreemptionSyncManagerImpl::CancelPreemptionBarrier() {
  // Best-effort: a failed cancellation is only logged, never propagated.
  auto log_if_failed = [](const absl::Status& status) {
    if (!status.ok()) {
      LOG(ERROR) << "Failed to cancel preemption barrier: " << status;
    }
  };
  agent_->CancelBarrierAsync(kPreemptionBarrier, log_if_failed);
}
bool PreemptionSyncManagerImpl::ReachedSyncPoint(int step_counter) {
sync_usage_metric->GetCell()->Set(true);
absl::MutexLock l(&mu_);
call_counter_ = step_counter;
VLOG(3) << "Current call counter: " << call_counter_
<< ", Preemption sync point: " << preemption_sync_counter_;
const bool reached_sync_point = preemption_sync_counter_ == call_counter_;
if (reached_sync_point) {
reached_sync_point_metric->GetCell()->Set(true);
}
return reached_sync_point;
}
}
std::unique_ptr<PreemptionSyncManager> CreatePreemptionSyncManager() {
return std::make_unique<PreemptionSyncManagerImpl>();
}
} | #include "xla/tsl/distributed_runtime/preemption/preemption_sync_manager.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/support/channel_arguments.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/distributed_runtime/preemption/preemption_notifier.h"
#include "xla/tsl/distributed_runtime/rpc/async_service_interface.h"
#include "xla/tsl/distributed_runtime/rpc/coordination/grpc_coordination_client.h"
#include "xla/tsl/distributed_runtime/rpc/coordination/grpc_coordination_service_impl.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedJob;
using tensorflow::CoordinatedTask;
using tensorflow::CoordinationServiceConfig;
// Job name shared by both test tasks.
constexpr char kJobName[] = "test_worker";
// PreemptionNotifier whose death time is injected by the test through
// AnnounceDeath() instead of a real OS signal.
class FakePreemptionNotifier : public PreemptionNotifier {
 public:
  FakePreemptionNotifier() : PreemptionNotifier(nullptr) {}
  ~FakePreemptionNotifier() override {
    // Mirror real notifiers: pending listeners receive Cancelled on shutdown.
    NotifyRegisteredListeners(
        absl::CancelledError("~FakePreemptionNotifier() was called."));
  }
  // Delivers `death_time` to all registered listeners.
  void AnnounceDeath(absl::Time death_time) {
    LOG(WARNING) << "Received preemption notice with death time: "
                 << death_time;
    NotifyRegisteredListeners(death_time);
  }
};
// Fixture: starts a real in-process gRPC coordination service, connects two
// agents to it, and initializes one PreemptionSyncManager per agent, each
// driven by a FakePreemptionNotifier.
class PreemptionSyncManagerTest : public ::testing::Test {
 protected:
  PreemptionSyncManagerTest() {
    StartCoordinationService();
    InitializeAndConnectCoordinationAgents();
    // Keep raw handles to the notifiers before ownership moves to the
    // managers.
    auto preempt_notifier = std::make_unique<FakePreemptionNotifier>();
    preempt_notifier_ = preempt_notifier.get();
    CHECK_OK(preempt_sync_mgr_->Initialize(coord_agent_.get(),
                                           std::move(preempt_notifier)));
    auto preempt_notifier2 = std::make_unique<FakePreemptionNotifier>();
    preempt_notifier2_ = preempt_notifier2.get();
    CHECK_OK(preempt_sync_mgr2_->Initialize(coord_agent2_.get(),
                                            std::move(preempt_notifier2)));
  }
  ~PreemptionSyncManagerTest() override {
    // Tear-down order matters: managers and agents must go before the
    // service and the RPC machinery they talk to.
    preempt_sync_mgr_ = nullptr;
    preempt_sync_mgr2_ = nullptr;
    coord_agent_ = nullptr;
    coord_agent2_ = nullptr;
    coord_service_ = nullptr;
    static_cast<tsl::GrpcCoordinationServiceImpl*>(coord_rpc_service_.get())
        ->SetCoordinationServiceInstance(nullptr);
    grpc_server_->Shutdown();
    coord_rpc_service_->Shutdown();
  }
  // Fakes a preemption notice on task 1 (or task 2 when `to_task1` is false),
  // then sleeps briefly so the async sync protocol can run.
  void SendPreemptionNotice(absl::Time death_time = absl::Now(),
                            bool to_task1 = true) {
    if (to_task1) {
      preempt_notifier_->AnnounceDeath(death_time);
    } else {
      preempt_notifier2_->AnnounceDeath(death_time);
    }
    Env::Default()->SleepForMicroseconds(
        absl::ToInt64Microseconds(absl::Seconds(1)));
  }
  // Marks task 2 as errored so cluster barriers involving it fail.
  void SimulateUnhealthyTaskTwo() {
    CoordinatedTask task2;
    task2.set_job_name(kJobName);
    task2.set_task_id(1);
    CHECK_OK(coord_service_->ReportTaskError(
        task2, absl::InternalError("test_error")));
  }
  // Managers under test (one per task).
  std::unique_ptr<PreemptionSyncManager> preempt_sync_mgr_ =
      CreatePreemptionSyncManager();
  std::unique_ptr<PreemptionSyncManager> preempt_sync_mgr2_ =
      CreatePreemptionSyncManager();
 protected:
  // Builds the gRPC server hosting the coordination service and starts its
  // RPC-handling loop on a dedicated thread.
  void StartCoordinationService() {
    ::grpc::ServerBuilder builder;
    coord_service_ = EnableCoordinationService();
    coord_compute_pool_ = std::make_unique<thread::ThreadPool>(
        Env::Default(), "CoordinationServiceRpcHandler",
        1);
    coord_rpc_service_ = std::make_unique<GrpcCoordinationServiceImpl>(
        coord_compute_pool_.get(), &builder);
    auto* grpc_coord_service =
        static_cast<GrpcCoordinationServiceImpl*>(coord_rpc_service_.get());
    grpc_coord_service->SetCoordinationServiceInstance(coord_service_.get());
    grpc_server_ = builder.BuildAndStart();
    coord_rpc_thread_ = absl::WrapUnique(Env::Default()->StartThread(
        {}, "CoordinationServiceHandleRPCsLoop",
        [service = coord_rpc_service_.get()]() { service->HandleRPCsLoop(); }));
  }
  // Standalone service configured with a single two-task job.
  std::unique_ptr<CoordinationServiceInterface> EnableCoordinationService() {
    CoordinationServiceConfig config;
    config.set_service_type("standalone");
    CoordinatedJob* job = config.mutable_coordinated_job_list()->Add();
    job->set_name(kJobName);
    job->set_num_tasks(2);
    return CoordinationServiceInterface::EnableCoordinationService(
        Env::Default(), config, nullptr);
  }
  // Connects both agents (task 0 and task 1) over in-process channels.
  void InitializeAndConnectCoordinationAgents() {
    std::unique_ptr<CoordinationClient> coord_client =
        absl::WrapUnique(NewGrpcCoordinationClient(
            grpc_server_->InProcessChannel(::grpc::ChannelArguments())));
    std::unique_ptr<CoordinationClient> coord_client2 =
        absl::WrapUnique(NewGrpcCoordinationClient(
            grpc_server_->InProcessChannel(::grpc::ChannelArguments())));
    auto error_fn = [](const absl::Status& status) {
      LOG(ERROR) << "Coordination service agent in error status: " << status;
    };
    CoordinationServiceConfig coord_config;
    coord_config.set_service_leader("test_leader");
    CHECK_OK(coord_agent_->Initialize(Env::Default(), kJobName,
                                      0, coord_config,
                                      std::move(coord_client), error_fn));
    CHECK_OK(coord_agent2_->Initialize(Env::Default(), kJobName,
                                       1, coord_config,
                                       std::move(coord_client2), error_fn));
    CHECK_OK(coord_agent_->Connect());
    CHECK_OK(coord_agent2_->Connect());
  }
  std::unique_ptr<CoordinationServiceInterface> coord_service_;
  std::unique_ptr<::grpc::Server> grpc_server_;
  std::unique_ptr<thread::ThreadPool> coord_compute_pool_;
  std::unique_ptr<AsyncServiceInterface> coord_rpc_service_;
  std::unique_ptr<Thread> coord_rpc_thread_;
  std::unique_ptr<CoordinationServiceAgent> coord_agent_ =
      CreateCoordinationServiceAgent();
  FakePreemptionNotifier* preempt_notifier_;  // Owned by preempt_sync_mgr_.
  std::unique_ptr<CoordinationServiceAgent> coord_agent2_ =
      CreateCoordinationServiceAgent();
  FakePreemptionNotifier* preempt_notifier2_;  // Owned by preempt_sync_mgr2_.
};
// Without any preemption notice, no sync point is ever reached.
TEST_F(PreemptionSyncManagerTest, NoPreemption_NoSyncPoint) {
  int step_counter = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
// After a preemption notice, exactly one subsequent step is the sync point.
TEST_F(PreemptionSyncManagerTest, Preemption_SingleSyncPoint) {
  int step_counter = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  SendPreemptionNotice();
  EXPECT_TRUE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
// A death time far in the future means the protocol has not started yet, so
// no sync point is set.
TEST_F(PreemptionSyncManagerTest, DelayedPreemption_NoSyncPointYet) {
  int step_counter = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  SendPreemptionNotice(absl::Now() + absl::Hours(1));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
// If a peer task is unhealthy, the barrier fails and no sync point is set.
TEST_F(PreemptionSyncManagerTest, UnhealthyTask_NoSyncPoint) {
  int step_counter = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  SimulateUnhealthyTaskTwo();
  SendPreemptionNotice();
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
// Clean agent shutdown without any preemption must not trigger a sync point.
TEST_F(PreemptionSyncManagerTest, ShutdownTasksWithoutPreemption) {
  int step_counter = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
  CHECK_OK(coord_agent_->Shutdown());
  CHECK_OK(coord_agent2_->Shutdown());
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter++));
}
// The sync point is max counter + 1: the slower task (task 1, preempted)
// catches up to the faster task's progress before both sync.
TEST_F(PreemptionSyncManagerTest, PreemptSlowTask) {
  int step_counter0 = 0;
  int step_counter2 = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
  EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
  EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
  SendPreemptionNotice();
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_TRUE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_TRUE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
}
// Same as PreemptSlowTask, but the preemption notice arrives at the faster
// task (task 2); the resulting sync point is identical.
TEST_F(PreemptionSyncManagerTest, PreemptFastTask) {
  int step_counter0 = 0;
  int step_counter2 = 0;
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
  EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
  EXPECT_FALSE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
  SendPreemptionNotice(absl::Now(), false);
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_FALSE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_TRUE(preempt_sync_mgr_->ReachedSyncPoint(step_counter0++));
  EXPECT_TRUE(preempt_sync_mgr2_->ReachedSyncPoint(step_counter2++));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/preemption/preemption_sync_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5652e31a-425e-4615-b061-8fea0c9b6ba5 | cpp | tensorflow/tensorflow | legalization_op_config | tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc | tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config_test.cc | #include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "llvm/ADT/DenseSet.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tpu_embedding_ops_registry.h"
namespace mlir {
namespace mhlo {
namespace {
// TF ops that are always legalized with the MLIR-native lowering (never via
// the tf2xla fallback). Heap-allocated so the set is never destructed at
// program exit.
const llvm::DenseSet<mlir::TypeID>& MlirAlwaysOps() {
  static const llvm::DenseSet<mlir::TypeID>* ops = new llvm::DenseSet<
      mlir::TypeID>{
      TypeID::get<TF::FusedBatchNormV3Op>(),
      TypeID::get<TF::FusedBatchNormGradV3Op>(),
      TypeID::get<TF::XlaReduceScatterOp>(),
      TypeID::get<TF::ModOp>(),
      TypeID::get<TF::MatrixDiagPartV3Op>(),
      TypeID::get<TF::AbsOp>(),
      TypeID::get<TF::AtanOp>(),
      TypeID::get<TF::AvgPool3DOp>(),
      TypeID::get<TF::BiasAddGradOp>(),
      TypeID::get<TF::CeilOp>(),
      TypeID::get<TF::CheckNumericsOp>(),
      TypeID::get<TF::CosOp>(),
      TypeID::get<TF::TanOp>(),
      TypeID::get<TF::DiagPartOp>(),
      TypeID::get<TF::EinsumOp>(),
      TypeID::get<TF::ExpOp>(),
      TypeID::get<TF::Expm1Op>(),
      TypeID::get<TF::FakeQuantWithMinMaxArgsOp>(),
      TypeID::get<TF::FloorOp>(),
      TypeID::get<TF::IFFTOp>(),
      TypeID::get<TF::ImagOp>(),
      TypeID::get<TF::IsFiniteOp>(),
      TypeID::get<TF::IsInfOp>(),
      TypeID::get<TF::IsNanOp>(),
      TypeID::get<TF::LgammaOp>(),
      TypeID::get<TF::Log1pOp>(),
      TypeID::get<TF::LogSoftmaxOp>(),
      TypeID::get<TF::MatrixBandPartOp>(),
      TypeID::get<TF::MaxPool3DGradOp>(),
      TypeID::get<TF::PreventGradientOp>(),
      TypeID::get<TF::RandomShuffleOp>(),
      TypeID::get<TF::RealOp>(),
      TypeID::get<TF::ReciprocalOp>(),
      TypeID::get<TF::ReluOp>(),
      TypeID::get<TF::Relu6Op>(),
      TypeID::get<TF::ReluGradOp>(),
      TypeID::get<TF::RsqrtOp>(),
      TypeID::get<TF::SelectOp>(),
      TypeID::get<TF::SigmoidOp>(),
      TypeID::get<TF::SignOp>(),
      TypeID::get<TF::SoftmaxOp>(),
      TypeID::get<TF::SqrtOp>(),
      TypeID::get<TF::TanhOp>(),
      TypeID::get<TF::XlaConvV2Op>(),
      TypeID::get<TF::XlaDotOp>(),
      TypeID::get<TF::XlaDotV2Op>(),
      TypeID::get<TF::XlaDynamicSliceOp>(),
      TypeID::get<TF::XlaEinsumOp>(),
      TypeID::get<TF::XlaReduceWindowOp>(),
      TypeID::get<TF::XlaReplicaIdOp>(),
      TypeID::get<TF::XlaRngBitGeneratorOp>(),
      TypeID::get<TF::XlaSelectAndScatterOp>(),
      TypeID::get<TF::XlaSortOp>(),
      TypeID::get<TF::XlaVariadicReduceV2Op>(),
      TypeID::get<TF::XlaVariadicSortOp>(),
      TypeID::get<TF::RiscAddOp>(),
      TypeID::get<TF::RiscDotOp>(),
      TypeID::get<TF::ConstOp>(),
      TypeID::get<TF::AssertOp>(),
      TypeID::get<TF::CrossReplicaSumOp>(),
      TypeID::get<TF::InfeedDequeueTupleOp>(),
      TypeID::get<TF::OutfeedEnqueueTupleOp>(),
      TypeID::get<TF::XlaShardingOp>(),
      // Control-flow / region ops must be handled by the MLIR bridge.
      TypeID::get<TF::IfRegionOp>(),
      TypeID::get<TF::WhileRegionOp>(),
      TypeID::get<TF::CaseRegionOp>(),
      TypeID::get<TF::YieldOp>(),
  };
  return *ops;
}
// Allow-list of TF ops that may be legalized through the tf2xla kernel
// fallback. Built once; also pulls in any TPU embedding ops registered at
// runtime. Heap-allocated so the set is never destructed.
// NOTE: a few entries (e.g. TanOp, QrOp, RandomUniformOp, SliceOp,
// StridedSliceOp, ConcatV2Op) also appear in other sets in this file; the
// sets are consulted independently, so the overlap is harmless.
bool IsOpTypeAllowedTf2XlaFallback(const TypeID& type_id) {
  static auto* ops = [] {
    llvm::SmallDenseSet<mlir::TypeID, 512>* ops_set = new llvm::SmallDenseSet<
        mlir::TypeID, 512>{
      TypeID::get<TF::AcoshOp>(),
      TypeID::get<TF::AcosOp>(),
      TypeID::get<TF::AddNOp>(),
      TypeID::get<TF::AddV2Op>(),
      TypeID::get<TF::AngleOp>(),
      TypeID::get<TF::AdjustContrastv2Op>(),
      TypeID::get<TF::AdjustHueOp>(),
      TypeID::get<TF::AdjustSaturationOp>(),
      TypeID::get<TF::ApproximateEqualOp>(),
      TypeID::get<TF::ApproxTopKOp>(),
      TypeID::get<TF::ArgMaxOp>(),
      TypeID::get<TF::ArgMinOp>(),
      TypeID::get<TF::AsinhOp>(),
      TypeID::get<TF::AsinOp>(),
      TypeID::get<TF::Atan2Op>(),
      TypeID::get<TF::AtanhOp>(),
      TypeID::get<TF::BatchMatMulV2Op>(),
      TypeID::get<TF::BatchMatMulV3Op>(),
      TypeID::get<TF::BatchToSpaceOp>(),
      TypeID::get<TF::BesselI0eOp>(),
      TypeID::get<TF::BesselI1eOp>(),
      TypeID::get<TF::BetaincOp>(),
      TypeID::get<TF::BiasAddOp>(),
      TypeID::get<TF::BitwiseAndOp>(),
      TypeID::get<TF::BitwiseOrOp>(),
      TypeID::get<TF::BitwiseXorOp>(),
      TypeID::get<TF::BucketizeOp>(),
      TypeID::get<TF::CaseOp>(),
      TypeID::get<TF::CastOp>(),
      TypeID::get<TF::ClipByValueOp>(),
      TypeID::get<TF::CholeskyOp>(),
      TypeID::get<TF::CollectiveReduceV2Op>(),
      TypeID::get<TF::ComplexAbsOp>(),
      TypeID::get<TF::ConjugateTransposeOp>(),
      TypeID::get<TF::ConcatV2Op>(),
      TypeID::get<TF::ConvOp>(),
      TypeID::get<TF::CoshOp>(),
      TypeID::get<TF::CrossOp>(),
      TypeID::get<TF::CumulativeLogsumexpOp>(),
      TypeID::get<TF::DataFormatDimMapOp>(),
      TypeID::get<TF::DataFormatVecPermuteOp>(),
      TypeID::get<TF::DepthToSpaceOp>(),
      TypeID::get<TF::DepthwiseConv2dNativeBackpropFilterOp>(),
      TypeID::get<TF::DepthwiseConv2dNativeBackpropInputOp>(),
      TypeID::get<TF::DiagOp>(),
      TypeID::get<TF::DigammaOp>(),
      TypeID::get<TF::DivNoNanOp>(),
      TypeID::get<TF::DynamicPartitionOp>(),
      TypeID::get<TF::EluGradOp>(),
      TypeID::get<TF::EluOp>(),
      TypeID::get<TF::EnsureShapeOp>(),
      TypeID::get<TF::EqualOp>(),
      TypeID::get<TF::ErfcOp>(),
      TypeID::get<TF::ErfinvOp>(),
      TypeID::get<TF::ErfOp>(),
      TypeID::get<TF::ExtractImagePatchesOp>(),
      TypeID::get<TF::FFT2DOp>(),
      TypeID::get<TF::FFT3DOp>(),
      TypeID::get<TF::FFTOp>(),
      TypeID::get<TF::FakeParamOp>(),
      TypeID::get<TF::FakeQuantWithMinMaxArgsGradientOp>(),
      TypeID::get<TF::FakeQuantWithMinMaxVarsGradientOp>(),
      TypeID::get<TF::FakeQuantWithMinMaxVarsPerChannelOp>(),
      TypeID::get<TF::FakeQuantWithMinMaxVarsPerChannelGradientOp>(),
      TypeID::get<TF::FloorDivOp>(),
      TypeID::get<TF::FloorModOp>(),
      TypeID::get<TF::GetMinibatchesInCsrWithPhysicalReplicaOp>(),
      TypeID::get<TF::GetMinibatchSplitsWithPhysicalReplicaOp>(),
      TypeID::get<TF::GreaterOp>(),
      TypeID::get<TF::HSVToRGBOp>(),
      TypeID::get<TF::IFFT2DOp>(),
      TypeID::get<TF::IFFT3DOp>(),
      TypeID::get<TF::IRFFT2DOp>(),
      TypeID::get<TF::IRFFT3DOp>(),
      TypeID::get<TF::IgammaOp>(),
      TypeID::get<TF::IgammacOp>(),
      TypeID::get<TF::IgammaGradAOp>(),
      TypeID::get<TF::InplaceAddOp>(),
      TypeID::get<TF::InTopKV2Op>(),
      TypeID::get<TF::InvertOp>(),
      TypeID::get<TF::InvOp>(),
      TypeID::get<TF::KthOrderStatisticOp>(),
      TypeID::get<TF::LRNOp>(),
      TypeID::get<TF::LRNGradOp>(),
      TypeID::get<TF::LeakyReluGradOp>(),
      TypeID::get<TF::LeakyReluOp>(),
      TypeID::get<TF::LeftShiftOp>(),
      TypeID::get<TF::LessOp>(),
      TypeID::get<TF::ListDiffOp>(),
      TypeID::get<TF::LogicalAndOp>(),
      TypeID::get<TF::LogicalNotOp>(),
      TypeID::get<TF::LogOp>(),
      TypeID::get<TF::LowerBoundOp>(),
      TypeID::get<TF::MakeUniqueOp>(),
      TypeID::get<TF::MatMulOp>(),
      TypeID::get<TF::MatrixDiagV3Op>(),
      TypeID::get<TF::MatrixInverseOp>(),
      TypeID::get<TF::MatrixSetDiagV3Op>(),
      TypeID::get<TF::MatrixSolveOp>(),
      TypeID::get<TF::MatrixTriangularSolveOp>(),
      TypeID::get<TF::MaxPool3DGradGradOp>(),
      TypeID::get<TF::MaxPoolGradOp>(),
      TypeID::get<TF::MaxPoolGradGradOp>(),
      TypeID::get<TF::MirrorPadOp>(),
      TypeID::get<TF::MirrorPadGradOp>(),
      TypeID::get<TF::MulOp>(),
      TypeID::get<TF::MultinomialOp>(),
      TypeID::get<TF::NdtriOp>(),
      TypeID::get<TF::NegOp>(),
      TypeID::get<TF::NextAfterOp>(),
      TypeID::get<TF::NonMaxSuppressionV4Op>(),
      TypeID::get<TF::NotEqualOp>(),
      TypeID::get<TF::PadOp>(),
      TypeID::get<TF::ParameterizedTruncatedNormalOp>(),
      TypeID::get<TF::PlaceholderWithDefaultOp>(),
      TypeID::get<TF::PolygammaOp>(),
      TypeID::get<TF::PopulationCountOp>(),
      TypeID::get<TF::PowOp>(),
      TypeID::get<TF::QrOp>(),
      TypeID::get<TF::QuantizeAndDequantizeOp>(),
      TypeID::get<TF::QuantizeAndDequantizeV2Op>(),
      TypeID::get<TF::QuantizeAndDequantizeV3Op>(),
      TypeID::get<TF::QuantizeAndDequantizeV4Op>(),
      TypeID::get<TF::RFFT2DOp>(),
      TypeID::get<TF::RFFT3DOp>(),
      TypeID::get<TF::RGBToHSVOp>(),
      TypeID::get<TF::RandomUniformIntOp>(),
      TypeID::get<TF::RandomUniformOp>(),
      TypeID::get<TF::RealDivOp>(),
      TypeID::get<TF::ReciprocalGradOp>(),
      TypeID::get<TF::Relu6GradOp>(),
      TypeID::get<TF::ResizeBilinearOp>(),
      TypeID::get<TF::ResizeBilinearGradOp>(),
      TypeID::get<TF::ResizeNearestNeighborOp>(),
      TypeID::get<TF::ResizeNearestNeighborGradOp>(),
      TypeID::get<TF::ReverseSequenceOp>(),
      TypeID::get<TF::RightShiftOp>(),
      TypeID::get<TF::RintOp>(),
      TypeID::get<TF::RollOp>(),
      TypeID::get<TF::RoundOp>(),
      TypeID::get<TF::SegmentSumV2Op>(),
      TypeID::get<TF::SegmentProdV2Op>(),
      TypeID::get<TF::SegmentMinV2Op>(),
      TypeID::get<TF::SegmentMaxV2Op>(),
      TypeID::get<TF::SelectV2Op>(),
      TypeID::get<TF::SelfAdjointEigV2Op>(),
      TypeID::get<TF::SeluGradOp>(),
      TypeID::get<TF::SeluOp>(),
      TypeID::get<TF::SigmoidGradOp>(),
      TypeID::get<TF::SinOp>(),
      TypeID::get<TF::SliceOp>(),
      TypeID::get<TF::SoftplusGradOp>(),
      TypeID::get<TF::SoftsignGradOp>(),
      TypeID::get<TF::SoftsignOp>(),
      TypeID::get<TF::SpaceToBatchNDOp>(),
      TypeID::get<TF::SpaceToBatchOp>(),
      TypeID::get<TF::SpaceToDepthOp>(),
      TypeID::get<TF::SparseToDenseOp>(),
      TypeID::get<TF::SquareOp>(),
      TypeID::get<TF::StatelessMultinomialOp>(),
      TypeID::get<TF::StatelessParameterizedTruncatedNormalOp>(),
      TypeID::get<TF::StatelessRandomGetAlgOp>(),
      TypeID::get<TF::StatelessRandomGetKeyCounterOp>(),
      TypeID::get<TF::StatelessRandomGetKeyCounterAlgOp>(),
      TypeID::get<TF::StatelessRandomNormalOp>(),
      TypeID::get<TF::StatelessRandomNormalV2Op>(),
      TypeID::get<TF::StatelessRandomUniformOp>(),
      TypeID::get<TF::StatelessRandomUniformFullIntOp>(),
      TypeID::get<TF::StatelessRandomUniformFullIntV2Op>(),
      TypeID::get<TF::StatelessRandomUniformV2Op>(),
      TypeID::get<TF::StatelessRandomUniformIntOp>(),
      TypeID::get<TF::StatelessRandomUniformIntV2Op>(),
      TypeID::get<TF::StatelessTruncatedNormalOp>(),
      TypeID::get<TF::StatelessTruncatedNormalV2Op>(),
      TypeID::get<TF::StoreMinibatchStatisticsInFdoOp>(),
      TypeID::get<TF::StridedSliceOp>(),
      TypeID::get<TF::SubOp>(),
      TypeID::get<TF::SvdOp>(),
      TypeID::get<TF::TanOp>(),
      TypeID::get<TF::TensorScatterAddOp>(),
      TypeID::get<TF::TensorScatterSubOp>(),
      TypeID::get<TF::TPUEmbeddingActivationsOp>(),
      TypeID::get<TF::TopKUniqueOp>(),
      TypeID::get<TF::TopKWithUniqueOp>(),
      TypeID::get<TF::TransposeOp>(),
      TypeID::get<TF::TridiagonalSolveOp>(),
      TypeID::get<TF::TridiagonalMatMulOp>(),
      TypeID::get<TF::TruncateDivOp>(),
      TypeID::get<TF::TruncatedNormalOp>(),
      TypeID::get<TF::TruncateModOp>(),
      TypeID::get<TF::UniqueOp>(),
      TypeID::get<TF::UnpackOp>(),
      TypeID::get<TF::UpperBoundOp>(),
      TypeID::get<TF::WhereOp>(),
      TypeID::get<TF::XlaSendTPUEmbeddingGradientsOp>(),
      TypeID::get<TF::XlaBroadcastHelperOp>(),
      TypeID::get<TF::XlaCallModuleOp>(),
      TypeID::get<TF::XlaCustomCallV2Op>(),
      TypeID::get<TF::XlaDynamicUpdateSliceOp>(),
      TypeID::get<TF::XlaKeyValueSortOp>(),
      TypeID::get<TF::XlaPadOp>(),
      TypeID::get<TF::XlaSetBoundOp>(),
      TypeID::get<TF::XlaSetDynamicDimensionSizeOp>(),
      TypeID::get<TF::XlaSparseCoreAdagradMomentumOp>(),
      TypeID::get<TF::XlaSparseCoreAdagradOp>(),
      TypeID::get<TF::XlaSparseCoreAdamOp>(),
      TypeID::get<TF::XlaSparseCoreFtrlOp>(),
      TypeID::get<TF::XlaSparseCoreSgdOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulGradWithAdagradAndCsrInputOp>(),
      TypeID::get<
          TF::XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulGradWithAdamAndCsrInputOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulGradWithFtrlAndCsrInputOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulGradWithSgdAndCsrInputOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulWithCsrInputOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulWithStaticBufferSizeOp>(),
      TypeID::get<
          TF::XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeOp>(),
      TypeID::get<
          TF::XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeOp>(),
      TypeID::get<
          TF::XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeOp>(),
      TypeID::get<
          TF::XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeOp>(),
      TypeID::get<
          TF::XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeOp>(),
      TypeID::get<TF::XlaSparseDenseMatmulGradWithCsrInputOp>(),
      TypeID::get<TF::XlaSpmdFullToShardShapeOp>(),
      TypeID::get<TF::XlaSpmdShardToFullShapeOp>(),
      TypeID::get<TF::XlaSvdOp>(),
    };
    // TPU embedding ops registered dynamically are also allowed to fall back.
    for (auto op_type_id :
         TF::TPUEmbeddingOpsRegistry::Global().GetOpsTypeIds()) {
      ops_set->insert(op_type_id);
    }
    return ops_set;
  }();
  return ops->count(type_id);
}
// TF ops for which the tf2xla fallback is *preferred* over the MLIR-native
// lowering. Heap-allocated so the set is never destructed at program exit.
bool IsOpTypeAllowedTf2XlaPreferred(const TypeID& type_id) {
  static auto* ops =
      new llvm::SmallDenseSet<mlir::TypeID, 512>{
    TypeID::get<TF::AllOp>(),
    TypeID::get<TF::AllToAllOp>(),
    TypeID::get<TF::AnyOp>(),
    TypeID::get<TF::AvgPoolOp>(),
    TypeID::get<TF::AvgPool3DGradOp>(),
    TypeID::get<TF::AvgPoolGradOp>(),
    TypeID::get<TF::BatchToSpaceNDOp>(),
    TypeID::get<TF::BitcastOp>(),
    TypeID::get<TF::BroadcastToOp>(),
    TypeID::get<TF::CollectivePermuteOp>(),
    TypeID::get<TF::ComplexOp>(),
    TypeID::get<TF::ConcatV2Op>(),
    TypeID::get<TF::ConjOp>(),
    TypeID::get<TF::Conv2DOp>(),
    TypeID::get<TF::Conv2DBackpropFilterOp>(),
    TypeID::get<TF::Conv2DBackpropInputOp>(),
    TypeID::get<TF::Conv3DOp>(),
    TypeID::get<TF::Conv3DBackpropFilterV2Op>(),
    TypeID::get<TF::Conv3DBackpropInputV2Op>(),
    TypeID::get<TF::CumprodOp>(),
    TypeID::get<TF::CumsumOp>(),
    TypeID::get<TF::DepthwiseConv2dNativeOp>(),
    TypeID::get<TF::DivOp>(),
    TypeID::get<TF::DynamicStitchOp>(),
    TypeID::get<TF::_EagerConstOp>(),
    TypeID::get<TF::EmptyOp>(),
    TypeID::get<TF::ExpandDimsOp>(),
    TypeID::get<TF::FakeQuantWithMinMaxVarsOp>(),
    TypeID::get<TF::FillOp>(),
    TypeID::get<TF::FusedBatchNormOp>(),
    TypeID::get<TF::FusedBatchNormGradOp>(),
    TypeID::get<TF::FusedBatchNormGradV2Op>(),
    TypeID::get<TF::FusedBatchNormV2Op>(),
    TypeID::get<TF::_FusedConv2DOp>(),
    TypeID::get<TF::GatherNdOp>(),
    TypeID::get<TF::GatherV2Op>(),
    TypeID::get<TF::GreaterEqualOp>(),
    TypeID::get<TF::IdentityOp>(),
    TypeID::get<TF::IdentityNOp>(),
    TypeID::get<TF::InplaceUpdateOp>(),
    TypeID::get<TF::InvertPermutationOp>(),
    TypeID::get<TF::IRFFTOp>(),
    TypeID::get<TF::L2LossOp>(),
    TypeID::get<TF::LegacyCallOp>(),
    TypeID::get<TF::LessEqualOp>(),
    TypeID::get<TF::LinSpaceOp>(),
    TypeID::get<TF::LogicalOrOp>(),
    TypeID::get<TF::MaxOp>(),
    TypeID::get<TF::MaximumOp>(),
    TypeID::get<TF::MaxPoolOp>(),
    TypeID::get<TF::MaxPool3DOp>(),
    TypeID::get<TF::MeanOp>(),
    TypeID::get<TF::MinOp>(),
    TypeID::get<TF::MinimumOp>(),
    TypeID::get<TF::MulNoNanOp>(),
    TypeID::get<TF::OneHotOp>(),
    TypeID::get<TF::OnesLikeOp>(),
    TypeID::get<TF::PackOp>(),
    TypeID::get<TF::PadV2Op>(),
    TypeID::get<TF::ParallelDynamicStitchOp>(),
    TypeID::get<TF::PartitionedCallOp>(),
    TypeID::get<TF::ProdOp>(),
    TypeID::get<TF::QrOp>(),
    TypeID::get<TF::RandomStandardNormalOp>(),
    TypeID::get<TF::RandomUniformOp>(),
    TypeID::get<TF::RangeOp>(),
    TypeID::get<TF::ReshapeOp>(),
    TypeID::get<TF::ReverseV2Op>(),
    TypeID::get<TF::RFFTOp>(),
    TypeID::get<TF::RsqrtGradOp>(),
    TypeID::get<TF::ScatterNdOp>(),
    TypeID::get<TF::ShapeOp>(),
    TypeID::get<TF::SinhOp>(),
    TypeID::get<TF::SizeOp>(),
    TypeID::get<TF::SliceOp>(),
    TypeID::get<TF::SoftmaxCrossEntropyWithLogitsOp>(),
    TypeID::get<TF::SoftplusOp>(),
    TypeID::get<TF::SparseMatMulOp>(),
    TypeID::get<TF::SparseSoftmaxCrossEntropyWithLogitsOp>(),
    TypeID::get<TF::SplitOp>(),
    TypeID::get<TF::SplitVOp>(),
    TypeID::get<TF::SqrtGradOp>(),
    TypeID::get<TF::SquaredDifferenceOp>(),
    TypeID::get<TF::SqueezeOp>(),
    TypeID::get<TF::StatelessParameterizedTruncatedNormalOp>(),
    TypeID::get<TF::StatefulPartitionedCallOp>(),
    TypeID::get<TF::StopGradientOp>(),
    TypeID::get<TF::StridedSliceOp>(),
    TypeID::get<TF::StridedSliceGradOp>(),
    TypeID::get<TF::SumOp>(),
    TypeID::get<TF::TanhGradOp>(),
    TypeID::get<TF::TensorScatterUpdateOp>(),
    TypeID::get<TF::TileOp>(),
    TypeID::get<TF::TopKV2Op>(),
    TypeID::get<TF::_UnaryOpsCompositionOp>(),
    TypeID::get<TF::UnsortedSegmentMaxOp>(),
    TypeID::get<TF::UnsortedSegmentMinOp>(),
    TypeID::get<TF::UnsortedSegmentProdOp>(),
    TypeID::get<TF::UnsortedSegmentSumOp>(),
    TypeID::get<TF::XdivyOp>(),
    TypeID::get<TF::XlaSendTPUEmbeddingGradientsOp>(),
    TypeID::get<TF::XlaAllReduceOp>(),
    TypeID::get<TF::XlaGatherOp>(),
    TypeID::get<TF::Xlog1pyOp>(),
    TypeID::get<TF::XlogyOp>(),
    TypeID::get<TF::ZerosLikeOp>(),
    TypeID::get<TF::ZetaOp>(),
  };
  return ops->contains(type_id);
}
// TF ops whose output shapes are data-dependent and therefore need dynamic
// padder handling. Heap-allocated so the set is never destructed.
const llvm::DenseSet<mlir::TypeID>& DynamicTensorflowOps() {
  static const auto* const dynamic_ops = new llvm::DenseSet<mlir::TypeID>{
      TypeID::get<mlir::TF::DynamicPartitionOp>(),
      TypeID::get<mlir::TF::UniqueOp>(),
      TypeID::get<mlir::TF::WhereOp>(),
      TypeID::get<mlir::TF::XlaSetDynamicDimensionSizeOp>(),
  };
  return *dynamic_ops;
}
}
// True when either fallback mechanism (allowed or preferred) covers this op.
bool HasTf2XlaFallback(const TypeID& type_id) {
  if (IsOpTypeAllowedTf2XlaFallback(type_id)) return true;
  return IsOpTypeAllowedTf2XlaPreferred(type_id);
}
// Unregistered ops have no TypeID to look up, so they are never MLIR-legal.
bool IsOpLegalizedWithMlir(Operation& op) {
  auto registered_info = op.getRegisteredInfo();
  return registered_info ? IsTypeLegalizedWithMlir(registered_info->getTypeID())
                         : false;
}
// Membership check against the always-use-MLIR set.
bool IsTypeLegalizedWithMlir(const TypeID& type_id) {
  return MlirAlwaysOps().count(type_id) != 0;
}
// Public wrapper over the internal tf2xla fallback allow-list.
bool IsOpAllowedTf2xlaFallback(const TypeID& type_id) {
  const bool allowed = IsOpTypeAllowedTf2XlaFallback(type_id);
  return allowed;
}
// Public wrapper over the internal tf2xla preferred-list.
bool IsOpAllowedTf2xlaPreferred(const TypeID& type_id) {
  const bool preferred = IsOpTypeAllowedTf2XlaPreferred(type_id);
  return preferred;
}
// Membership check against the dynamic-shape op set.
bool IsDynamicPadderOp(const TypeID& type_id) {
  return DynamicTensorflowOps().count(type_id) != 0;
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include <optional>
#include <set>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
using func::FuncOp;
using mlir::ModuleOp;
// Minimal TF MLIR module used as the default fixture input; its @main
// contains a single tf.Unpack op.
static constexpr char kMlirModuleStr[] = R"(
  module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
    func.func @main(%arg0: tensor<3xi64> {tf._user_specified_name = "resource", tf.aliasing_output = 3 : i64}) -> () attributes {tf.entry_function = {control_outputs = "stateful_normal/RngReadAndSkip,stateful_uniform/RngReadAndSkip,stateful_uniform_full_int/RngReadAndSkip", inputs = "stateful_normal_rngreadandskip_resource", outputs = "identity_RetVal,identity_1_RetVal,identity_2_RetVal"}} {
      %0:3 = "tf.Unpack"(%arg0) {axis = 0 : i64} : (tensor<3xi64>) -> (tensor<i64>, tensor<i64>, tensor<i64>)
      return
    }
  })";
// Test fixture that parses an MLIR module from a string and exposes a helper
// for retrieving its `main` function.
class LegalizationOpConfigTest : public ::testing::Test {
 public:
  // Parses `module_string` into `module_` and loads all dialects registered
  // with the fixture's context so ops resolve to registered operations.
  absl::Status CreateMlirModule(std::string module_string = kMlirModuleStr) {
    TF_ASSIGN_OR_RETURN(
        module_, test::GetMlirModuleFromString(module_string, &context_));
    context_.loadAllAvailableDialects();
    return absl::OkStatus();
  }
  // Looks up the `main` function in the parsed module; NotFound if absent.
  absl::StatusOr<FuncOp> GetMain() {
    func::FuncOp main = module_->lookupSymbol<mlir::func::FuncOp>("main");
    if (!main) {
      return absl::NotFoundError("Could not find main function");
    }
    return main;
  }
 protected:
  // Context must outlive `module_`, which it owns types/dialects for.
  MLIRContext context_;
  // Populated by CreateMlirModule().
  OwningOpRef<ModuleOp> module_;
};
// A ModuleOp itself is not an op with an MLIR-only lowering, so the
// whole-module query must return false.
TEST_F(LegalizationOpConfigTest, FailsWithExpectsLegalizationWithMlir) {
  TF_EXPECT_OK(CreateMlirModule());
  EXPECT_FALSE(IsOpLegalizedWithMlir(*module_->getOperation()))
}
// None of the ops inside the fixture module's `main` (tf.Unpack, func ops)
// are in the MLIR-only lowering set.
TEST_F(LegalizationOpConfigTest, ExpectsFalseForNonMlirOps) {
  TF_EXPECT_OK(CreateMlirModule());
  TF_ASSERT_OK_AND_ASSIGN(FuncOp main, GetMain());
  main.walk([&](Operation* op) { EXPECT_FALSE(IsOpLegalizedWithMlir(*op)); });
}
// tf.Mod is lowered by MLIR-only patterns: present in the MLIR set and in
// neither tf2xla set.
TEST_F(LegalizationOpConfigTest, ExpectsTrueForMlirTypeID) {
  EXPECT_TRUE(IsTypeLegalizedWithMlir(TypeID::get<TF::ModOp>()));
  EXPECT_FALSE(HasTf2XlaFallback(TypeID::get<TF::ModOp>()));
  EXPECT_FALSE(IsOpAllowedTf2xlaFallback(TypeID::get<TF::ModOp>()));
  EXPECT_FALSE(IsOpAllowedTf2xlaPreferred(TypeID::get<TF::ModOp>()));
}
// tf.All is legalized via the tf2xla bridge (preferred set), not via the
// MLIR-only patterns.
TEST_F(LegalizationOpConfigTest, ExpectsTrueForTF2XLATypeID) {
  EXPECT_TRUE(HasTf2XlaFallback(TypeID::get<TF::AllOp>()));
  EXPECT_TRUE(IsOpAllowedTf2xlaPreferred(TypeID::get<TF::AllOp>()));
  EXPECT_FALSE(IsTypeLegalizedWithMlir(TypeID::get<TF::AllOp>()));
}
// Dynamic padder set membership: tf.XlaSetDynamicDimensionSize is in,
// tf.Const is not.
TEST_F(LegalizationOpConfigTest, ChecksDynamicPadderOps) {
  EXPECT_TRUE(
      IsDynamicPadderOp(TypeID::get<TF::XlaSetDynamicDimensionSizeOp>()));
  EXPECT_FALSE(IsDynamicPadderOp(TypeID::get<TF::ConstOp>()));
}
TEST_F(LegalizationOpConfigTest, CountLoweringsSet) {
int mlir_lowering_count = 0;
int tf2xla_fallback_count = 0;
int non_categorized_count = 0;
DialectRegistry dialect_registry;
dialect_registry.insert<mlir::TF::TensorFlowDialect>();
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
for (auto operation : context.getRegisteredOperations()) {
if (IsTypeLegalizedWithMlir(operation.getTypeID())) {
mlir_lowering_count++;
} else if (HasTf2XlaFallback(operation.getTypeID())) {
tf2xla_fallback_count++;
} else {
non_categorized_count++;
}
}
EXPECT_EQ(mlir_lowering_count, 67);
EXPECT_EQ(tf2xla_fallback_count, 323);
EXPECT_EQ(non_categorized_count, 430);
}
TEST_F(LegalizationOpConfigTest, CountTypesWhichHaveBothMlirAndTf2xlaFallback) {
int double_lowering_count = 0;
DialectRegistry dialect_registry;
dialect_registry.insert<mlir::TF::TensorFlowDialect>();
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
for (auto operation : context.getRegisteredOperations()) {
if (IsTypeLegalizedWithMlir(operation.getTypeID()) &&
HasTf2XlaFallback(operation.getTypeID())) {
double_lowering_count++;
}
}
EXPECT_EQ(double_lowering_count, 1);
}
TEST_F(LegalizationOpConfigTest, CountAllMlirLoweringPatterns) {
DialectRegistry dialect_registry;
mlir::RegisterCommonToolingDialects(dialect_registry);
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
RewritePatternSet mlir_legalize_lower_patterns(&context);
PopulateLegalizeTfPatterns(&context, &mlir_legalize_lower_patterns);
int mlir_only_patterns = 0;
for (auto& pattern : mlir_legalize_lower_patterns.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
if (!pat_op_name) {
continue;
}
if (!HasTf2XlaFallback(pat_op_name->getRegisteredInfo()->getTypeID())) {
mlir_only_patterns++;
}
}
EXPECT_EQ(mlir_only_patterns, 63);
}
TEST_F(LegalizationOpConfigTest, MlirLoweringWithoutXlaKernel) {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
std::vector<const tensorflow::KernelDef*> kernel_defs =
tensorflow::XlaOpRegistry::DeviceKernels(
tensorflow::DEVICE_CPU_XLA_JIT,
true);
std::set<std::string> xla_op_kernels;
for (auto kernel_def : kernel_defs) {
std::string tf_name = "tf." + kernel_def->op();
xla_op_kernels.insert(tf_name);
}
DialectRegistry dialect_registry;
mlir::RegisterCommonToolingDialects(dialect_registry);
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
RewritePatternSet mlir_legalize_lower_patterns(&context);
PopulateLegalizeTfPatterns(&context, &mlir_legalize_lower_patterns);
int mlir_without_xla_count = 0;
for (auto& pattern : mlir_legalize_lower_patterns.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
if (!pat_op_name) {
continue;
}
if (xla_op_kernels.find(pat_op_name->getStringRef().str()) ==
xla_op_kernels.end()) {
mlir_without_xla_count++;
}
}
EXPECT_EQ(mlir_without_xla_count, 13);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d854d58e-93ef-48d2-b091-d6dc7fbb5240 | cpp | google/arolla | math | arolla/qexpr/operators/math/math.h | arolla/qexpr/operators/math/math_test.cc | #ifndef AROLLA_OPERATORS_MATH_MATH_H_
#define AROLLA_OPERATORS_MATH_MATH_H_
#include <cmath>
namespace arolla {
// Natural logarithm, log(x). Matches std::log semantics: log(0) is -inf and
// log of a negative value is NaN. Separate float/double overloads keep the
// computation in the argument's own precision.
struct LogOp {
  float operator()(float x) const { return std::log(x); }
  double operator()(double x) const { return std::log(x); }
};
// Base-2 logarithm, log2(x), delegating to std::log2.
struct Log2Op {
  template <typename T>
  T operator()(T x) const { return std::log2(x); }
};
// Base-10 logarithm, log10(x), delegating to std::log10.
struct Log10Op {
  template <typename T>
  T operator()(T x) const { return std::log10(x); }
};
// log(1 + x), accurate for small |x|, delegating to std::log1p.
struct Log1pOp {
  template <typename T>
  T operator()(T x) const { return std::log1p(x); }
};
struct Symlog1pOp {
template <typename T>
T operator()(T x) const {
return x >= 0 ? std::log1p(x) : -std::log1p(-x);
}
};
// Exponential, e**x. Separate float/double overloads keep the computation in
// the argument's own precision.
struct ExpOp {
  float operator()(float x) const { return std::exp(x); }
  double operator()(double x) const { return std::exp(x); }
};
struct Expm1Op {
template <typename T>
T operator()(T x) const {
return std::expm1(x);
}
};
// Power: a ** b, delegating to std::pow.
//
// The non-template double overload is not redundant: it makes mixed-type
// calls (e.g. (float, double)) well-formed via implicit conversion, where
// template argument deduction alone would fail with conflicting T.
struct PowOp {
  template <typename T>
  T operator()(T a, T b) const { return std::pow(a, b); }
  double operator()(double a, double b) const { return std::pow(a, b); }
};
// Parameterized logistic function: 1 / (1 + exp(slope * (half - value))).
// Evaluates to 0.5 at value == half; `slope` controls the steepness.
struct SigmoidOp {
  template <typename T>
  T operator()(T value, T half, T slope) const {
    const T exponent = slope * (half - value);
    return 1.0f / (1.0f + ExpOp()(exponent));
  }
};
// Log-odds of a probability p: log(p / (1 - p)), evaluated as
// log(p) - log1p(-p) for accuracy near p == 1.
struct LogitOp {
  template <typename T>
  T operator()(T p) const {
    const T log_p = LogOp()(p);
    return log_p - std::log1p(-p);
  }
};
// Numerically stable log(sigmoid(x)): avoids overflow by folding exp of a
// non-positive argument only — equals -log1p(exp(-x)) for x >= 0 and
// x - log1p(exp(x)) for x < 0.
struct LogSigmoidOp {
  template <typename T>
  T operator()(T x) const {
    if (x < 0) {
      return x - std::log1p(std::exp(x));
    }
    return -std::log1p(std::exp(-x));
  }
};
// Sine of x (radians), via std::sin.
struct SinOp {
  template <typename T>
  T operator()(T x) const { return std::sin(x); }
};
// Cosine of x (radians), via std::cos.
struct CosOp {
  template <typename T>
  T operator()(T x) const { return std::cos(x); }
};
// Hyperbolic sine, via std::sinh.
struct SinhOp {
  template <typename T>
  T operator()(T x) const { return std::sinh(x); }
};
// Arc tangent, via std::atan. Result is in (-pi/2, pi/2).
struct AtanOp {
  template <typename T>
  T operator()(T x) const { return std::atan(x); }
};
}
#endif | #include <cmath>
#include <limits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::AllOf;
using ::testing::DoubleEq;
using ::testing::DoubleNear;
using ::testing::Eq;
using ::testing::FloatEq;
using ::testing::FloatNear;
using ::testing::Ge;
using ::testing::Gt;
using ::testing::IsNan;
using ::testing::Le;
using ::testing::Lt;
const float kPi = 3.1415927f;
TEST(ArithmeticOperatorsTest, Log) {
EXPECT_THAT(InvokeOperator<float>("math.log", 1.f), IsOkAndHolds(0.f));
EXPECT_THAT(InvokeOperator<float>("math.log", 2.f),
IsOkAndHolds(FloatEq(std::log(2.f))));
EXPECT_THAT(InvokeOperator<float>("math.log", 0.f),
IsOkAndHolds(-std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<float>("math.log", -5.f), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<double>("math.log", 2.),
IsOkAndHolds(DoubleEq(std::log(2.))));
EXPECT_THAT(InvokeOperator<double>("math.log", 1.), IsOkAndHolds(0.));
EXPECT_THAT(InvokeOperator<double>("math.log", 0.),
IsOkAndHolds(-std::numeric_limits<double>::infinity()));
EXPECT_THAT(InvokeOperator<double>("math.log", -4.), IsOkAndHolds(IsNan()));
}
TEST(ArithmeticOperatorsTest, Log2) {
EXPECT_THAT(InvokeOperator<float>("math.log2", 1.f), IsOkAndHolds(0.f));
EXPECT_THAT(InvokeOperator<float>("math.log2", 2.f),
IsOkAndHolds(FloatEq(std::log2(2.f))));
EXPECT_THAT(InvokeOperator<float>("math.log2", 0.f),
IsOkAndHolds(-std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<float>("math.log2", -5.f), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<double>("math.log2", 1.), IsOkAndHolds(0.));
EXPECT_THAT(InvokeOperator<double>("math.log2", 2.),
IsOkAndHolds(DoubleEq(std::log2(2.))));
EXPECT_THAT(InvokeOperator<double>("math.log2", 0.),
IsOkAndHolds(-std::numeric_limits<double>::infinity()));
EXPECT_THAT(InvokeOperator<double>("math.log2", -4.), IsOkAndHolds(IsNan()));
}
TEST(ArithmeticOperatorsTest, Log10) {
EXPECT_THAT(InvokeOperator<float>("math.log10", 1.f), IsOkAndHolds(0.f));
EXPECT_THAT(InvokeOperator<float>("math.log10", 2.f),
IsOkAndHolds(FloatEq(std::log10(2.f))));
EXPECT_THAT(InvokeOperator<float>("math.log10", 0.f),
IsOkAndHolds(-std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<float>("math.log10", -5.f), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<double>("math.log10", 1.), IsOkAndHolds(0.));
EXPECT_THAT(InvokeOperator<double>("math.log10", 2.),
IsOkAndHolds(DoubleEq(std::log10(2.))));
EXPECT_THAT(InvokeOperator<double>("math.log10", 0.),
IsOkAndHolds(-std::numeric_limits<double>::infinity()));
EXPECT_THAT(InvokeOperator<double>("math.log10", -4.), IsOkAndHolds(IsNan()));
}
TEST(ArithmeticOperatorsTest, Log1p) {
EXPECT_THAT(InvokeOperator<float>("math.log1p", 0.f), IsOkAndHolds(0.f));
EXPECT_THAT(InvokeOperator<float>("math.log1p", 2.f),
IsOkAndHolds(FloatEq(std::log1p(2.f))));
EXPECT_THAT(InvokeOperator<float>("math.log1p", -1.f),
IsOkAndHolds(-std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<float>("math.log1p", -5.f), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<double>("math.log1p", 0.), IsOkAndHolds(0.));
EXPECT_THAT(InvokeOperator<double>("math.log1p", 2.),
IsOkAndHolds(DoubleEq(std::log1p(2.))));
EXPECT_THAT(InvokeOperator<double>("math.log1p", -1.),
IsOkAndHolds(-std::numeric_limits<double>::infinity()));
EXPECT_THAT(InvokeOperator<double>("math.log1p", -4.), IsOkAndHolds(IsNan()));
}
TEST(ArithmeticOperatorsTest, Symlog1p) {
EXPECT_THAT(InvokeOperator<float>("math.symlog1p", 0.f), IsOkAndHolds(0.));
EXPECT_THAT(InvokeOperator<float>("math.symlog1p", 2.f),
IsOkAndHolds(FloatEq(std::log1p(2.))));
EXPECT_THAT(InvokeOperator<float>("math.symlog1p", -2.f),
IsOkAndHolds(FloatEq(-std::log1p(2.))));
EXPECT_THAT(InvokeOperator<double>("math.symlog1p", 0.), IsOkAndHolds(0.));
EXPECT_THAT(InvokeOperator<double>("math.symlog1p", 2.),
IsOkAndHolds(DoubleEq(std::log1p(2.))));
EXPECT_THAT(InvokeOperator<double>("math.symlog1p", -2.),
IsOkAndHolds(DoubleEq(-std::log1p(2.))));
}
TEST(MathOperatorsTest, Exp) {
EXPECT_THAT(InvokeOperator<float>("math.exp", 0.f), IsOkAndHolds(1.f));
EXPECT_THAT(InvokeOperator<float>("math.exp", 2.f),
IsOkAndHolds(FloatEq(std::exp(2.f))));
EXPECT_THAT(InvokeOperator<double>("math.exp", 0.), IsOkAndHolds(Eq(1.)));
EXPECT_THAT(InvokeOperator<double>("math.exp", 2.),
IsOkAndHolds(DoubleEq(std::exp(2.))));
}
TEST(MathOperatorsTest, Expm1) {
EXPECT_THAT(InvokeOperator<float>("math.expm1", 0.f), IsOkAndHolds(0.f));
EXPECT_THAT(InvokeOperator<float>("math.expm1", 2.f),
IsOkAndHolds(FloatEq(std::expm1(2.f))));
EXPECT_THAT(InvokeOperator<double>("math.expm1", 0.), IsOkAndHolds(Eq(0.)));
EXPECT_THAT(InvokeOperator<double>("math.expm1", 2.),
IsOkAndHolds(DoubleEq(std::expm1(2.))));
}
TEST(MathOperatorsTest, Sigmoid) {
for (float slope = 1; slope < 5; slope++) {
EXPECT_THAT(InvokeOperator<float>("math.sigmoid", 10.f, 10.f, slope),
IsOkAndHolds(0.5f))
<< slope;
EXPECT_THAT(InvokeOperator<double>("math.sigmoid", 10., 10., double{slope}),
IsOkAndHolds(0.5))
<< slope;
float epsilon = 0.001;
EXPECT_THAT(InvokeOperator<float>("math.sigmoid", -10.f, 10.f, slope),
IsOkAndHolds(AllOf(Lt(epsilon), Ge(0))))
<< slope;
EXPECT_THAT(InvokeOperator<float>("math.sigmoid", 20.f, 10.f, slope),
IsOkAndHolds(AllOf(Gt(1. - epsilon), Le(1.))))
<< slope;
}
EXPECT_THAT(InvokeOperator<float>("math.sigmoid", 2.f, 4.f, 5.f),
IsOkAndHolds(FloatEq(1.f / (1.f + std::exp(5.f * (2.f))))));
EXPECT_THAT(InvokeOperator<double>("math.sigmoid", 2., 4., 5.),
IsOkAndHolds(DoubleEq(1. / (1. + std::exp(5. * (2.))))));
}
TEST(MathOperatorsTest, LogSigmoid) {
EXPECT_THAT(
InvokeOperator<float>("math.log_sigmoid", 5.f),
IsOkAndHolds(FloatNear(std::log(1 / (1 + std::exp(-5.f))), 1e-5)));
EXPECT_THAT(
InvokeOperator<float>("math.log_sigmoid", 0.f),
IsOkAndHolds(FloatNear(std::log(1 / (1 + std::exp(-0.f))), 1e-5)));
EXPECT_THAT(InvokeOperator<float>("math.log_sigmoid", -5.f),
IsOkAndHolds(FloatNear(std::log(1 / (1 + std::exp(5.f))), 1e-5)));
EXPECT_THAT(
InvokeOperator<double>("math.log_sigmoid", 5.),
IsOkAndHolds(DoubleNear(std::log(1 / (1 + std::exp(-5.))), 1e-5)));
EXPECT_THAT(
InvokeOperator<double>("math.log_sigmoid", 0.),
IsOkAndHolds(DoubleNear(std::log(1 / (1 + std::exp(-0.))), 1e-5)));
EXPECT_THAT(InvokeOperator<double>("math.log_sigmoid", -5.),
IsOkAndHolds(DoubleNear(std::log(1 / (1 + std::exp(5.))), 1e-5)));
EXPECT_THAT(InvokeOperator<float>("math.log_sigmoid", -1000.f),
IsOkAndHolds(FloatNear(-1000.f, 1e-05)));
EXPECT_THAT(InvokeOperator<double>("math.log_sigmoid", -1000.),
IsOkAndHolds(DoubleNear(-1000., 1e-05)));
EXPECT_THAT(InvokeOperator<float>("math.log_sigmoid", 100.f),
IsOkAndHolds(FloatNear(-std::exp(-100.f), 1e-50)));
EXPECT_THAT(InvokeOperator<double>("math.log_sigmoid", 100.),
IsOkAndHolds(DoubleNear(-std::exp(-100.), 1e-50)));
}
TEST(MathOperatorsTest, Logit) {
EXPECT_THAT(InvokeOperator<float>("math.logit", 0.f),
IsOkAndHolds(-std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<float>("math.logit", 1.f),
IsOkAndHolds(std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<float>("math.logit", 0.5f),
IsOkAndHolds(FloatNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<float>("math.logit", -1.f), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<float>("math.logit", 2.f), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<double>("math.logit", 0.),
IsOkAndHolds(-std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<double>("math.logit", 1.),
IsOkAndHolds(std::numeric_limits<float>::infinity()));
EXPECT_THAT(InvokeOperator<double>("math.logit", 0.5),
IsOkAndHolds(DoubleNear(0., 1e-05)));
EXPECT_THAT(InvokeOperator<double>("math.logit", -1.), IsOkAndHolds(IsNan()));
EXPECT_THAT(InvokeOperator<double>("math.logit", 2.), IsOkAndHolds(IsNan()));
}
TEST(MathOperatorsTest, Sin) {
EXPECT_THAT(InvokeOperator<float>("math.trig.sin", kPi),
IsOkAndHolds(FloatNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<float>("math.trig.sin", 1.f),
IsOkAndHolds(FloatEq(std::sin(1.f))));
EXPECT_THAT(InvokeOperator<double>("math.trig.sin", double{kPi}),
IsOkAndHolds(DoubleNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<double>("math.trig.sin", 1.),
IsOkAndHolds(DoubleEq(std::sin(1.))));
}
TEST(MathOperatorsTest, Cos) {
EXPECT_THAT(InvokeOperator<float>("math.trig.cos", kPi),
IsOkAndHolds(FloatNear(-1.f, 1e-05)));
EXPECT_THAT(InvokeOperator<float>("math.trig.cos", 1.f),
IsOkAndHolds(FloatEq(std::cos(1.f))));
EXPECT_THAT(InvokeOperator<double>("math.trig.cos", double{kPi}),
IsOkAndHolds(DoubleNear(-1.f, 1e-05)));
EXPECT_THAT(InvokeOperator<double>("math.trig.cos", 1.),
IsOkAndHolds(DoubleEq(std::cos(1.))));
}
TEST(MathOperatorsTest, Sinh) {
EXPECT_THAT(InvokeOperator<float>("math.trig.sinh", 0.f),
IsOkAndHolds(FloatNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<float>("math.trig.sinh", 1.f),
IsOkAndHolds(FloatEq(std::sinh(1.f))));
EXPECT_THAT(InvokeOperator<double>("math.trig.sinh", 0.),
IsOkAndHolds(DoubleNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<double>("math.trig.sinh", 1.),
IsOkAndHolds(DoubleEq(std::sinh(1.))));
}
TEST(MathOperatorsTest, atan) {
EXPECT_THAT(InvokeOperator<float>("math.trig.atan", 0.f),
IsOkAndHolds(FloatNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<float>("math.trig.atan", 1.f),
IsOkAndHolds(FloatEq(std::atan(1.f))));
EXPECT_THAT(InvokeOperator<double>("math.trig.atan", 0.),
IsOkAndHolds(DoubleNear(0.f, 1e-05)));
EXPECT_THAT(InvokeOperator<double>("math.trig.atan", 1.),
IsOkAndHolds(DoubleEq(std::atan(1.))));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/math/math.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/math/math_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
77850688-3555-4dde-8343-88ffd0e70fbc | cpp | google/tensorstore | std_tuple | tensorstore/internal/json_binding/std_tuple.h | tensorstore/internal/json_binding/std_tuple_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_STD_TUPLE_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_STD_TUPLE_H_
#include <stddef.h>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
// Prefixes a failed `status` with the tuple position that produced it, e.g.
// "Error parsing value at position 2". Ok statuses pass through untouched.
inline absl::Status MaybeAnnotateTupleElementError(absl::Status status,
                                                   size_t i, bool is_loading) {
  if (status.ok()) return status;
  const char* verb = is_loading ? "parsing" : "converting";
  return MaybeAnnotateStatus(
      status,
      tensorstore::StrCat("Error ", verb, " value at position ", i));
}
// When loading: validates that *j is a JSON array of exactly `n` elements and
// returns a pointer to it. When saving: replaces *j with a fresh array of `n`
// elements for the caller to fill in.
template <bool IsLoading>
Result<::nlohmann::json::array_t*> EnsureJsonTupleRepresentationImpl(
    std::integral_constant<bool, IsLoading> is_loading, ::nlohmann::json* j,
    size_t n) {
  if constexpr (is_loading) {
    auto* array_ptr = j->get_ptr<::nlohmann::json::array_t*>();
    if (!array_ptr) return internal_json::ExpectedError(*j, "array");
    // Tuples have fixed arity, so the array length must match exactly.
    TENSORSTORE_RETURN_IF_ERROR(
        internal_json::JsonValidateArrayLength(array_ptr->size(), n));
    return array_ptr;
  } else {
    *j = ::nlohmann::json::array_t(n);
    return j->get_ptr<::nlohmann::json::array_t*>();
  }
}
// Returns a binder that binds a std::tuple-like `obj` elementwise to a JSON
// array: element_binder[k] handles tuple element k. `Is` must be the index
// pack 0..N-1 matching the N binders.
//
// Bug fix: the success/failure returns were swapped. When an element binder
// failed, the fold expression evaluated to false, the `if` branch was
// skipped, and absl::OkStatus() was returned — silently discarding the
// position-annotated error. The fold over `&&` runs the binders left to
// right and short-circuits at the first failure, at which point `status`
// holds the annotated error and must be the return value; OkStatus() is
// correct only when every element succeeded.
template <size_t... Is, typename... ElementBinder>
constexpr auto TupleJsonBinderImpl(std::index_sequence<Is...>,
                                   ElementBinder... element_binder) {
  return [=](auto is_loading, const auto& options, auto* obj,
             ::nlohmann::json* j) -> absl::Status {
    TENSORSTORE_ASSIGN_OR_RETURN(
        ::nlohmann::json::array_t * array_ptr,
        EnsureJsonTupleRepresentationImpl(is_loading, j, sizeof...(Is)));
    if (absl::Status status;
        (((status = element_binder(is_loading, options, &std::get<Is>(*obj),
                                   &(*array_ptr)[Is]))
              .ok() ||
          ((status = MaybeAnnotateTupleElementError(status, Is, is_loading)),
           false)) &&
         ...)) {
      // All element binders succeeded.
      return absl::OkStatus();
    } else {
      // `status` holds the first element binder failure, annotated with its
      // position.
      return status;
    }
  };
}
// Returns a binder that binds a tuple-like `obj` elementwise to a JSON array
// using DefaultBinder<> for every element. Uses unqualified `get` via
// `using std::get` so ADL finds get() for tuple-like user types.
//
// Bug fix: the success/failure returns were swapped — a failing element
// binder made the fold false, skipped the `if`, and returned OkStatus(),
// swallowing the error. Now the annotated error in `status` is returned on
// failure and OkStatus() only when all elements succeed.
template <size_t... Is>
constexpr auto TupleDefaultJsonBinderImpl(std::index_sequence<Is...>) {
  return [](auto is_loading, const auto& options, auto* obj,
            ::nlohmann::json* j) -> absl::Status {
    TENSORSTORE_ASSIGN_OR_RETURN(
        ::nlohmann::json::array_t * array_ptr,
        EnsureJsonTupleRepresentationImpl(is_loading, j, sizeof...(Is)));
    using std::get;
    if (absl::Status status;
        (((status = DefaultBinder<>(is_loading, options, &get<Is>(*obj),
                                    &(*array_ptr)[Is]))
              .ok() ||
          ((status = MaybeAnnotateTupleElementError(status, Is, is_loading)),
           false)) &&
         ...)) {
      // All element binders succeeded.
      return absl::OkStatus();
    } else {
      // `status` holds the first failure, annotated with its position.
      return status;
    }
  };
}
// Index-sequence variant of HeterogeneousArray: each element_binder[k]
// receives the WHOLE object (not element k), binding it to JSON array slot k
// — typically used with per-member Projection binders.
//
// Bug fix: the success/failure returns were swapped, so a failing element
// binder resulted in OkStatus() being returned and the annotated error
// discarded. On failure the fold short-circuits with the error in `status`,
// which is now returned; OkStatus() only when every binder succeeds.
template <size_t... Is, typename... ElementBinder>
constexpr auto HeterogeneousArrayJsonBinderImpl(
    std::index_sequence<Is...>, ElementBinder... element_binder) {
  return [=](auto is_loading, const auto& options, auto* obj,
             ::nlohmann::json* j) -> absl::Status {
    TENSORSTORE_ASSIGN_OR_RETURN(
        ::nlohmann::json::array_t * array_ptr,
        EnsureJsonTupleRepresentationImpl(is_loading, j, sizeof...(Is)));
    if (absl::Status status;
        (((status = element_binder(is_loading, options, obj, &(*array_ptr)[Is]))
              .ok() ||
          ((status = MaybeAnnotateTupleElementError(status, Is, is_loading)),
           false)) &&
         ...)) {
      // All element binders succeeded.
      return absl::OkStatus();
    } else {
      // `status` holds the first failure, annotated with its position.
      return status;
    }
  };
}
// Builds a tuple binder from one explicit binder per element; arity is the
// number of binders supplied.
template <typename... ElementBinder>
constexpr auto Tuple(ElementBinder... element_binder) {
  using Indices = std::index_sequence_for<ElementBinder...>;
  return TupleJsonBinderImpl(Indices{}, std::move(element_binder)...);
}
// Binder for tuple-like types using DefaultBinder<> for every element. The
// arity is deduced at bind time from std::tuple_size of the bound object, so
// one binder object works for any std::tuple / std::pair instantiation.
constexpr auto Tuple() {
  return [](auto is_loading, const auto& options, auto* obj, auto* j) {
    constexpr size_t N =
        std::tuple_size_v<absl::remove_cvref_t<decltype(*obj)>>;
    return TupleDefaultJsonBinderImpl(std::make_index_sequence<N>{})(
        is_loading, options, obj, j);
  };
}
// Binds a single object to a fixed-length JSON array where slot k is
// produced/consumed by element_binder[k] applied to the WHOLE object
// (typically one jb::Projection per struct member).
// NOTE(review): HeterogeneousArrayJsonBinderImpl above appears unused by this
// function within the visible chunk — confirm against the rest of the file.
template <typename... ElementBinder>
constexpr auto HeterogeneousArray(ElementBinder... element_binder) {
  return [=](auto is_loading, const auto& options, auto* obj,
             ::nlohmann::json* j) {
    TENSORSTORE_ASSIGN_OR_RETURN(::nlohmann::json::array_t * array_ptr,
                                 EnsureJsonTupleRepresentationImpl(
                                     is_loading, j, sizeof...(ElementBinder)));
    absl::Status status;
    size_t i = 0;
    // Fold over && evaluates the binders left to right (i++ selects the next
    // array slot) and short-circuits at the first failure, leaving the
    // position-annotated error in `status`. On success, `status` holds the
    // last binder's OK status, so returning it is correct either way.
    [[maybe_unused]] bool ok =
        (((status =
               element_binder(is_loading, options, obj, &(*array_ptr)[i++]))
              .ok() ||
          ((status = MaybeAnnotateTupleElementError(status, i - 1, is_loading)),
           false)) &&
         ...);
    return status;
  };
}
// Register Tuple() as the default JSON binder for std::tuple and std::pair,
// so jb::DefaultBinder<> handles them without an explicit binder argument.
template <typename... T>
constexpr inline auto DefaultBinder<std::tuple<T...>> = Tuple();
template <typename T, typename U>
constexpr inline auto DefaultBinder<std::pair<T, U>> = Tuple();
}
}
#endif | #include "tensorstore/internal/json_binding/std_tuple.h"
#include <string>
#include <tuple>
#include <utility>
#include <gtest/gtest.h>
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
// std::pair and std::tuple round-trip through the implicit DefaultBinder
// specializations (each element bound with its own default binder).
TEST(TupleDefaultJsonBinderTest, RoundTrip) {
  tensorstore::TestJsonBinderRoundTrip<std::pair<int, int>>({
      {{5, 5}, {5, 5}},
      {{5, 3}, {5, 3}},
  });
  tensorstore::TestJsonBinderRoundTrip<std::tuple<int, int, std::string>>({
      {{5, 5, "a"}, {5, 5, "a"}},
      {{5, 3, "b"}, {5, 3, "b"}},
  });
}
// Explicit per-element binders: each pair element is range-checked by its own
// jb::Integer binder.
TEST(TupleJsonBinderTest, RoundTrip) {
  const auto binder =
      jb::Tuple(jb::Integer<int>(0, 9), jb::Integer<int>(10, 19));
  tensorstore::TestJsonBinderRoundTrip<std::pair<int, int>>(
      {
          {{5, 15}, {5, 15}},
          {{5, 13}, {5, 13}},
      },
      binder);
}
// A struct round-trips as a 2-element JSON array via member projections:
// X::a becomes array element 0, X::b element 1.
TEST(HeterogeneousArrayJsonBinderTest, RoundTrip) {
  struct X {
    int a;
    std::string b;
  };
  tensorstore::TestJsonBinderRoundTripJsonOnly<X>(
      {
          {5, "a"},
          {5, "b"},
      },
      jb::HeterogeneousArray(jb::Projection<&X::a>(), jb::Projection<&X::b>()));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_tuple.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/std_tuple_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7a1745d7-8808-458a-b1a5-20bfab632659 | cpp | tensorflow/tensorflow | ragged_to_dense_util | tensorflow/core/util/ragged_to_dense_util.cc | tensorflow/core/util/ragged_to_dense_util_test.cc | #include "tensorflow/core/util/ragged_to_dense_util.h"
#include <algorithm>
#include <vector>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
namespace tensorflow {
using errors::InvalidArgument;
// Parses row-partition type strings into enum values via the non-status
// overload. If any string is unrecognized, the parsed prefix is shorter than
// the input and the first unknown string is reported as InvalidArgument.
tensorflow::Status GetRowPartitionTypesHelper(
    const std::vector<string>& row_partition_type_strings,
    std::vector<RowPartitionType>* row_partition_types) {
  *row_partition_types = GetRowPartitionTypesHelper(row_partition_type_strings);
  const size_t num_parsed = row_partition_types->size();
  if (num_parsed == row_partition_type_strings.size()) {
    return absl::OkStatus();
  }
  return InvalidArgument("Unknown string for partition info type: ",
                         row_partition_type_strings.at(num_parsed));
}
// Computes the static output shape for RaggedTensorToTensor by merging the
// user-supplied `shape` (possibly unknown / partially known) with the shape
// implied by the ragged input: `ragged_rank` leading dimensions plus the
// flat-values dimensions in `value_shape`, whose first dimension is absorbed
// by the ragged partitions. Returns InvalidArgument on rank or dimension
// mismatch.
tensorflow::Status CombineRaggedTensorToTensorShapes(
    int ragged_rank, const TensorShapeProto& shape,
    const TensorShapeProto& value_shape, TensorShapeProto* output_shape) {
  // If neither side carries rank information, the result is fully unknown.
  if (value_shape.unknown_rank() && shape.unknown_rank()) {
    output_shape->Clear();
    output_shape->set_unknown_rank(true);
    return absl::OkStatus();
  }
  if (shape.unknown_rank()) {
    // Rank comes only from the ragged input: ragged_rank + values rank, with
    // every dimension size unknown (-1).
    while (output_shape->dim_size() < ragged_rank + value_shape.dim_size()) {
      output_shape->add_dim()->set_size(-1);
    }
  } else {
    *output_shape = shape;
  }
  if (value_shape.unknown_rank()) {
    return absl::OkStatus();
  }
  if (ragged_rank + value_shape.dim_size() != output_shape->dim_size()) {
    return InvalidArgument(
        "rt_input.shape and shape=", TensorShape::DebugString(shape),
        " are incompatible: rt_input.rank = ",
        ragged_rank + value_shape.dim_size(),
        " but shape.rank = ", output_shape->dim_size());
  }
  // Merge the inner (dense) value dimensions, skipping value dim 0 — the
  // flattened ragged dimension, which does not appear in the output directly.
  for (int i = 1; i < value_shape.dim_size(); ++i) {
    const TensorShapeProto::Dim& value_dim = value_shape.dim(i);
    // Corresponding output dim, aligned from the right.
    TensorShapeProto::Dim* output_shape_dim = output_shape->mutable_dim(
        output_shape->dim_size() - value_shape.dim_size() + i);
    if (value_dim.size() >= 0) {
      if (output_shape_dim->size() >= 0) {
        // Both known: must agree exactly.
        if (output_shape_dim->size() != value_dim.size()) {
          return InvalidArgument(
              "rt_input.shape and shape=", TensorShape::DebugString(shape),
              " are incompatible: rt_input.shape[", i + ragged_rank,
              "] = ", value_dim.size(), " but shape[", i + ragged_rank,
              "] = ", output_shape_dim->size());
        }
      } else {
        // Output dim unknown: fill it in from the values shape.
        output_shape_dim->set_size(value_dim.size());
      }
    }
  }
  return absl::OkStatus();
}
// Checks that `default_value_shape` can be broadcast against the ragged
// tensor's flat values of shape `value_shape`: the default value's rank must
// be strictly less than the values' rank, and each known default dim must be
// 1 or equal to the corresponding value dim (value dim 0, the row dimension,
// is excluded from the comparison). Unknown ranks are accepted as-is.
tensorflow::Status ValidateDefaultValueShape(
    const TensorShapeProto& default_value_shape,
    const TensorShapeProto& value_shape) {
  // Nothing to validate when either rank is unknown.
  if (default_value_shape.unknown_rank() || value_shape.unknown_rank()) {
    return absl::OkStatus();
  }
  int default_ndims = default_value_shape.dim_size();
  int values_ndims = value_shape.dim_size();
  if (default_ndims >= values_ndims) {
    return InvalidArgument(
        "default_value.shape=", TensorShape::DebugString(default_value_shape),
        " and rt_input.flat_values.shape=",
        TensorShape::DebugString(value_shape),
        " are incompatible: default_value.rank = ", default_ndims,
        " must be less than rt_input.flat_values.rank = ", values_ndims);
  }
  // Compare default dim i against value dim i+1; unknown dims (< 0) and
  // broadcastable dims (== 1) are always compatible. The error message uses
  // negative (from-the-right) indices.
  for (int i = 0; i < std::min(default_ndims, values_ndims - 1); ++i) {
    int default_dim = default_value_shape.dim(i).size();
    int value_dim = value_shape.dim(i + 1).size();
    if (default_dim >= 0 && value_dim >= 0 && default_dim != 1 &&
        default_dim != value_dim) {
      return InvalidArgument(
          "default_value.shape=", TensorShape::DebugString(default_value_shape),
          " and rt_input.flat_values.shape=",
          TensorShape::DebugString(value_shape),
          " are incompatible: default_value.shape[",
          i - default_value_shape.dim_size(), "] = ", default_dim,
          " but rt_input.flat_values.shape[",
          i - default_value_shape.dim_size(), "] = ", value_dim);
    }
  }
  return absl::OkStatus();
}
} | #include "tensorflow/core/util/ragged_to_dense_util.h"
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Unknown input shape combined with an unknown value shape leaves the output
// rank unknown.
TEST(CombineRaggedTensorToTensorShapes, UnknownShapeUnknownValue) {
  TensorShapeProto shape_proto;
  shape_proto.set_unknown_rank(true);
  TensorShapeProto value_shape_proto;
  value_shape_proto.set_unknown_rank(true);
  int ragged_rank = 1;
  TensorShapeProto actual_output_shape_proto;
  TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
      ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
  EXPECT_EQ(true, actual_output_shape_proto.unknown_rank());
}
// Unknown input shape + rank-1 value shape with ragged_rank 1 yields a rank-2
// output with both dimensions unknown (-1).
TEST(CombineRaggedTensorToTensorShapes, UnknownShape) {
  TensorShapeProto shape_proto;
  shape_proto.set_unknown_rank(true);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(6);
  int ragged_rank = 1;
  TensorShapeProto actual_output_shape_proto;
  TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
      ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
  ASSERT_EQ(actual_output_shape_proto.dim_size(), 2);
  EXPECT_EQ(actual_output_shape_proto.dim(0).size(), -1);
  EXPECT_EQ(actual_output_shape_proto.dim(1).size(), -1);
}
// With a rank-2 value shape [6, 3], the trailing dense dimension (3) is
// preserved while the ragged dimensions stay unknown.
TEST(CombineRaggedTensorToTensorShapes, UnknownShapeDenseValue) {
  TensorShapeProto shape_proto;
  shape_proto.set_unknown_rank(true);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(6);
  value_shape_proto.add_dim()->set_size(3);
  int ragged_rank = 1;
  TensorShapeProto actual_output_shape_proto;
  TF_ASSERT_OK(CombineRaggedTensorToTensorShapes(
      ragged_rank, shape_proto, value_shape_proto, &actual_output_shape_proto));
  ASSERT_EQ(actual_output_shape_proto.dim_size(), 3);
  EXPECT_EQ(actual_output_shape_proto.dim(0).size(), -1);
  EXPECT_EQ(actual_output_shape_proto.dim(1).size(), -1);
  EXPECT_EQ(actual_output_shape_proto.dim(2).size(), 3);
}
// String names are parsed into the corresponding RowPartitionType enums,
// preserving order.
TEST(GetRowPartitionTypesHelper, BasicTest) {
  const std::vector<string> row_partition_type_strings = {
      "FIRST_DIM_SIZE", "VALUE_ROWIDS", "ROW_SPLITS"};
  std::vector<RowPartitionType> row_partition_types;
  TF_ASSERT_OK(GetRowPartitionTypesHelper(row_partition_type_strings,
                                          &row_partition_types));
  EXPECT_THAT(row_partition_types,
              ::testing::ElementsAre(RowPartitionType::FIRST_DIM_SIZE,
                                     RowPartitionType::VALUE_ROWIDS,
                                     RowPartitionType::ROW_SPLITS));
}
// Round-trips each enum back to its canonical string name.
TEST(RowPartitionTypeToString, BasicTest) {
  EXPECT_EQ("FIRST_DIM_SIZE",
            RowPartitionTypeToString(RowPartitionType::FIRST_DIM_SIZE));
  EXPECT_EQ("VALUE_ROWIDS",
            RowPartitionTypeToString(RowPartitionType::VALUE_ROWIDS));
  EXPECT_EQ("ROW_SPLITS",
            RowPartitionTypeToString(RowPartitionType::ROW_SPLITS));
}
// An unknown-rank default value is always compatible.
TEST(ValidateDefaultValueShape, UnknownDefaultValueShape) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.set_unknown_rank(true);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(6);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// An unknown-rank values tensor is always compatible.
TEST(ValidateDefaultValueShape, UnknownValueShape) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(5);
  TensorShapeProto value_shape_proto;
  value_shape_proto.set_unknown_rank(true);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// A scalar (rank-0) default value broadcasts against any values shape.
TEST(ValidateDefaultValueShape, ScalarShape) {
  TensorShapeProto default_value_shape_proto;
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// Default shape [2, 3] matches the trailing dims of values shape [5, 2, 3].
TEST(ValidateDefaultValueShape, TensorShapeEqual) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(2);
  default_value_shape_proto.add_dim()->set_size(3);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  value_shape_proto.add_dim()->set_size(2);
  value_shape_proto.add_dim()->set_size(3);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// A -1 (unknown) dimension in the default value is compatible with any size.
TEST(ValidateDefaultValueShape, TensorDimensionUnknown) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(-1);
  default_value_shape_proto.add_dim()->set_size(3);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  value_shape_proto.add_dim()->set_size(2);
  value_shape_proto.add_dim()->set_size(3);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// Symmetric case: unknown dimension in the values shape is also accepted.
TEST(ValidateDefaultValueShape, TensorDimensionUnknownForValue) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(2);
  default_value_shape_proto.add_dim()->set_size(3);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  value_shape_proto.add_dim()->set_size(-1);
  value_shape_proto.add_dim()->set_size(3);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// A lower-rank default value aligns to the trailing dims of the values shape.
TEST(ValidateDefaultValueShape, TensorDimensionFewDims) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(3);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  value_shape_proto.add_dim()->set_size(-1);
  value_shape_proto.add_dim()->set_size(3);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
// A default value with more dims than the values tensor is rejected.
TEST(ValidateDefaultValueShape, WrongNumberOfDimensions) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(-1);
  default_value_shape_proto.add_dim()->set_size(-1);
  default_value_shape_proto.add_dim()->set_size(-1);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(-1);
  value_shape_proto.add_dim()->set_size(-1);
  EXPECT_FALSE(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)
          .ok());
}
// A concrete dimension mismatch (3 vs 6) is rejected.
TEST(ValidateDefaultValueShape, WrongDimensionSize) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(3);
  default_value_shape_proto.add_dim()->set_size(-1);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  value_shape_proto.add_dim()->set_size(6);
  value_shape_proto.add_dim()->set_size(-1);
  EXPECT_FALSE(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)
          .ok());
}
// A size-1 default dimension broadcasts against any values dimension.
TEST(ValidateDefaultValueShape, WrongDimensionSizeBut1) {
  TensorShapeProto default_value_shape_proto;
  default_value_shape_proto.add_dim()->set_size(3);
  default_value_shape_proto.add_dim()->set_size(1);
  TensorShapeProto value_shape_proto;
  value_shape_proto.add_dim()->set_size(5);
  value_shape_proto.add_dim()->set_size(3);
  value_shape_proto.add_dim()->set_size(7);
  TF_EXPECT_OK(
      ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/ragged_to_dense_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/ragged_to_dense_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
887f9b4a-0eba-4a16-8403-657c911b9331 | cpp | google/tensorstore | cache_pool_resource | tensorstore/internal/cache/cache_pool_resource.cc | tensorstore/internal/cache/cache_pool_resource_test.cc | #include "tensorstore/internal/cache/cache_pool_resource.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace internal {
namespace {
// Context-resource traits for `CachePoolResource`: defines how a cache pool is
// specified in JSON, created, and reference-counted by the tensorstore
// Context machinery.
struct CachePoolResourceTraits
    : public ContextResourceTraits<CachePoolResource> {
  using Spec = CachePool::Limits;
  using Resource = typename CachePoolResource::Resource;
  // Default spec: zero limits (i.e. `total_bytes_limit == 0`).
  static constexpr Spec Default() { return {}; }
  // JSON binder: a single optional member `"total_bytes_limit"`, defaulting
  // to 0 when absent.
  static constexpr auto JsonBinder() {
    namespace jb = tensorstore::internal_json_binding;
    return jb::Object(
        jb::Member("total_bytes_limit",
                   jb::Projection(&Spec::total_bytes_limit,
                                  jb::DefaultValue([](auto* v) { *v = 0; }))));
  }
  // Creates a new pool with the requested limits; the resource holds a weak
  // pointer while Acquire/ReleaseContextReference manage the strong count.
  static Result<Resource> Create(const Spec& limits,
                                 ContextResourceCreationContext context) {
    return CachePool::WeakPtr(CachePool::Make(limits));
  }
  static Spec GetSpec(const Resource& pool, const ContextSpecBuilder& builder) {
    return pool->limits();
  }
  static void AcquireContextReference(const Resource& p) {
    internal_cache::StrongPtrTraitsCachePool::increment(p.get());
  }
  static void ReleaseContextReference(const Resource& p) {
    internal_cache::StrongPtrTraitsCachePool::decrement(p.get());
  }
};
// Registers the traits with the global context-resource registry at load time.
const ContextResourceRegistration<CachePoolResourceTraits> registration;
}
}
} | #include "tensorstore/internal/cache/cache_pool_resource.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Context;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::CachePoolResource;
// The default resource spec produces a pool with no byte limit.
TEST(CachePoolResourceTest, Default) {
  auto resource_spec = Context::Resource<CachePoolResource>::DefaultSpec();
  auto cache = Context::Default().GetResource(resource_spec).value();
  EXPECT_EQ(0u, (*cache)->limits().total_bytes_limit);
}
// An empty JSON object is equivalent to the default spec.
TEST(CachePoolResourceTest, EmptyObject) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto resource_spec, Context::Resource<CachePoolResource>::FromJson(
                              ::nlohmann::json::object_t{}));
  auto cache = Context::Default().GetResource(resource_spec).value();
  EXPECT_EQ(0u, (*cache)->limits().total_bytes_limit);
}
// `total_bytes_limit` from JSON is propagated to the created pool.
TEST(CachePoolResourceTest, TotalBytesLimitOnly) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto resource_spec, Context::Resource<CachePoolResource>::FromJson(
                              {{"total_bytes_limit", 100}}));
  auto cache = Context::Default().GetResource(resource_spec).value();
  EXPECT_EQ(100u, (*cache)->limits().total_bytes_limit);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache_pool_resource.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/cache/cache_pool_resource_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f32ef733-e25f-4261-9b9a-e3a16b86671f | cpp | tensorflow/tensorflow | resize_bilinear | tensorflow/lite/kernels/resize_bilinear.cc | tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/resize_bilinear.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace resize_bilinear {
// Selects between the portable reference kernel and the optimized kernel.
enum KernelType {
  kReference,
  kOptimized,
};
// Tensor indices within the node's input/output lists.
constexpr int kInputTensor = 0;   // 4-D input to resize
constexpr int kSizeTensor = 1;    // int32 [new_height, new_width]
constexpr int kOutputTensor = 0;
// Resizes `output` to [batch, new_height, new_width, channels], taking the
// new spatial dimensions from the int32 `size` tensor and copying batch and
// channel counts from `input`.
//
// Returns kTfLiteError (via TF_LITE_ENSURE*) if `size` does not hold exactly
// two positive values.
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                const TfLiteTensor* input,
                                const TfLiteTensor* size,
                                TfLiteTensor* output) {
  // Guard the unchecked reads of size_data[0] and size_data[1] below: a
  // malformed model could supply a size tensor with fewer than two elements,
  // which would otherwise be an out-of-bounds read.
  TF_LITE_ENSURE_EQ(context, NumElements(size), 2);
  const int32* size_data = GetTensorData<int32>(size);
  TF_LITE_ENSURE(context, size_data[0] > 0);
  TF_LITE_ENSURE(context, size_data[1] > 0);
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
  output_size->data[0] = input->dims->data[0];  // batch
  output_size->data[1] = size_data[0];          // new height
  output_size->data[2] = size_data[1];          // new width
  output_size->data[3] = input->dims->data[3];  // channels
  return context->ResizeTensor(context, output, output_size);
}
// Validates node inputs/outputs and shapes the output tensor. If the size
// tensor is not constant, the output is marked dynamic and resized at Eval
// time instead.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const TfLiteTensor* size;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Input must be NHWC; size must be a rank-1 int32 tensor.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
  TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
  TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32);
  output->type = input->type;
  if (!IsConstantOrPersistentTensor(size)) {
    // Output shape depends on runtime data; defer resizing to Eval.
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }
  auto* params =
      reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
  // NOTE(review): this param-combination check only runs when `size` is
  // constant; for dynamic sizes the invalid combination is not rejected
  // here — confirm this is intended.
  if (params->half_pixel_centers && params->align_corners) {
    TF_LITE_KERNEL_LOG(
        context, "If half_pixel_centers is True, align_corners must be False.");
    return kTfLiteError;
  }
  return ResizeOutputTensor(context, input, size, output);
}
// Runs bilinear resize, dispatching on output dtype (float32, uint8, int8,
// int16) and on the compile-time kernel selection (reference vs optimized).
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  const TfLiteTensor* size;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSizeTensor, &size));
  if (IsDynamicTensor(output)) {
    // Size tensor was not constant at Prepare time; resize now.
    TF_LITE_ENSURE_OK(context,
                      ResizeOutputTensor(context, input, size, output));
  }
  if (output->type == kTfLiteFloat32) {
// Expands to a call of `type::opname<datatype>` with the op params and the
// input/size/output tensors.
#define TF_LITE_RESIZE_BILINEAR(type, opname, datatype) \
  tflite::ResizeBilinearParams op_params; \
  op_params.align_corners = params->align_corners; \
  op_params.half_pixel_centers = params->half_pixel_centers; \
  type::opname(op_params, GetTensorShape(input), \
               GetTensorData<datatype>(input), GetTensorShape(size), \
               GetTensorData<int32>(size), GetTensorShape(output), \
               GetTensorData<datatype>(output))
    if (kernel_type == kReference) {
      TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, float);
    } else if (kernel_type == kOptimized) {
      TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, float);
    }
  } else if (output->type == kTfLiteUInt8) {
    if (kernel_type == kReference) {
      TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, uint8_t);
    } else if (kernel_type == kOptimized) {
      TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, uint8_t);
    }
  } else if (output->type == kTfLiteInt8) {
    if (kernel_type == kReference) {
      // int8 uses the integer-arithmetic reference implementation.
      TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int8_t);
    } else if (kernel_type == kOptimized) {
      TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, int8_t);
    }
  } else if (output->type == kTfLiteInt16) {
    // int16 only has a reference integer implementation.
    TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
#undef TF_LITE_RESIZE_BILINEAR
  } else {
    TF_LITE_KERNEL_LOG(context, "Output type is %d, requires float.",
                       output->type);
    return kTfLiteError;
  }
  return kTfLiteOk;
}
}
// Returns the registration for the portable reference RESIZE_BILINEAR kernel.
TfLiteRegistration* Register_RESIZE_BILINEAR_REF() {
  static TfLiteRegistration registration = {
      /*init=*/nullptr, /*free=*/nullptr,
      /*prepare=*/resize_bilinear::Prepare,
      /*invoke=*/resize_bilinear::Eval<resize_bilinear::kReference>};
  return &registration;
}
// Returns the registration for the optimized RESIZE_BILINEAR kernel.
TfLiteRegistration* Register_RESIZE_BILINEAR() {
  static TfLiteRegistration registration = {
      /*init=*/nullptr, /*free=*/nullptr,
      /*prepare=*/resize_bilinear::Prepare,
      /*invoke=*/resize_bilinear::Eval<resize_bilinear::kOptimized>};
  return &registration;
}
}
}
} | #include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/resize_bilinear_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
// Random-shape resize with half-pixel centers, delegated to XNNPACK.
TEST(ResizeBilinear, AlignCenters) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto size_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
  auto channel_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
  ResizeBilinearTester()
      .HalfPixelCenters(true)
      .InputHeight(size_rng())
      .InputWidth(size_rng())
      .OutputHeight(size_rng())
      .OutputWidth(size_rng())
      .Channels(channel_rng())
      .Test(xnnpack_delegate.get());
}
// Default TF1.x-style coordinate mapping (no half-pixel, no align-corners).
TEST(ResizeBilinear, AlignCentersTF1X) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto size_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
  auto channel_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
  ResizeBilinearTester()
      .InputHeight(size_rng())
      .InputWidth(size_rng())
      .OutputHeight(size_rng())
      .OutputWidth(size_rng())
      .Channels(channel_rng())
      .Test(xnnpack_delegate.get());
}
// Align-corners coordinate mapping.
TEST(ResizeBilinear, AlignCorners) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto size_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
  auto channel_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
  ResizeBilinearTester()
      .AlignCorners(true)
      .InputHeight(size_rng())
      .InputWidth(size_rng())
      .OutputHeight(size_rng())
      .OutputWidth(size_rng())
      .Channels(channel_rng())
      .Test(xnnpack_delegate.get());
}
// Same op executed with a two-thread XNNPACK delegate.
TEST(ResizeBilinear, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto size_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
  auto channel_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
  ResizeBilinearTester()
      .InputHeight(size_rng())
      .InputWidth(size_rng())
      .OutputHeight(size_rng())
      .OutputWidth(size_rng())
      .Channels(channel_rng())
      .Test(xnnpack_delegate.get());
}
// Exercises the transient-indirection-buffer delegate flag.
TEST(ResizeBilinear, TransientIndirectionBuffer) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  delegate_options.flags |=
      TFLITE_XNNPACK_DELEGATE_FLAG_TRANSIENT_INDIRECTION_BUFFER;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto size_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 10), std::ref(rng));
  auto channel_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 16), std::ref(rng));
  ResizeBilinearTester()
      .InputHeight(size_rng())
      .InputWidth(size_rng())
      .OutputHeight(size_rng())
      .OutputWidth(size_rng())
      .Channels(channel_rng())
      .Test(xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/resize_bilinear.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/resize_bilinear_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94b41683-66eb-49e3-8f28-76f38f147e83 | cpp | tensorflow/tensorflow | presized_cuckoo_map | tensorflow/core/util/presized_cuckoo_map.h | tensorflow/core/util/presized_cuckoo_map_test.cc | #ifndef TENSORFLOW_CORE_UTIL_PRESIZED_CUCKOO_MAP_H_
#define TENSORFLOW_CORE_UTIL_PRESIZED_CUCKOO_MAP_H_
#include <algorithm>
#include <vector>
#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
// A fixed-capacity cuckoo hash map from uint64 keys to `value`, sized once at
// construction for a known number of entries (no rehashing). Each bucket holds
// kSlotsPerBucket entries; each key can live in one of two candidate buckets,
// and insertion may relocate existing entries along a BFS-discovered cuckoo
// path to make room.
//
// NOTE(review): key_transform() maps the sentinel key ~0ULL to 0 before
// storing, but InsertUnique/Find compare slots against the *untransformed*
// key, so the key ~0ULL (and its alias 0 after transform) appears to be
// unusable — confirm callers never insert ~0ULL.
template <class value>
class PresizedCuckooMap {
 public:
  typedef uint64 key_type;
  // Allocates a table sized for `num_entries` keys.
  explicit PresizedCuckooMap(uint64 num_entries) { Clear(num_entries); }
  // Drops all entries and re-sizes the table for `num_entries` keys.
  void Clear(uint64 num_entries) {
    cpq_.reset(new CuckooPathQueue());
    double n(num_entries);
    // Oversize so the table stays under the target load factor.
    n /= kLoadFactor;
    num_buckets_ = (static_cast<uint64>(n) / kSlotsPerBucket);
    // Extra slack so very small tables still leave room for cuckoo moves.
    num_buckets_ += 32;
    Bucket empty_bucket;
    for (int i = 0; i < kSlotsPerBucket; i++) {
      empty_bucket.keys[i] = kUnusedSlot;
    }
    buckets_.clear();
    buckets_.resize(num_buckets_, empty_bucket);
  }
  // Inserts (k, v) if k is not already present. Returns false on a duplicate
  // key or (rarely) if no cuckoo path can be found, which indicates the table
  // was sized too small.
  bool InsertUnique(const key_type k, const value& v) {
    uint64 tk = key_transform(k);
    uint64 b1 = fast_map_to_buckets(tk);
    uint64 b2 = fast_map_to_buckets(h2(tk));
    uint64 target_bucket = 0;
    int target_slot = kNoSpace;
    // Scan both candidate buckets: reject duplicates, remember the first
    // free slot seen.
    for (auto bucket : {b1, b2}) {
      Bucket* bptr = &buckets_[bucket];
      for (int slot = 0; slot < kSlotsPerBucket; slot++) {
        if (bptr->keys[slot] == k) {
          return false;
        } else if (target_slot == kNoSpace && bptr->keys[slot] == kUnusedSlot) {
          target_bucket = bucket;
          target_slot = slot;
        }
      }
    }
    if (target_slot != kNoSpace) {
      InsertInternal(tk, v, target_bucket, target_slot);
      return true;
    }
    // Both buckets full: evict along a cuckoo path.
    return CuckooInsert(tk, v, b1, b2);
  }
  // Looks up k; on success stores the value in *out and returns true.
  bool Find(const key_type k, value* out) const {
    uint64 tk = key_transform(k);
    return FindInBucket(k, fast_map_to_buckets(tk), out) ||
           FindInBucket(k, fast_map_to_buckets(h2(tk)), out);
  }
  // Prefetches both candidate buckets for k into cache ahead of a Find.
  void PrefetchKey(const key_type k) const {
    const uint64 tk = key_transform(k);
    absl::PrefetchToLocalCache(&buckets_[fast_map_to_buckets(tk)].keys);
    absl::PrefetchToLocalCache(&buckets_[fast_map_to_buckets(h2(tk))].keys);
  }
  // NOTE(review): does not include the buckets_ vector's heap allocation;
  // confirm whether callers expect that to be counted.
  int64_t MemoryUsed() const {
    return sizeof(PresizedCuckooMap<value>) + sizeof(CuckooPathQueue);
  }
 private:
  static constexpr int kSlotsPerBucket = 4;
  // Target occupancy; the table is allocated 1/kLoadFactor larger than asked.
  static constexpr double kLoadFactor = 0.85;
  // Maximum BFS depth when searching for a cuckoo eviction path.
  static constexpr uint8 kMaxBFSPathLen = 5;
  // Ring-buffer capacity for the BFS frontier.
  static constexpr int kMaxQueueSize = 682;
  static constexpr int kVisitedListSize = 170;
  static constexpr int kNoSpace = -1;
  // Sentinel marking an empty slot.
  static constexpr uint64 kUnusedSlot = ~(0ULL);
  // Parallel key/value arrays; one cache-friendly unit of the table.
  struct Bucket {
    key_type keys[kSlotsPerBucket];
    value values[kSlotsPerBucket];
  };
  // One node of the BFS tree over buckets: where we are, how deep, and which
  // (parent, slot) edge got us here.
  struct CuckooPathEntry {
    uint64 bucket;
    int depth;
    int parent;
    int parent_slot;
  };
  // Fixed-size FIFO ring buffer used as the BFS frontier (no allocation).
  class CuckooPathQueue {
   public:
    CuckooPathQueue() : head_(0), tail_(0) {}
    void push_back(CuckooPathEntry e) {
      queue_[tail_] = e;
      tail_ = (tail_ + 1) % kMaxQueueSize;
    }
    CuckooPathEntry pop_front() {
      CuckooPathEntry& e = queue_[head_];
      head_ = (head_ + 1) % kMaxQueueSize;
      return e;
    }
    bool empty() const { return head_ == tail_; }
    bool full() const { return ((tail_ + 1) % kMaxQueueSize) == head_; }
    void reset() { head_ = tail_ = 0; }
   private:
    CuckooPathEntry queue_[kMaxQueueSize];
    int head_;
    int tail_;
  };
  typedef std::array<CuckooPathEntry, kMaxBFSPathLen> CuckooPath;
  // Remaps the sentinel ~0ULL to 0 so it never matches an empty slot.
  inline uint64 key_transform(const key_type k) const {
    return k + (k == kUnusedSlot);
  }
  // Second hash: multiplicative mix of the word-swapped key.
  inline uint64 h2(uint64 h) const {
    const uint64 m = 0xc6a4a7935bd1e995;
    return m * ((h >> 32) | (h << 32));
  }
  // Returns k's other candidate bucket, given that one of them is b.
  inline uint64 alt_bucket(key_type k, uint64 b) const {
    if (fast_map_to_buckets(k) != b) {
      return fast_map_to_buckets(k);
    }
    return fast_map_to_buckets(h2(k));
  }
  inline void InsertInternal(key_type k, const value& v, uint64 b, int slot) {
    Bucket* bptr = &buckets_[b];
    bptr->keys[slot] = k;
    bptr->values[slot] = v;
  }
  // Linear scan of one bucket's slots for key k.
  bool FindInBucket(key_type k, uint64 b, value* out) const {
    const Bucket& bref = buckets_[b];
    for (int i = 0; i < kSlotsPerBucket; i++) {
      if (bref.keys[i] == k) {
        *out = bref.values[i];
        return true;
      }
    }
    return false;
  }
  // Index of the first free slot in `bucket`, or kNoSpace.
  inline int SpaceAvailable(uint64 bucket) const {
    const Bucket& bref = buckets_[bucket];
    for (int i = 0; i < kSlotsPerBucket; i++) {
      if (bref.keys[i] == kUnusedSlot) {
        return i;
      }
    }
    return kNoSpace;
  }
  inline void CopyItem(uint64 src_bucket, int src_slot, uint64 dst_bucket,
                       int dst_slot) {
    Bucket& src_ref = buckets_[src_bucket];
    Bucket& dst_ref = buckets_[dst_bucket];
    dst_ref.keys[dst_slot] = src_ref.keys[src_slot];
    dst_ref.values[dst_slot] = src_ref.values[src_slot];
  }
  // BFS over buckets for a path of evictions ending at a free slot; if found,
  // shifts items along the path (leaf to root) and inserts k at the root.
  bool CuckooInsert(key_type k, const value& v, uint64 b1, uint64 b2) {
    int visited_end = 0;
    cpq_->reset();
    cpq_->push_back({b1, 1, 0, 0});
    cpq_->push_back({b2, 1, 0, 0});
    while (!cpq_->empty()) {
      CuckooPathEntry e = cpq_->pop_front();
      int free_slot;
      free_slot = SpaceAvailable(e.bucket);
      if (free_slot != kNoSpace) {
        // Walk back up the BFS tree, moving each parent's item into the
        // freed slot of its child.
        while (e.depth > 1) {
          CuckooPathEntry parent = visited_[e.parent];
          CopyItem(parent.bucket, e.parent_slot, e.bucket, free_slot);
          free_slot = e.parent_slot;
          e = parent;
        }
        InsertInternal(k, v, e.bucket, free_slot);
        return true;
      } else {
        if (e.depth < (kMaxBFSPathLen)) {
          auto parent_index = visited_end;
          visited_[visited_end] = e;
          visited_end++;
          // Start at a key-dependent slot to diversify eviction choices.
          int start_slot = (k + e.bucket) % kSlotsPerBucket;
          const Bucket& bref = buckets_[e.bucket];
          for (int i = 0; i < kSlotsPerBucket; i++) {
            int slot = (start_slot + i) % kSlotsPerBucket;
            uint64 next_bucket = alt_bucket(bref.keys[slot], e.bucket);
            // Avoid immediately bouncing back to the parent bucket.
            uint64 e_parent_bucket = visited_[e.parent].bucket;
            if (next_bucket != e_parent_bucket) {
              cpq_->push_back({next_bucket, e.depth + 1, parent_index, slot});
            }
          }
        }
      }
    }
    LOG(WARNING) << "Cuckoo path finding failed: Table too small?";
    return false;
  }
  // Maps a 64-bit hash onto [0, num_buckets_) without division, via the high
  // word of a 128-bit multiply.
  inline uint64 fast_map_to_buckets(uint64 x) const {
    return absl::Uint128High64(absl::uint128(x) * absl::uint128(num_buckets_));
  }
  uint64 num_buckets_;
  std::vector<Bucket> buckets_;
  std::unique_ptr<CuckooPathQueue> cpq_;
  // Scratch space recording visited BFS nodes for path reconstruction.
  CuckooPathEntry visited_[kVisitedListSize];
  PresizedCuckooMap(const PresizedCuckooMap&) = delete;
  void operator=(const PresizedCuckooMap&) = delete;
};
}
#endif | #include "tensorflow/core/util/presized_cuckoo_map.h"
#include <array>
#include <vector>
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
// Simple insert-then-find round trip.
TEST(PresizedCuckooMapTest, Basic) {
  PresizedCuckooMap<int> pscm(1000);
  EXPECT_TRUE(pscm.InsertUnique(1, 2));
  int out;
  EXPECT_TRUE(pscm.Find(1, &out));
  EXPECT_EQ(out, 2);
}
// PrefetchKey must be safe for both present and absent keys.
TEST(PresizedCuckooMapTest, Prefetch) {
  PresizedCuckooMap<int64_t> pscm(2);
  EXPECT_TRUE(pscm.InsertUnique(1, 2));
  pscm.PrefetchKey(1);
  pscm.PrefetchKey(2);
}
// Overfilling eventually fails, but everything inserted before the failure
// remains findable.
TEST(PresizedCuckooMapTest, TooManyItems) {
  static constexpr int kTableSize = 1000;
  PresizedCuckooMap<int> pscm(kTableSize);
  for (uint64 i = 0; i < kTableSize; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    ASSERT_TRUE(pscm.InsertUnique(key, i));
  }
  uint64 failed_at = 0;
  for (uint64 i = kTableSize; i < (2 * kTableSize); i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    if (!pscm.InsertUnique(key, i)) {
      failed_at = i;
      break;
    }
  }
  EXPECT_NE(failed_at, 0);
  for (uint64 i = 0; i < failed_at; i++) {
    int out;
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    EXPECT_TRUE(pscm.Find(key, &out));
    EXPECT_EQ(out, i);
  }
}
// A zero-capacity map finds nothing and must not crash.
TEST(PresizedCuckooMapTest, ZeroSizeMap) {
  PresizedCuckooMap<int> pscm(0);
  int out;
  for (uint64 i = 0; i < 100; i++) {
    EXPECT_FALSE(pscm.Find(i, &out));
  }
}
// Clear() fully resets the table; entries never survive across clears.
TEST(PresizedCuckooMapTest, RepeatedClear) {
  PresizedCuckooMap<int> pscm(2);
  int out;
  for (int i = 0; i < 100; ++i) {
    pscm.InsertUnique(0, 0);
    pscm.InsertUnique(1, 1);
    EXPECT_TRUE(pscm.Find(0, &out));
    EXPECT_EQ(0, out);
    EXPECT_TRUE(pscm.Find(1, &out));
    EXPECT_EQ(1, out);
    pscm.Clear(2);
    EXPECT_FALSE(pscm.Find(0, &out));
    EXPECT_FALSE(pscm.Find(1, &out));
  }
}
// Fills a map of `table_size` fingerprinted keys to capacity and verifies
// every key reads back its value.
void RunFill(int64_t table_size) {
  PresizedCuckooMap<int> pscm(table_size);
  for (int64_t i = 0; i < table_size; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    EXPECT_TRUE(pscm.InsertUnique(key, i));
  }
  for (int64_t i = 0; i < table_size; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(int64_t)));
    int out;
    EXPECT_TRUE(pscm.Find(key, &out));
    EXPECT_EQ(out, i);
  }
}
// Exercises fill/verify across a geometric range of table sizes.
TEST(PresizedCuckooMapTest, Fill) {
  for (int64_t table_size = 10; table_size <= 5000000; table_size *= 71) {
    RunFill(table_size);
  }
}
// Re-inserting an existing key must fail.
TEST(PresizedCuckooMapTest, Duplicates) {
  static constexpr int kSmallTableSize = 1000;
  PresizedCuckooMap<int> pscm(kSmallTableSize);
  for (uint64 i = 0; i < kSmallTableSize; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(uint64)));
    EXPECT_TRUE(pscm.InsertUnique(key, i));
  }
  for (uint64 i = 0; i < kSmallTableSize; i++) {
    uint64 key =
        Fingerprint64(string(reinterpret_cast<char *>(&i), sizeof(uint64)));
    EXPECT_FALSE(pscm.InsertUnique(key, i));
  }
}
// Fills `dst` with `num` fingerprint-derived pseudo-random keys, one per
// index, for use by the benchmarks below.
static void CalculateKeys(uint64 num, std::vector<uint64> *dst) {
  dst->resize(num);
  for (uint64 index = 0; index < num; ++index) {
    const string bytes(reinterpret_cast<char *>(&index), sizeof(uint64));
    (*dst)[index] = Fingerprint64(bytes);
  }
}
// Benchmarks building and filling a table of `state.range(0)` entries; key
// generation is hoisted out of the timed loop.
void BM_CuckooFill(::testing::benchmark::State &state) {
  const int arg = state.range(0);
  uint64 table_size = arg;
  std::vector<uint64> calculated_keys;
  CalculateKeys(table_size, &calculated_keys);
  for (auto s : state) {
    PresizedCuckooMap<int> pscm(table_size);
    for (uint64 i = 0; i < table_size; i++) {
      pscm.InsertUnique(calculated_keys[i], i);
    }
  }
}
BENCHMARK(BM_CuckooFill)->Arg(1000)->Arg(10000000);
// Benchmarks lookups in a pre-filled table, cycling through the keys so each
// iteration performs exactly one Find.
void BM_CuckooRead(::testing::benchmark::State &state) {
  const int arg = state.range(0);
  uint64 table_size = arg;
  std::vector<uint64> calculated_keys;
  CalculateKeys(table_size, &calculated_keys);
  PresizedCuckooMap<int> pscm(table_size);
  for (uint64 i = 0; i < table_size; i++) {
    pscm.InsertUnique(calculated_keys[i], i);
  }
  int i = 0;
  for (auto s : state) {
    uint64 key_index = i;
    ++i;
    if (i == table_size) i = 0;  // wrap around the key set
    int out = 0;
    pscm.Find(calculated_keys[key_index], &out);
    // Keep the lookup result live so the compiler cannot elide the Find.
    tensorflow::testing::DoNotOptimize(out);
  }
}
BENCHMARK(BM_CuckooRead)->Arg(1000)->Arg(10000000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/presized_cuckoo_map.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/presized_cuckoo_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a79985e8-6645-4d2c-b1cb-b94726ee944a | cpp | google/arolla | model_executor | arolla/expr/eval/model_executor.cc | arolla/expr/eval/model_executor_test.cc | #include "arolla/expr/eval/model_executor.h"
#include <algorithm>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/operators/bootstrap_operators.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/simple_executable.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/string.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::model_executor_impl {
namespace {
// Result of CompiledOutputCastsIfNeeded: an optional compiled expression that
// performs output casts (nullptr when no casting is required) plus the final
// named-output slot mapping after casting.
struct CompiledOutputCastings {
  std::unique_ptr<BoundExpr> casting_executable_expr;
  absl::flat_hash_map<std::string, TypedSlot> named_output_slots;
};
// Builds (if necessary) a secondary compiled expression that casts the
// model's main output and/or its named side outputs from the types the
// compiled model produced to the types the caller requested. Casting is
// gated by `options.allow_output_casting` / `allow_side_outputs_casting`,
// except that a cast between slots of the same QType (a slot move) is always
// permitted. Returns {nullptr, given_named_output_slots} when nothing needs
// casting.
absl::StatusOr<CompiledOutputCastings> CompiledOutputCastsIfNeeded(
    const ModelExecutorOptions& options, TypedSlot given_output_slot,
    const absl::flat_hash_map<std::string, TypedSlot>& given_named_output_slots,
    TypedSlot desired_output_slot,
    const absl::flat_hash_map<std::string, QTypePtr>&
        desired_named_output_types,
    FrameLayout::Builder& layout_builder) {
  // Leaf names used inside the synthesized casting expression.
  constexpr absl::string_view kMainOutputLeafName = "main_output";
  constexpr absl::string_view kSideOutputPrefix = "_";
  absl::flat_hash_map<std::string, TypedSlot> casting_input_slots;
  ExprNodePtr main_casting_expr;
  // Placeholder; overwritten below before use.
  TypedSlot casting_expr_output_slot = TypedSlot::UnsafeFromOffset(
      GetQType<Unit>(), 0);
  absl::flat_hash_map<std::string, ExprNodePtr> named_output_casting_exprs;
  absl::flat_hash_map<std::string, TypedSlot> named_output_slots;
  // Partition side outputs: those needing a cast get a synthesized
  // `core.cast` expression; the rest pass through unchanged.
  for (const auto& [name, slot] : given_named_output_slots) {
    if (auto type_it = desired_named_output_types.find(name);
        type_it != desired_named_output_types.end()) {
      QTypePtr desired_qtype = type_it->second;
      if (desired_qtype != slot.GetType()) {
        std::string input_name = absl::StrCat(kSideOutputPrefix, name);
        ASSIGN_OR_RETURN(
            auto casted_named_output,
            expr_operators::CoreCast(Leaf(input_name), Literal(desired_qtype)));
        casting_input_slots.emplace(input_name, slot);
        named_output_casting_exprs.emplace(name, casted_named_output);
      } else {
        named_output_slots.emplace(name, slot);
      }
    }
  }
  if (!named_output_casting_exprs.empty() &&
      !options.allow_side_outputs_casting) {
    // Report the offending side outputs in deterministic (sorted) order.
    std::vector<std::string> names;
    names.reserve(named_output_casting_exprs.size());
    for (const auto& [name, _] : named_output_casting_exprs) {
      names.push_back(name);
    }
    std::sort(names.begin(), names.end());
    return absl::InvalidArgumentError(absl::StrCat(
        "side outputs casting is not allowed: ", absl::StrJoin(names, ", "),
        "; to fix add explicit `AllowSideOutputsCasting()` in model compiler"));
  }
  if (given_output_slot != desired_output_slot) {
    bool allow_casting = options.allow_output_casting;
    // Same-type slot copies are always allowed regardless of the option.
    if (given_output_slot.GetType() == desired_output_slot.GetType()) {
      allow_casting = true;
    }
    if (!allow_casting) {
      return absl::InvalidArgumentError(absl::StrCat(
          "output casting is not allowed: ",
          given_output_slot.GetType()->name(), " -> ",
          desired_output_slot.GetType()->name(),
          "; to fix add explicit `AllowOutputCasting()` in model compiler"));
    }
    ASSIGN_OR_RETURN(
        main_casting_expr,
        expr_operators::CoreCast(Leaf(kMainOutputLeafName),
                                 Literal(desired_output_slot.GetType())));
    casting_input_slots.emplace(std::string(kMainOutputLeafName),
                                given_output_slot);
    casting_expr_output_slot = desired_output_slot;
  } else {
    if (casting_input_slots.empty()) {
      // Nothing to cast at all: reuse the given slots as-is.
      return CompiledOutputCastings{nullptr, given_named_output_slots};
    }
    // Only side outputs need casting; the main expression is a trivial Unit.
    main_casting_expr = Literal(kUnit);
    casting_expr_output_slot = AddSlot(GetQType<Unit>(), &layout_builder);
  }
  ASSIGN_OR_RETURN(auto casting_executable_expr,
                   CompileAndBindForDynamicEvaluation(
                       options.eval_options, &layout_builder, main_casting_expr,
                       casting_input_slots,
                       casting_expr_output_slot,
                       named_output_casting_exprs));
  // Merge the slots produced by the casting expression with the pass-through
  // slots collected above.
  named_output_slots.insert(
      casting_executable_expr->named_output_slots().begin(),
      casting_executable_expr->named_output_slots().end());
  return CompiledOutputCastings{std::move(casting_executable_expr),
                                named_output_slots};
}
// Adapts a BoundExpr whose output is an optional into one whose output slot
// is the value subslot (the decayed, non-optional value). Execute() reports a
// FailedPreconditionError when the computed optional is missing. Create()
// returns `expr` unchanged when its output is not a {bool presence, value}
// two-field optional.
class DecayOptionalBoundExpr : public BoundExpr {
 public:
  static std::unique_ptr<BoundExpr> Create(std::unique_ptr<BoundExpr> expr) {
    QTypePtr out_type = expr->output_slot().GetType();
    // Only wrap two-field optionals whose first field is the bool presence
    // flag; any other shape is passed through untouched.
    if (IsOptionalQType(out_type) && out_type->type_fields().size() == 2 &&
        out_type->type_fields()[0].GetType() == GetQType<bool>()) {
      return std::unique_ptr<BoundExpr>(
          new DecayOptionalBoundExpr(std::move(expr)));
    } else {
      return expr;
    }
  }
  void InitializeLiterals(EvaluationContext* ctx,
                          FramePtr frame) const override {
    expr_->InitializeLiterals(ctx, frame);
  }
  void Execute(EvaluationContext* ctx, FramePtr frame) const override {
    expr_->Execute(ctx, frame);
    // Surface a missing value as an error, but never overwrite an error that
    // the wrapped expression has already reported.
    if (!frame.Get(presence_) && ctx->status().ok()) {
      ctx->set_status(absl::FailedPreconditionError(
          "expects a present value, got missing"));
    }
  }
 private:
  // NOTE: the initializer for `presence_` reads the member `expr_` (already
  // moved into) — this relies on `expr_` being declared before `presence_`.
  explicit DecayOptionalBoundExpr(std::unique_ptr<BoundExpr> expr)
      : BoundExpr(expr->input_slots(), expr->output_slot().SubSlot(1),
                  expr->named_output_slots()),
        expr_(std::move(expr)),
        presence_(expr_->output_slot().SubSlot(0).UnsafeToSlot<bool>()) {}
  std::unique_ptr<BoundExpr> expr_;
  FrameLayout::Slot<bool> presence_;
};
// CompiledExpr decorator that presents `compiled_expr` with the output type
// and side-output types the caller asked for, inserting cast (and, when
// ForceNonOptionalOutput() is set, optional-to-value decay) steps at Bind()
// time.
class CastingCompiledExpr : public CompiledExpr {
 public:
  CastingCompiledExpr(
      const CompiledExpr& compiled_expr, QTypePtr output_type,
      absl::flat_hash_map<std::string, QTypePtr> side_output_types,
      const ModelExecutorOptions& options)
      : CompiledExpr(compiled_expr.input_types(), output_type,
                     side_output_types),
        compiled_expr_(compiled_expr),
        options_(options) {}
  absl::StatusOr<std::unique_ptr<BoundExpr>> Bind(
      FrameLayout::Builder* layout_builder,
      const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
      std::optional<TypedSlot> output_slot) const final {
    // Slot the wrapped expression writes its (uncast) result into.
    // Initialized with a placeholder; always reassigned below.
    TypedSlot inner_output_slot =
        TypedSlot::UnsafeFromOffset(output_type(), 0);
    if (output_slot.has_value() &&
        output_slot->GetType() == compiled_expr_.output_type()) {
      // Caller's slot already has the wrapped expression's type: write there
      // directly, no intermediate slot needed.
      inner_output_slot = *output_slot;
    } else {
      inner_output_slot = AddSlot(compiled_expr_.output_type(), layout_builder);
      if (!output_slot.has_value()) {
        if (output_type() == inner_output_slot.GetType()) {
          output_slot = inner_output_slot;
        } else if (options_.force_non_optional_output &&
                   output_type() ==
                       DecayOptionalQType(inner_output_slot.GetType()) &&
                   inner_output_slot.SubSlotCount() == 2) {
          // Plain T requested from OPTIONAL_T: reuse the value subslot.
          output_slot = inner_output_slot.SubSlot(1);
        } else {
          output_slot = AddSlot(output_type(), layout_builder);
        }
      }
    }
    ASSIGN_OR_RETURN(
        std::unique_ptr<BoundExpr> main_executable_expr,
        compiled_expr_.Bind(layout_builder, input_slots, inner_output_slot));
    if (IsOptionalQType(compiled_expr_.output_type()) &&
        IsScalarQType(output_slot->GetType())) {
      if (options_.force_non_optional_output) {
        main_executable_expr =
            DecayOptionalBoundExpr::Create(std::move(main_executable_expr));
        inner_output_slot = main_executable_expr->output_slot();
      } else {
        return absl::InvalidArgumentError(
            "model output is deduced to optional, while non-optional is "
            "requested; to fix either wrap the desired output type with "
            "std::optional<...>/arolla::OptionalValue<...>, or pass "
            "ForceNonOptionalOutput() to model compiler, or make the model "
            "full");
      }
    }
    ASSIGN_OR_RETURN(
        (auto [casting_executable_expr, named_output_slots]),
        CompiledOutputCastsIfNeeded(options_, inner_output_slot,
                                    main_executable_expr->named_output_slots(),
                                    *output_slot, named_output_types(),
                                    *layout_builder),
        _ << "while casting model outputs due to `AllowOutputCasting()` or "
             "`AllowSideOutputsCasting()` options");
    if (casting_executable_expr == nullptr) {
      // No casting needed: expose the inner bound expression directly.
      return main_executable_expr;
    } else {
      // Chain the model evaluation and the casting step into one BoundExpr.
      std::vector<std::unique_ptr<BoundExpr>> subexprs;
      subexprs.push_back(std::move(main_executable_expr));
      subexprs.push_back(std::move(casting_executable_expr));
      return std::make_unique<CombinedBoundExpr>(input_slots, *output_slot,
                                                 std::move(named_output_slots),
                                                 std::move(subexprs));
    }
  }
 private:
  const CompiledExpr& compiled_expr_;
  ModelExecutorOptions options_;
};
// Join formatter (for absl::StrJoin-style APIs) that emits only the first
// element of each pair, ignoring the second.
struct FirstFormatter {
  template <typename Pair>
  void operator()(std::string* out, const Pair& pair) const {
    const auto& key = pair.first;
    out->append(key);
  }
};
}
std::unique_ptr<CompiledExpr> CastOutputsIfNeeded(
const CompiledExpr& expr, QTypePtr desired_output_type,
absl::Nullable<const SlotListenerBase*> slot_listener,
const ModelExecutorOptions& options) {
absl::flat_hash_map<std::string, QTypePtr> side_output_types;
side_output_types.reserve(expr.named_output_types().size());
if (slot_listener != nullptr) {
for (const auto& [name, desired_qtype] : expr.named_output_types()) {
const QType* available_qtype =
slot_listener->GetQTypeOf(name, desired_qtype);
if (available_qtype != nullptr) {
side_output_types.emplace(name, available_qtype);
}
}
}
return std::make_unique<CastingCompiledExpr>(expr, desired_output_type,
side_output_types, options);
}
// Verifies that `slot_listener` listens to every available named output.
// On failure returns FailedPreconditionError listing the unlistened names
// (alphabetically sorted via std::set, truncated to 100 characters) and the
// names the listener suggests.
absl::Status VerifyAllNamedOutputsAreListened(
    const absl::flat_hash_map<std::string, QTypePtr>&
        available_named_output_types,
    const SlotListenerBase& slot_listener) {
  std::set<std::string> missing_outputs;
  for (const auto& [name, desired_qtype] : available_named_output_types) {
    const bool is_listened =
        slot_listener.GetQTypeOf(name, desired_qtype) != nullptr;
    if (!is_listened) {
      missing_outputs.emplace(name);
    }
  }
  if (missing_outputs.empty()) {
    return absl::OkStatus();
  }
  return absl::FailedPreconditionError(absl::StrFormat(
      "slot listener does not listen for named outputs {%s} (it listens to "
      "{%s}); check that output/export names of your nodes match the slot "
      "listener names (pay attention to slashes) or set "
      "IgnoreNotListenedNamedOutputs() to disable this check if you have "
      "a good reason",
      Truncate(absl::StrJoin(missing_outputs, ", "), 100),
      Truncate(absl::StrJoin(slot_listener.SuggestAvailableNames(), ", "),
               100)));
}
} | #include "arolla/expr/eval/model_executor.h"
#include <sys/types.h>
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/side_output.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/slot_listener.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operator_factory.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/qtype/unspecified_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::TypedValueWith;
using ::arolla::testing::WithExportAnnotation;
using ::testing::_;
using ::testing::AllOf;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsFalse;
using ::testing::IsTrue;
// Plain input record used throughout these tests; exposed to the evaluator
// via accessor-based input loaders below.
struct TestInputs {
  int64_t x;
  int64_t y;
  std::optional<int64_t> optional_z;
};
// Loader exposing TestInputs::x and ::y as int64 leaves "x" and "y".
absl::StatusOr<std::unique_ptr<InputLoader<TestInputs>>>
CreateTestInputLoader() {
  return CreateAccessorsInputLoader<TestInputs>(
      "x", [](const TestInputs& in) { return in.x; },
      "y", [](const TestInputs& in) { return in.y; });
}
// Same as CreateTestInputLoader, but narrows the inputs to int32 leaves.
absl::StatusOr<std::unique_ptr<InputLoader<TestInputs>>>
CreateTestInt32InputLoader() {
  return CreateAccessorsInputLoader<TestInputs>(
      "x", [](const TestInputs& in) -> int32_t { return in.x; },
      "y", [](const TestInputs& in) -> int32_t { return in.y; });
}
// ModelExecutor is movable: the moved-to instance keeps executing correctly
// and the moved-from instance reports IsValid() == false.
TEST(ModelExecutorTest, Move) {
  ASSERT_OK_AND_ASSIGN(auto x_plus_y,
                       CallOp("math.add", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  ASSERT_OK_AND_ASSIGN(
      auto executor,
      (ModelExecutor<TestInputs, int64_t>::Compile(x_plus_y, *input_loader)));
  ASSERT_THAT(executor.IsValid(), IsTrue());
  EXPECT_THAT(executor.Execute(TestInputs{50, 7}), IsOkAndHolds(57));
  // Move construction.
  ModelExecutor<TestInputs, int64_t> other_executor{std::move(executor)};
  ASSERT_THAT(other_executor.IsValid(), IsTrue());
  EXPECT_THAT(other_executor.Execute(TestInputs{50, 7}), IsOkAndHolds(57));
  ASSERT_THAT(executor.IsValid(), IsFalse());
  // Move assignment back.
  executor = std::move(other_executor);
  ASSERT_THAT(executor.IsValid(), IsTrue());
  EXPECT_THAT(executor.Execute(TestInputs{50, 7}), IsOkAndHolds(57));
  ASSERT_THAT(other_executor.IsValid(), IsFalse());
}
// Compilation fails with a message naming both the unknown leaves and the
// leaves the input loader actually provides.
TEST(ModelExecutorTest, MissingInputs) {
  ASSERT_OK_AND_ASSIGN(auto x_plus_y, CallOp("math.add", {Leaf("unknown_x"),
                                                          Leaf("unknown_y")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  EXPECT_THAT(
      (ModelExecutor<TestInputs, int64_t>::Compile(x_plus_y, *input_loader)),
      StatusIs(absl::StatusCode::kInvalidArgument,
               "unknown inputs: unknown_x, unknown_y (available: x, y)"));
}
// Covers the main compilation/execution entry points for a simple x + y
// model: incompatible output types, the various Execute flavors, TypedValue
// output, and opt-in output casting.
TEST(ModelExecutorTest, SimpleExpr) {
  ASSERT_OK_AND_ASSIGN(auto x_plus_y,
                       CallOp("math.add", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  ModelExecutorOptions options;
  options.allow_output_casting = true;
  // INT64 -> BYTES is not castable even with casting allowed.
  EXPECT_THAT(
      (ModelExecutor<TestInputs, Bytes>::Compile(x_plus_y, *input_loader,
                                                 nullptr, options)),
      StatusIs(
          absl::StatusCode::kInvalidArgument,
          AllOf(HasSubstr("casting from INT64 to BYTES is not allowed"),
                HasSubstr(
                    "while casting model outputs due to `AllowOutputCasting()` "
                    "or `AllowSideOutputsCasting()` options"))));
  {
    // Plain Execute().
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, int64_t>::Compile(x_plus_y, *input_loader)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(12));
  }
  {
    // ExecuteOnHeap().
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, int64_t>::Compile(x_plus_y, *input_loader)));
    EXPECT_THAT(executor.ExecuteOnHeap({}, TestInputs{5, 7}), IsOkAndHolds(12));
  }
  {
    // ExecuteOnStack() with a frame-size check.
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, int64_t>::Compile(x_plus_y, *input_loader)));
    EXPECT_FALSE(executor.CanExecuteOnStack(8));
    EXPECT_TRUE(executor.CanExecuteOnStack(24));
    EXPECT_THAT(executor.ExecuteOnStack<24>({}, TestInputs{5, 7}),
                IsOkAndHolds(12));
  }
  {
    // CompileModelExecutor convenience wrapper.
    ASSERT_OK_AND_ASSIGN(auto executor, (CompileModelExecutor<int64_t>(
                                            x_plus_y, *input_loader)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(12));
  }
  {
    // TypedValue output.
    ASSERT_OK_AND_ASSIGN(auto executor,
                         (ModelExecutor<TestInputs, TypedValue>::Compile(
                             x_plus_y, *input_loader)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}),
                IsOkAndHolds(TypedValueWith<int64_t>(12)));
  }
  {
    // INT64 -> OPTIONAL_INT64 works when output casting is allowed...
    ModelExecutorOptions options;
    options.allow_output_casting = true;
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, OptionalValue<int64_t>>::Compile(
            x_plus_y, *input_loader, nullptr, options)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}),
                IsOkAndHolds(OptionalValue<int64_t>(12)));
  }
  {
    // ...and fails with a pointer to AllowOutputCasting() when it is not.
    ModelExecutorOptions options;
    EXPECT_THAT(
        (ModelExecutor<TestInputs, OptionalValue<int64_t>>::Compile(
            x_plus_y, *input_loader, nullptr, options)),
        StatusIs(
            absl::StatusCode::kInvalidArgument,
            HasSubstr("output casting is not allowed: INT64 -> OPTIONAL_INT64; "
                      "to fix add explicit `AllowOutputCasting()` in model "
                      "compiler")));
  }
}
// std::optional<T> output type: holds the value for a present optional and
// std::nullopt for a missing one.
TEST(ModelExecutorTest, ReturnsStdOptional) {
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  {
    ASSERT_OK_AND_ASSIGN(auto optional_x,
                         CallOp("core.to_optional", {Leaf("x")}));
    ASSERT_OK_AND_ASSIGN(
        (ModelExecutor<TestInputs, std::optional<int64_t>> executor),
        CompileModelExecutor<std::optional<int64_t>>(optional_x,
                                                     *input_loader));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(Eq(5)));
  }
  {
    ASSERT_OK_AND_ASSIGN(auto empty_like_x,
                         CallOp("core.empty_like", {Leaf("x")}));
    ASSERT_OK_AND_ASSIGN(
        (ModelExecutor<TestInputs, std::optional<int64_t>> executor),
        CompileModelExecutor<std::optional<int64_t>>(empty_like_x,
                                                     *input_loader));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}),
                IsOkAndHolds(Eq(std::nullopt)));
  }
}
// A DenseArray-valued model can be returned as std::vector<std::optional<T>>,
// preserving missing elements as std::nullopt.
TEST(ModelExecutorTest, ReturnsStdVectorOfOptional) {
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      CreateAccessorsInputLoader<TestInputs>(
          "x",
          [](const TestInputs& in) {
            return CreateDenseArray<int64_t>({0, in.x, std::nullopt});
          },
          "y",
          [](const TestInputs& in) {
            return CreateDenseArray<int64_t>({0, in.y, std::nullopt});
          }));
  ASSERT_OK_AND_ASSIGN(auto x_mul_y,
                       CallOp("math.multiply", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(
      (ModelExecutor<TestInputs, std::vector<std::optional<int64_t>>> executor),
      CompileModelExecutor<std::vector<std::optional<int64_t>>>(x_mul_y,
                                                                *input_loader));
  EXPECT_THAT(executor.Execute(TestInputs{3, 19}),
              IsOkAndHolds(ElementsAre(0, 57, std::nullopt)));
}
// Non-optional std::vector<T> output requires ForceNonOptionalOutput(); a
// missing element then causes a runtime FailedPreconditionError.
TEST(ModelExecutorTest, ReturnsStdVector) {
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      CreateAccessorsInputLoader<TestInputs>("x", [](const TestInputs& in) {
        return CreateDenseArray<int64_t>({in.x, in.y, in.optional_z});
      }));
  ASSERT_OK_AND_ASSIGN(auto x_mul_x,
                       CallOp("math.multiply", {Leaf("x"), Leaf("x")}));
  // Without the option, compilation is rejected.
  EXPECT_THAT(
      (CompileModelExecutor<std::vector<int64_t>>(x_mul_x, *input_loader)),
      StatusIs(absl::StatusCode::kFailedPrecondition,
               HasSubstr("non-optional std::vector model output is supported "
                         "only with ForceNonOptionalOutput() setting")));
  ModelExecutorOptions options;
  options.force_non_optional_output = true;
  ASSERT_OK_AND_ASSIGN(
      (ModelExecutor<TestInputs, std::vector<int64_t>> executor),
      CompileModelExecutor<std::vector<int64_t>>(x_mul_x, *input_loader,
                                                 options));
  // optional_z unset -> element 2 missing -> runtime error.
  EXPECT_THAT(executor.Execute(TestInputs{1, 0, std::nullopt}),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       "non-full model output (element 2 is missing) while "
                       "full std::vector output is requested"));
  EXPECT_THAT(executor.Execute(TestInputs{1, 0, 16}),
              IsOkAndHolds(ElementsAre(1, 0, 256)));
}
// gMock operator directory that, by default, delegates every lookup to the
// global OperatorRegistry; tests use EXPECT_CALL to observe which operators
// get looked up.
class MockOperatorDirectory : public OperatorDirectory {
 public:
  MockOperatorDirectory() {
    ON_CALL(*this, DoLookupOperator)
        .WillByDefault([](absl::string_view name,
                          absl::Span<const QTypePtr> input_types,
                          QTypePtr output_type) {
          return OperatorRegistry::GetInstance()->LookupOperator(
              name, input_types, output_type);
        });
  }
  MOCK_METHOD(absl::StatusOr<OperatorPtr>, DoLookupOperator,
              (absl::string_view name, absl::Span<const QTypePtr> input_types,
               QTypePtr output_type),
              (const, override));
};
// The operator directory from eval_options is used when compiling the
// output-casting expression (observed via the core.to_optional._scalar
// lookup).
TEST(ModelExecutorTest, OptionsPropagatedToCasting) {
  ASSERT_OK_AND_ASSIGN(auto x_plus_y,
                       CallOp("math.add", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  MockOperatorDirectory operator_directory;
  ModelExecutorOptions options;
  options.allow_output_casting = true;
  options.eval_options.operator_directory = &operator_directory;
  EXPECT_CALL(operator_directory, DoLookupOperator(_, _, _)).Times(AnyNumber());
  EXPECT_CALL(operator_directory,
              DoLookupOperator("core.to_optional._scalar", _, _))
      .Times(AtLeast(1));
  ASSERT_OK_AND_ASSIGN(
      auto executor,
      (ModelExecutor<TestInputs, OptionalValue<int64_t>>::Compile(
          x_plus_y, *input_loader, nullptr, options)));
  EXPECT_THAT(executor.Execute(TestInputs{5, 7}),
              IsOkAndHolds(OptionalValue<int64_t>(12)));
}
// Passing an external arena in ExecuteOptions: the arena is used during
// execution, yet the returned DenseArray owns its memory (it does not alias
// arena storage).
TEST(ModelExecutorTest, ExternalBufferFactory) {
  ASSERT_OK_AND_ASSIGN(
      auto expr, CallOp("array.as_dense_array",
                        {CallOp("core.make_tuple", {Leaf("x"), Leaf("y")})}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  ASSERT_OK_AND_ASSIGN(auto executor,
                       (ModelExecutor<TestInputs, DenseArray<int64_t>>::Compile(
                           expr, *input_loader)));
  UnsafeArenaBufferFactory arena(64 << 10);
  auto [buf1, data1] = arena.CreateRawBuffer(8);
  auto [buf2, data2] = arena.CreateRawBuffer(8);
  ASSERT_OK_AND_ASSIGN(
      DenseArray<int64_t> res,
      executor.Execute({.buffer_factory = &arena}, TestInputs{5, 7}));
  auto [buf3, data3] = arena.CreateRawBuffer(8);
  // The allocation spacing changes, showing the arena was consumed by
  // Execute() in between.
  EXPECT_NE(reinterpret_cast<char*>(data2) - reinterpret_cast<char*>(data1),
            reinterpret_cast<char*>(data3) - reinterpret_cast<char*>(data2));
  EXPECT_TRUE(res.is_owned());
}
// ForceNonOptionalOutput(): an optional-typed model can return plain T;
// missing values become runtime errors, and TypedValue output is rejected.
TEST(ModelExecutorTest, ReturnsNonOptional) {
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      CreateAccessorsInputLoader<TestInputs>(
          "y",
          [](const TestInputs& in) { return OptionalValue<int64_t>(in.y); },
          "z",
          [](const TestInputs& in) {
            return OptionalValue<int64_t>(in.optional_z);
          }));
  ASSERT_OK_AND_ASSIGN(auto y_mul_z,
                       CallOp("math.multiply", {Leaf("y"), Leaf("z")}));
  // Without the option, a non-optional output request fails at compile time.
  EXPECT_THAT((CompileModelExecutor<int64_t>(y_mul_z, *input_loader)),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("model output is deduced to optional, while "
                                 "non-optional is requested")));
  ModelExecutorOptions options;
  options.force_non_optional_output = true;
  ASSERT_OK_AND_ASSIGN(
      (ModelExecutor<TestInputs, int64_t> executor),
      CompileModelExecutor<int64_t>(y_mul_z, *input_loader, options));
  EXPECT_THAT(executor.Execute(TestInputs{1, 0, std::nullopt}),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       "expects a present value, got missing"));
  EXPECT_THAT(executor.Execute(TestInputs{1, 2, 3}), IsOkAndHolds(6));
  EXPECT_THAT(
      (CompileModelExecutor<TypedValue>(y_mul_z, *input_loader, options)),
      StatusIs(absl::StatusCode::kUnimplemented,
               HasSubstr("ForceNonOptionalOutput() is not supported for "
                         "TypedValue outputs")));
}
// Non-POD (Bytes) vector output works through Execute, ExecuteOnHeap and
// ExecuteOnStack alike.
TEST(ModelExecutorTest, ReturnsStdVectorBytes) {
  ASSERT_OK_AND_ASSIGN(
      auto input_loader,
      CreateAccessorsInputLoader<TestInputs>(
          "x",
          [](const TestInputs& in) {
            return CreateDenseArray<Bytes>(
                {Bytes{"foo"}, Bytes{absl::StrCat(in.x)}, std::nullopt});
          },
          "y",
          [](const TestInputs& in) {
            return CreateDenseArray<Bytes>(
                {Bytes{"bar"}, Bytes{absl::StrCat(in.y)}, std::nullopt});
          }));
  ASSERT_OK_AND_ASSIGN(auto x_plus_y,
                       CallOp("strings.join", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(
      (ModelExecutor<TestInputs, std::vector<std::optional<Bytes>>> executor),
      CompileModelExecutor<std::vector<std::optional<Bytes>>>(x_plus_y,
                                                              *input_loader));
  EXPECT_THAT(
      executor.Execute(TestInputs{5, 7}),
      IsOkAndHolds(ElementsAre(Bytes{"foobar"}, Bytes{"57"}, std::nullopt)));
  EXPECT_THAT(
      executor.ExecuteOnHeap({}, TestInputs{5, 7}),
      IsOkAndHolds(ElementsAre(Bytes{"foobar"}, Bytes{"57"}, std::nullopt)));
  EXPECT_TRUE(executor.CanExecuteOnStack(1024));
  EXPECT_THAT(
      executor.ExecuteOnStack<1024>({}, TestInputs{5, 7}),
      IsOkAndHolds(ElementsAre(Bytes{"foobar"}, Bytes{"57"}, std::nullopt)));
}
// Binding a pre-compiled expression (ModelExecutor::Bind / BindModelExecutor)
// instead of compiling from an ExprNode.
TEST(ModelExecutorTest, SimpleExprBind) {
  ASSERT_OK_AND_ASSIGN(auto x_plus_y,
                       CallOp("math.add", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  ASSERT_OK_AND_ASSIGN(
      auto output_types,
      GetInputLoaderQTypes(*input_loader, GetLeafKeys(x_plus_y)));
  ASSERT_OK_AND_ASSIGN(auto compiled_expr, CompileForDynamicEvaluation(
                                               DynamicEvaluationEngineOptions(),
                                               x_plus_y, output_types));
  {
    ASSERT_OK_AND_ASSIGN(auto executor,
                         (ModelExecutor<TestInputs, int64_t>::Bind(
                             *compiled_expr, *input_loader)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(12));
  }
  {
    ASSERT_OK_AND_ASSIGN(auto executor, BindModelExecutor<int64_t>(
                                            *compiled_expr, *input_loader));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(12));
  }
  {
    ASSERT_OK_AND_ASSIGN(auto executor, BindModelExecutor<int64_t>(
                                            *compiled_expr, *input_loader));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(12));
  }
  {
    // Output casting also works through the Bind path.
    ModelExecutorOptions options;
    options.allow_output_casting = true;
    ASSERT_OK_AND_ASSIGN(auto executor,
                         BindModelExecutor<OptionalValue<int64_t>>(
                             *compiled_expr, *input_loader, options));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(12));
  }
}
// Destination record that TestSlotListener writes side outputs into.
struct SideOutput {
  OptionalValue<int64_t> out_x;
  OptionalValue<int64_t> out_xpy;
};
// Slot listener that copies the "out_x"/"out_xpy" named outputs (with slot
// types OutXT/OutYT) into a SideOutput. The listened names and types are
// configurable via `input_types`; a GetUnspecifiedQType() entry means "accept
// whatever type is offered".
template <class OutXT, class OutYT>
struct TestSlotListener : public SlotListener<SideOutput> {
  TestSlotListener() = default;
  explicit TestSlotListener(
      absl::flat_hash_map<std::string, QTypePtr> input_types)
      : input_types(std::move(input_types)) {}
  absl::Nullable<const QType*> GetQTypeOf(
      absl::string_view name, const QType* desired_qtype) const final {
    auto it = input_types.find(name);
    if (it == input_types.end()) {
      return nullptr;
    }
    // Unspecified entries mirror back the requested type.
    return it->second == GetUnspecifiedQType() ? desired_qtype : it->second;
  }
  std::vector<std::string> SuggestAvailableNames() const final {
    std::vector<std::string> names;
    names.reserve(input_types.size());
    for (const auto& [name, _] : input_types) {
      names.emplace_back(name);
    }
    std::sort(names.begin(), names.end());
    return names;
  }
  absl::StatusOr<BoundSlotListener<SideOutput>> BindImpl(
      const absl::flat_hash_map<std::string, TypedSlot>& input_slots)
      const final {
    return [input_slots](::arolla::ConstFramePtr frame,
                         SideOutput* output) -> absl::Status {
      if (input_slots.contains("out_x")) {
        ASSIGN_OR_RETURN(auto slot, input_slots.at("out_x").ToSlot<OutXT>());
        output->out_x = frame.Get(slot);
      }
      if (input_slots.contains("out_xpy")) {
        ASSIGN_OR_RETURN(auto slot, input_slots.at("out_xpy").ToSlot<OutYT>());
        output->out_xpy = frame.Get(slot);
      }
      return absl::OkStatus();
    };
  }
  // Listened name -> slot type mapping; defaults to both outputs with their
  // template-parameter types.
  absl::flat_hash_map<std::string, QTypePtr> input_types = {
      {"out_x", GetQType<OutXT>()}, {"out_xpy", GetQType<OutYT>()}};
};
// End-to-end side outputs via export annotations: out_x = x, out_xpy = x + y,
// model output = x + y + y. Covers missing listener, type-mismatched
// listener, all Execute flavors, side-output casting and partial listening.
TEST(ModelExecutorTest, SimpleExprWithSlotListener) {
  ASSERT_OK_AND_ASSIGN(auto x, WithExportAnnotation(Leaf("x"), "out_x"));
  auto y = Leaf("y");
  ASSERT_OK_AND_ASSIGN(
      auto x_plus_y,
      WithExportAnnotation(CallOp("math.add", {x, y}), "out_xpy"));
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x_plus_y, y}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  TestSlotListener<int64_t, int64_t> slot_listener;
  {
    // SideOutput requested at Execute time, but no listener was compiled in.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor, (ModelExecutor<TestInputs, int64_t, SideOutput>::Compile(
                           expr, *input_loader)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                StatusIs(absl::StatusCode::kInvalidArgument,
                         HasSubstr("slot listener was not provided")));
  }
  {
    // Listener demanding a non-castable side-output type fails to compile.
    TestSlotListener<int64_t, int64_t> wrong_slot_listener{
        {{"out_x", GetQType<int64_t>()},
         {"out_xpy", GetQType<::arolla::Bytes>()}}};
    EXPECT_THAT(
        CompileModelExecutor<int64_t>(expr, *input_loader, wrong_slot_listener),
        StatusIs(absl::StatusCode::kInvalidArgument,
                 HasSubstr("casting from INT64 to BYTES is not allowed")));
  }
  {
    // Execute() fills the side outputs; nullptr side output is also allowed.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor, (ModelExecutor<TestInputs, int64_t, SideOutput>::Compile(
                           expr, *input_loader, &slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, nullptr), IsOkAndHolds(19));
  }
  {
    // Same via ExecuteOnHeap().
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor, (ModelExecutor<TestInputs, int64_t, SideOutput>::Compile(
                           expr, *input_loader, &slot_listener)));
    EXPECT_THAT(executor.ExecuteOnHeap({}, TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
    EXPECT_THAT(executor.ExecuteOnHeap({}, TestInputs{5, 7}, nullptr),
                IsOkAndHolds(19));
  }
  {
    // Same via ExecuteOnStack().
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor, (ModelExecutor<TestInputs, int64_t, SideOutput>::Compile(
                           expr, *input_loader, &slot_listener)));
    EXPECT_FALSE(executor.CanExecuteOnStack(8));
    EXPECT_TRUE(executor.CanExecuteOnStack(64));
    EXPECT_THAT(executor.ExecuteOnStack<64>({}, TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
    EXPECT_THAT(executor.ExecuteOnStack<64>({}, TestInputs{5, 7}, nullptr),
                IsOkAndHolds(19));
  }
  {
    // CompileModelExecutor convenience wrapper with a listener.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (CompileModelExecutor<int64_t>(expr, *input_loader, slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
  {
    // Listener with optional-typed slots: int64 -> OPTIONAL_INT64 side-output
    // casting happens implicitly at compile time.
    TestSlotListener<OptionalValue<int64_t>, OptionalValue<int64_t>>
        optional_slot_listener;
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor, (CompileModelExecutor<int64_t>(expr, *input_loader,
                                                      optional_slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
  {
    // GetUnspecifiedQType() entry: listener accepts whatever type is offered.
    TestSlotListener<int64_t, int64_t> slot_listener{
        {{"out_x", GetQType<int64_t>()}, {"out_xpy", GetUnspecifiedQType()}}};
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (CompileModelExecutor<int64_t>(expr, *input_loader, slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
  {
    // int32 inputs: both the main output and side outputs get widened.
    TestSlotListener<OptionalValue<int64_t>, OptionalValue<int64_t>>
        optional_slot_listener;
    ASSERT_OK_AND_ASSIGN(auto int32_loader, CreateTestInt32InputLoader());
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(auto executor,
                         (CompileModelExecutor<int>(expr, *int32_loader,
                                                    optional_slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
  {
    // Listener covering only some named outputs, allowed via
    // IgnoreNotListenedNamedOutputs(); the unlistened one stays unset.
    TestSlotListener<int64_t, int64_t> limited_slot_listener{
        {{"out_xpy", GetQType<int64_t>()}}};
    SideOutput side_output;
    ModelExecutorOptions options;
    options.ignore_not_listened_named_outputs = true;
    ASSERT_OK_AND_ASSIGN(auto executor, (CompileModelExecutor<int64_t>(
                                            expr, *input_loader,
                                            limited_slot_listener, options)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x, std::nullopt);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
}
// Side outputs through the Bind path: the side outputs are extracted with
// ExtractSideOutputs and compiled either into the main CompiledExpr or into a
// separate one passed alongside it. Also covers listener/type mismatches and
// the IgnoreNotListenedNamedOutputs / AllowSideOutputsCasting options.
TEST(ModelExecutorTest, SimpleExprBindWithSlotListener) {
  ASSERT_OK_AND_ASSIGN(auto x, WithExportAnnotation(Leaf("x"), "out_x"));
  auto y = Leaf("y");
  ASSERT_OK_AND_ASSIGN(
      auto x_plus_y,
      WithExportAnnotation(CallOp("math.add", {x, y}), "out_xpy"));
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x_plus_y, y}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  TestSlotListener<int64_t, int64_t> slot_listener;
  ASSERT_OK_AND_ASSIGN((auto [stripped_expr, side_outputs]),
                       ExtractSideOutputs(expr));
  ASSERT_OK_AND_ASSIGN(
      auto output_types,
      GetInputLoaderQTypes(*input_loader, GetLeafKeys(x_plus_y)));
  // Compiled without side outputs...
  ASSERT_OK_AND_ASSIGN(auto compiled_expr, CompileForDynamicEvaluation(
                                               DynamicEvaluationEngineOptions(),
                                               stripped_expr, output_types,
                                               {}));
  // ...and with them.
  ASSERT_OK_AND_ASSIGN(
      auto compiled_expr_with_side_output,
      CompileForDynamicEvaluation(DynamicEvaluationEngineOptions(),
                                  stripped_expr, output_types, side_outputs));
  {
    // No side outputs compiled in: the listener receives nothing.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
            *compiled_expr, *input_loader,
            nullptr, &slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_FALSE(side_output.out_x.present);
    EXPECT_FALSE(side_output.out_xpy.present);
  }
  {
    // Side outputs compiled into the main expression.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
            *compiled_expr_with_side_output, *input_loader,
            nullptr, &slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
  {
    // Side outputs supplied via a separate compiled expression.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(
        auto executor,
        (ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
            *compiled_expr, *input_loader, compiled_expr_with_side_output.get(),
            &slot_listener)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x.value, 5);
    EXPECT_EQ(side_output.out_xpy.value, 12);
  }
  {
    // BindModelExecutor convenience wrapper.
    SideOutput side_output;
    ASSERT_OK_AND_ASSIGN(auto executor, BindModelExecutor<int64_t>(
                                            *compiled_expr_with_side_output,
                                            *input_loader, slot_listener));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x, int64_t{5});
    EXPECT_EQ(side_output.out_xpy, int64_t{12});
  }
  {
    // Optional-typed listener works when side-output casting is enabled.
    TestSlotListener<OptionalValue<int64_t>, OptionalValue<int64_t>>
        optional_slot_listener;
    SideOutput side_output;
    ModelExecutorOptions options;
    options.allow_side_outputs_casting = true;
    ASSERT_OK_AND_ASSIGN(auto executor,
                         (BindModelExecutor<int64_t>(
                             *compiled_expr_with_side_output, *input_loader,
                             optional_slot_listener, options)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x, int64_t{5});
    EXPECT_EQ(side_output.out_xpy, int64_t{12});
  }
  {
    // Listener that does not listen to any of the named outputs: rejected
    // unless IgnoreNotListenedNamedOutputs() is set.
    TestSlotListener<OptionalValue<int64_t>, OptionalValue<int64_t>>
        irrelevant_slot_listener(
            {{"foo", GetOptionalQType<int64_t>()},
             {"bar", GetOptionalQType<int64_t>()}});
    ModelExecutorOptions options;
    options.ignore_not_listened_named_outputs = false;
    EXPECT_THAT(
        (ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
            *compiled_expr_with_side_output, *input_loader,
            nullptr,
            &irrelevant_slot_listener, options)),
        StatusIs(
            absl::StatusCode::kFailedPrecondition,
            HasSubstr("slot listener does not listen for named outputs {out_x, "
                      "out_xpy} (it listens to {bar, foo});")));
    EXPECT_THAT(
        (ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
            *compiled_expr, *input_loader,
            compiled_expr_with_side_output.get(), &irrelevant_slot_listener,
            options)),
        StatusIs(
            absl::StatusCode::kFailedPrecondition,
            HasSubstr("slot listener does not listen for named outputs {out_x, "
                      "out_xpy} (it listens to {bar, foo});")));
    options.ignore_not_listened_named_outputs = true;
    EXPECT_THAT(
        (ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
            *compiled_expr_with_side_output, *input_loader,
            nullptr, &irrelevant_slot_listener, options)),
        IsOk());
    EXPECT_THAT((ModelExecutor<TestInputs, int64_t, SideOutput>::Bind(
                    *compiled_expr, *input_loader,
                    compiled_expr_with_side_output.get(),
                    &irrelevant_slot_listener, options)),
                IsOk());
  }
  {
    // Side-output casting is opt-in: without AllowSideOutputsCasting() the
    // optional-typed listener is rejected with a sorted list of names.
    TestSlotListener<OptionalValue<int64_t>, OptionalValue<int64_t>>
        optional_slot_listener;
    EXPECT_THAT(
        (BindModelExecutor<int64_t>(*compiled_expr_with_side_output,
                                    *input_loader, optional_slot_listener)),
        StatusIs(
            absl::StatusCode::kInvalidArgument,
            HasSubstr(
                "side outputs casting is not allowed: out_x, out_xpy; to fix "
                "add explicit `AllowSideOutputsCasting()` in model compiler")));
  }
  {
    // Both main-output and side-output casting enabled together.
    TestSlotListener<OptionalValue<int64_t>, OptionalValue<int64_t>>
        optional_slot_listener;
    SideOutput side_output;
    ModelExecutorOptions options;
    options.allow_output_casting = true;
    options.allow_side_outputs_casting = true;
    ASSERT_OK_AND_ASSIGN(auto executor,
                         (BindModelExecutor<OptionalValue<int64_t>>(
                             *compiled_expr_with_side_output, *input_loader,
                             optional_slot_listener, options)));
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}, &side_output),
                IsOkAndHolds(19));
    EXPECT_EQ(side_output.out_x, int64_t{5});
    EXPECT_EQ(side_output.out_xpy, int64_t{12});
  }
}
// Test operator that allocates a small constant DenseArray directly on the
// evaluation context's buffer factory. It asserts that the factory is an
// UnsafeArenaBufferFactory and that the produced array is NOT owned, so the
// surrounding tests can verify that ModelExecutor deep-copies arena-backed
// results before returning them.
struct CreateTestDenseArrayOp {
  DenseArray<int> operator()(EvaluationContext* ctx, int64_t) const {
    RawBufferFactory* factory = &ctx->buffer_factory();
    // The executor is expected to install an arena for this evaluation.
    CHECK(dynamic_cast<UnsafeArenaBufferFactory*>(factory));
    auto arr = CreateConstDenseArray<int>(3, 1, factory);
    CHECK(!arr.is_owned());
    return arr;
  }
};
// End-to-end check that a DenseArray produced on the executor's internal
// arena (unowned, see CreateTestDenseArrayOp) is returned from Execute() as
// an owned array.
TEST(ModelExecutorTest, ArenaMakeOwned) {
  constexpr absl::string_view op_name = "test.create_test_dense_array";
  ASSERT_OK_AND_ASSIGN(
      auto op_factory,
      (QExprOperatorFromFunctor<CreateTestDenseArrayOp, int64_t>()));
  ASSERT_OK(::arolla::OperatorRegistry::GetInstance()->RegisterOperator(
      op_name, op_factory));
  // Type meta: the backend operator always yields DenseArray<int>.
  auto ReturnsDenseArray = [](absl::Span<const QTypePtr>)
      -> absl::StatusOr<expr_operators::type_meta::QTypes> {
    return expr_operators::type_meta::QTypes{GetDenseArrayQType<int>()};
  };
  ASSERT_OK(expr_operators::RegisterBackendOperator(
      op_name, ExprOperatorSignature::MakeArgsN(1), ReturnsDenseArray));
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_name, {Leaf("x")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader, CreateTestInputLoader());
  ModelExecutorOptions options;
  options.arena_page_size = 64 << 10;  // Enables arena-backed evaluation.
  ASSERT_OK_AND_ASSIGN(auto executor, (CompileModelExecutor<DenseArray<int>>(
                                          expr, *input_loader, options)));
  ASSERT_OK_AND_ASSIGN(DenseArray<int> res, executor.Execute(TestInputs{5, 7}));
  EXPECT_EQ(res.size(), 3);
  // The operator returned an unowned array; the executor must own the result.
  EXPECT_TRUE(res.is_owned());
}
// Captured by FactorySideEffectOp so tests can observe which
// RawBufferFactory the operator was invoked with and what it allocated.
static RawBufferFactory* kLastOpUsedFactory = nullptr;
static void* kLastOpAllocatedBuffer = nullptr;
// Identity operator that records the buffer factory supplied via the
// EvaluationContext and performs a 1024-byte allocation through it.
struct FactorySideEffectOp {
  template <class T>
  T operator()(EvaluationContext* ctx, T a) const {
    kLastOpUsedFactory = &ctx->buffer_factory();
    kLastOpAllocatedBuffer =
        std::get<1>(ctx->buffer_factory().CreateRawBuffer(1024));
    return a;
  }
};
// Captured by the input loader below so tests can observe which
// RawBufferFactory was passed to input accessors.
static RawBufferFactory* kLastLoaderUsedFactory = nullptr;
static void* kLastLoaderAllocatedBuffer = nullptr;
// Input loader for "x" that records the RawBufferFactory it is given and
// allocates 128 bytes through it as a side effect.
absl::StatusOr<std::unique_ptr<InputLoader<TestInputs>>>
CreateArenaSideEffectTestInputLoader() {
  return CreateAccessorsInputLoader<TestInputs>(
      "x", [](const TestInputs& in, RawBufferFactory* factory) {
        kLastLoaderUsedFactory = factory;
        kLastLoaderAllocatedBuffer = std::get<1>(factory->CreateRawBuffer(128));
        return in.x;
      });
}
// Verifies that when arena_page_size is set, an UnsafeArenaBufferFactory is
// propagated both to operator evaluation contexts and to input-loader
// accessors, and that the same factory/buffers are reused across Execute()
// calls (i.e. the arena is recycled, not reallocated).
TEST(ModelExecutorTest, ArenaPropagated) {
  using ::arolla::expr_operators::type_meta::Nth;
  constexpr absl::string_view op_name = "test.factory_side_effect";
  ASSERT_OK_AND_ASSIGN(
      auto factory_side_effect,
      (QExprOperatorFromFunctor<FactorySideEffectOp, int64_t>()));
  ASSERT_OK(::arolla::OperatorRegistry::GetInstance()->RegisterOperator(
      op_name, factory_side_effect));
  ASSERT_OK_AND_ASSIGN(auto x_sig, ExprOperatorSignature::Make("x"));
  ASSERT_OK(expr_operators::RegisterBackendOperator(op_name, x_sig, Nth(0)));
  ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op_name, {Leaf("x")}));
  ASSERT_OK_AND_ASSIGN(auto input_loader,
                       CreateArenaSideEffectTestInputLoader());
  ModelExecutorOptions options;
  options.arena_page_size = 64 << 10;
  ASSERT_OK_AND_ASSIGN(auto executor, CompileModelExecutor<int64_t>(
                                          expr, *input_loader, options));
  // Nothing has executed yet, so no side effects were recorded.
  EXPECT_EQ(kLastOpUsedFactory, nullptr);
  EXPECT_EQ(kLastOpAllocatedBuffer, nullptr);
  EXPECT_EQ(kLastLoaderUsedFactory, nullptr);
  EXPECT_EQ(kLastLoaderAllocatedBuffer, nullptr);
  EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(5));
  // Both the operator and the loader saw a (non-heap) arena factory.
  EXPECT_NE(kLastOpUsedFactory, nullptr);
  EXPECT_NE(kLastOpAllocatedBuffer, nullptr);
  EXPECT_NE(kLastLoaderUsedFactory, nullptr);
  EXPECT_NE(kLastLoaderAllocatedBuffer, nullptr);
  EXPECT_NE(kLastOpUsedFactory, GetHeapBufferFactory());
  EXPECT_NE(kLastLoaderUsedFactory, GetHeapBufferFactory());
  EXPECT_EQ(std::string(typeid(*kLastLoaderUsedFactory).name()),
            std::string(typeid(UnsafeArenaBufferFactory).name()));
  EXPECT_EQ(std::string(typeid(*kLastOpUsedFactory).name()),
            std::string(typeid(UnsafeArenaBufferFactory).name()));
  RawBufferFactory* prev_used_op_factory = kLastOpUsedFactory;
  void* prev_allocated_op_buffer = kLastOpAllocatedBuffer;
  RawBufferFactory* prev_used_loader_factory = kLastLoaderUsedFactory;
  void* prev_allocated_loader_buffer = kLastLoaderAllocatedBuffer;
  {
    // A second execution reuses the exact same factory and buffer addresses,
    // demonstrating arena reuse between calls.
    EXPECT_THAT(executor.Execute(TestInputs{5, 7}), IsOkAndHolds(5));
    EXPECT_EQ(kLastOpUsedFactory, prev_used_op_factory);
    EXPECT_EQ(kLastOpAllocatedBuffer, prev_allocated_op_buffer);
    EXPECT_EQ(kLastLoaderUsedFactory, prev_used_loader_factory);
    EXPECT_EQ(kLastLoaderAllocatedBuffer, prev_allocated_loader_buffer);
  }
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/model_executor.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/model_executor_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
3e1b80d3-4273-4518-9418-e44c3c9984a2 | cpp | google/cel-cpp | optional_types | runtime/optional_types.cc | runtime/optional_types_test.cc | #include "runtime/optional_types.h"
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/function_adapter.h"
#include "common/casting.h"
#include "common/type.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/casts.h"
#include "internal/number.h"
#include "internal/status_macros.h"
#include "runtime/function_registry.h"
#include "runtime/internal/errors.h"
#include "runtime/internal/runtime_friend_access.h"
#include "runtime/internal/runtime_impl.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
namespace cel::extensions {
namespace {
// Implements `optional.of(value)`: wraps `value` in a present optional.
Value OptionalOf(ValueManager& value_manager, const Value& value) {
  return OptionalValue::Of(value_manager.GetMemoryManager(), value);
}

// Implements `optional.none()`: returns the empty optional.
Value OptionalNone(ValueManager&) { return OptionalValue::None(); }

// Implements `optional.ofNonZeroValue(value)`: the empty optional when
// `value` is a zero value, otherwise a present optional wrapping it.
Value OptionalOfNonZeroValue(ValueManager& value_manager, const Value& value) {
  return value.IsZeroValue() ? OptionalNone(value_manager)
                             : OptionalOf(value_manager, value);
}
// Implements `optional.value()`: unwraps the contained value. Produces a
// no-matching-overload error if the receiver is not an optional.
absl::StatusOr<Value> OptionalGetValue(ValueManager& value_manager,
                                       const OpaqueValue& opaque_value) {
  auto optional_value = As<OptionalValue>(opaque_value);
  if (!optional_value) {
    return ErrorValue{runtime_internal::CreateNoMatchingOverloadError("value")};
  }
  return optional_value->Value();
}

// Implements `optional.hasValue()`: whether the optional holds a value.
// Produces a no-matching-overload error if the receiver is not an optional.
absl::StatusOr<Value> OptionalHasValue(ValueManager& value_manager,
                                       const OpaqueValue& opaque_value) {
  auto optional_value = As<OptionalValue>(opaque_value);
  if (!optional_value) {
    return ErrorValue{
        runtime_internal::CreateNoMatchingOverloadError("hasValue")};
  }
  return BoolValue{optional_value->HasValue()};
}
// Implements `struct.?field`: the empty optional when the field is absent,
// otherwise a present optional wrapping the field's value.
absl::StatusOr<Value> SelectOptionalFieldStruct(ValueManager& value_manager,
                                                const StructValue& struct_value,
                                                const StringValue& key) {
  std::string scratch;
  auto field_name = key.NativeString(scratch);
  CEL_ASSIGN_OR_RETURN(auto present, struct_value.HasFieldByName(field_name));
  if (!present) {
    return OptionalValue::None();
  }
  CEL_ASSIGN_OR_RETURN(auto field,
                       struct_value.GetFieldByName(value_manager, field_name));
  return OptionalValue::Of(value_manager.GetMemoryManager(), std::move(field));
}
// Implements `map.?key`: the empty optional when the key is absent,
// otherwise a present optional wrapping the mapped value.
absl::StatusOr<Value> SelectOptionalFieldMap(ValueManager& value_manager,
                                             const MapValue& map,
                                             const StringValue& key) {
  Value value;
  bool found;
  CEL_ASSIGN_OR_RETURN(std::tie(value, found), map.Find(value_manager, key));
  if (!found) {
    return OptionalValue::None();
  }
  return OptionalValue::Of(value_manager.GetMemoryManager(), std::move(value));
}
// Implements `optional.?key` where the optional wraps a map or a struct.
// Returns the empty optional if the receiver optional is empty, delegates to
// the map/struct overloads otherwise, and yields a no-matching-overload error
// for any other receiver or contained type.
absl::StatusOr<Value> SelectOptionalField(ValueManager& value_manager,
                                          const OpaqueValue& opaque_value,
                                          const StringValue& key) {
  auto optional_value = As<OptionalValue>(opaque_value);
  if (optional_value) {
    if (!optional_value->HasValue()) {
      return OptionalValue::None();
    }
    auto container = optional_value->Value();
    if (auto map_value = As<MapValue>(container); map_value) {
      return SelectOptionalFieldMap(value_manager, *map_value, key);
    }
    if (auto struct_value = As<StructValue>(container); struct_value) {
      return SelectOptionalFieldStruct(value_manager, *struct_value, key);
    }
  }
  return ErrorValue{runtime_internal::CreateNoMatchingOverloadError("_[?_]")};
}
// Implements `map[?key]` with heterogeneous numeric key lookup:
// - a double key is tried as an int key and then a uint key, but only when
//   the conversion is lossless;
// - any other key is tried as-is first; if not found, a non-negative int key
//   is retried as uint, and a uint key that fits in int64 is retried as int.
// Returns a present optional on a hit, the empty optional otherwise.
absl::StatusOr<Value> MapOptIndexOptionalValue(ValueManager& value_manager,
                                               const MapValue& map,
                                               const Value& key) {
  Value value;
  bool ok;
  if (auto double_key = cel::As<DoubleValue>(key); double_key) {
    auto number = internal::Number::FromDouble(double_key->NativeValue());
    if (number.LosslessConvertibleToInt()) {
      CEL_ASSIGN_OR_RETURN(std::tie(value, ok),
                           map.Find(value_manager, IntValue{number.AsInt()}));
      if (ok) {
        return OptionalValue::Of(value_manager.GetMemoryManager(),
                                 std::move(value));
      }
    }
    if (number.LosslessConvertibleToUint()) {
      CEL_ASSIGN_OR_RETURN(std::tie(value, ok),
                           map.Find(value_manager, UintValue{number.AsUint()}));
      if (ok) {
        return OptionalValue::Of(value_manager.GetMemoryManager(),
                                 std::move(value));
      }
    }
  } else {
    // Exact-type lookup first.
    CEL_ASSIGN_OR_RETURN(std::tie(value, ok), map.Find(value_manager, key));
    if (ok) {
      return OptionalValue::Of(value_manager.GetMemoryManager(),
                               std::move(value));
    }
    // Cross-type numeric fallback: int <-> uint where representable.
    if (auto int_key = cel::As<IntValue>(key);
        int_key && int_key->NativeValue() >= 0) {
      CEL_ASSIGN_OR_RETURN(
          std::tie(value, ok),
          map.Find(value_manager,
                   UintValue{static_cast<uint64_t>(int_key->NativeValue())}));
      if (ok) {
        return OptionalValue::Of(value_manager.GetMemoryManager(),
                                 std::move(value));
      }
    } else if (auto uint_key = cel::As<UintValue>(key);
               uint_key &&
               uint_key->NativeValue() <=
                   static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
      CEL_ASSIGN_OR_RETURN(
          std::tie(value, ok),
          map.Find(value_manager,
                   IntValue{static_cast<int64_t>(uint_key->NativeValue())}));
      if (ok) {
        return OptionalValue::Of(value_manager.GetMemoryManager(),
                                 std::move(value));
      }
    }
  }
  return OptionalValue::None();
}
// Implements `list[?index]`: the element wrapped in a present optional when
// 0 <= index < size, otherwise the empty optional.
absl::StatusOr<Value> ListOptIndexOptionalInt(ValueManager& value_manager,
                                              const ListValue& list,
                                              int64_t key) {
  CEL_ASSIGN_OR_RETURN(auto size, list.Size());
  if (key >= 0 && static_cast<size_t>(key) < size) {
    CEL_ASSIGN_OR_RETURN(auto element,
                         list.Get(value_manager, static_cast<size_t>(key)));
    return OptionalValue::Of(value_manager.GetMemoryManager(),
                             std::move(element));
  }
  return OptionalValue::None();
}
// Implements `optional[?key]` where the optional wraps a map or a list.
// Empty receiver -> empty optional; map container -> heterogeneous map
// lookup; list container with an int key -> bounds-checked list lookup.
// Anything else yields a no-matching-overload error.
absl::StatusOr<Value> OptionalOptIndexOptionalValue(
    ValueManager& value_manager, const OpaqueValue& opaque_value,
    const Value& key) {
  if (auto optional_value = As<OptionalValue>(opaque_value); optional_value) {
    if (!optional_value->HasValue()) {
      return OptionalValue::None();
    }
    auto container = optional_value->Value();
    if (auto map_value = cel::As<MapValue>(container); map_value) {
      return MapOptIndexOptionalValue(value_manager, *map_value, key);
    }
    if (auto list_value = cel::As<ListValue>(container); list_value) {
      if (auto int_value = cel::As<IntValue>(key); int_value) {
        return ListOptIndexOptionalInt(value_manager, *list_value,
                                       int_value->NativeValue());
      }
    }
  }
  return ErrorValue{runtime_internal::CreateNoMatchingOverloadError("_[?_]")};
}
// Registers every optional-type function into `registry`:
// constructors (optional.of / ofNonZeroValue / none), receiver accessors
// (value / hasValue), optional-select `_?._`, and optional-index `_[?_]`.
// Fails fast when the runtime options optional types depend on are off.
absl::Status RegisterOptionalTypeFunctions(FunctionRegistry& registry,
                                           const RuntimeOptions& options) {
  if (!options.enable_qualified_type_identifiers) {
    return absl::FailedPreconditionError(
        "optional_type requires "
        "RuntimeOptions.enable_qualified_type_identifiers");
  }
  if (!options.enable_heterogeneous_equality) {
    return absl::FailedPreconditionError(
        "optional_type requires RuntimeOptions.enable_heterogeneous_equality");
  }
  // Constructors.
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, Value>::CreateDescriptor("optional.of",
                                                           false),
      UnaryFunctionAdapter<Value, Value>::WrapFunction(&OptionalOf)));
  CEL_RETURN_IF_ERROR(
      registry.Register(UnaryFunctionAdapter<Value, Value>::CreateDescriptor(
                            "optional.ofNonZeroValue", false),
                        UnaryFunctionAdapter<Value, Value>::WrapFunction(
                            &OptionalOfNonZeroValue)));
  CEL_RETURN_IF_ERROR(registry.Register(
      VariadicFunctionAdapter<Value>::CreateDescriptor("optional.none", false),
      VariadicFunctionAdapter<Value>::WrapFunction(&OptionalNone)));
  // Receiver-style accessors on the opaque optional value.
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<absl::StatusOr<Value>,
                           OpaqueValue>::CreateDescriptor("value", true),
      UnaryFunctionAdapter<absl::StatusOr<Value>, OpaqueValue>::WrapFunction(
          &OptionalGetValue)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<absl::StatusOr<Value>,
                           OpaqueValue>::CreateDescriptor("hasValue", true),
      UnaryFunctionAdapter<absl::StatusOr<Value>, OpaqueValue>::WrapFunction(
          &OptionalHasValue)));
  // Optional field selection `_?._` over struct, map, and optional receivers.
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, StructValue,
                            StringValue>::CreateDescriptor("_?._", false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, StructValue, StringValue>::
          WrapFunction(&SelectOptionalFieldStruct)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, MapValue,
                            StringValue>::CreateDescriptor("_?._", false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, MapValue, StringValue>::
          WrapFunction(&SelectOptionalFieldMap)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, OpaqueValue,
                            StringValue>::CreateDescriptor("_?._", false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, OpaqueValue,
                            StringValue>::WrapFunction(&SelectOptionalField)));
  // Optional indexing `_[?_]` over map, list, and optional receivers.
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, MapValue,
                            Value>::CreateDescriptor("_[?_]", false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, MapValue,
                            Value>::WrapFunction(&MapOptIndexOptionalValue)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, ListValue,
                            int64_t>::CreateDescriptor("_[?_]", false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, ListValue,
                            int64_t>::WrapFunction(&ListOptIndexOptionalInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<absl::StatusOr<Value>, OpaqueValue,
                            Value>::CreateDescriptor("_[?_]", false),
      BinaryFunctionAdapter<absl::StatusOr<Value>, OpaqueValue, Value>::
          WrapFunction(&OptionalOptIndexOptionalValue)));
  return absl::OkStatus();
}
// Type reflector that makes the name "optional_type" resolvable at runtime;
// every other name is passed through unresolved (nullopt).
class OptionalTypeProvider final : public TypeReflector {
 protected:
  absl::StatusOr<absl::optional<Type>> FindTypeImpl(
      TypeFactory&, absl::string_view name) const override {
    if (name == "optional_type") {
      return OptionalType{};
    }
    return absl::nullopt;
  }
};
}
// Enables CEL optional types on `builder`: registers the optional functions,
// exposes `optional_type` via a type provider, and flips the optional-types
// flag on the underlying expression builder.
absl::Status EnableOptionalTypes(RuntimeBuilder& builder) {
  // RuntimeFriendAccess + down_cast reach the concrete RuntimeImpl, which is
  // the only object that can toggle optional support on the expr builder.
  auto& runtime = cel::internal::down_cast<runtime_internal::RuntimeImpl&>(
      runtime_internal::RuntimeFriendAccess::GetMutableRuntime(builder));
  CEL_RETURN_IF_ERROR(RegisterOptionalTypeFunctions(
      builder.function_registry(), runtime.expr_builder().options()));
  builder.type_registry().AddTypeProvider(
      std::make_unique<OptionalTypeProvider>());
  runtime.expr_builder().enable_optional_types();
  return absl::OkStatus();
}
} | #include "runtime/optional_types.h"
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "base/function.h"
#include "base/function_descriptor.h"
#include "common/kind.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "common/values/legacy_value_manager.h"
#include "extensions/protobuf/memory_manager.h"
#include "extensions/protobuf/runtime_adapter.h"
#include "internal/testing.h"
#include "parser/options.h"
#include "parser/parser.h"
#include "runtime/activation.h"
#include "runtime/internal/runtime_impl.h"
#include "runtime/reference_resolver.h"
#include "runtime/runtime.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
#include "runtime/standard_runtime_builder_factory.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::cel::extensions::ProtobufRuntimeAdapter;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::cel::test::BoolValueIs;
using ::cel::test::IntValueIs;
using ::cel::test::OptionalValueIs;
using ::cel::test::OptionalValueIsEmpty;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using ::google::api::expr::parser::ParserOptions;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
// Matches a registered receiver-style overload `opaque.<name>()`.
MATCHER_P(MatchesOptionalReceiver1, name, "") {
  const FunctionDescriptor& descriptor = arg.descriptor;
  const std::vector<Kind> expected{Kind::kOpaque};
  return descriptor.name() == name && descriptor.receiver_style() &&
         descriptor.types() == expected;
}
// Matches a registered receiver-style overload `opaque.<name>(<kind>)`.
MATCHER_P2(MatchesOptionalReceiver2, name, kind, "") {
  const FunctionDescriptor& descriptor = arg.descriptor;
  const std::vector<Kind> expected{Kind::kOpaque, kind};
  return descriptor.name() == name && descriptor.receiver_style() &&
         descriptor.types() == expected;
}
// Matches the non-receiver optional-select operator `_?._(kind1, kind2)`.
MATCHER_P2(MatchesOptionalSelect, kind1, kind2, "") {
  const FunctionDescriptor& descriptor = arg.descriptor;
  const std::vector<Kind> expected{kind1, kind2};
  return descriptor.name() == "_?._" && !descriptor.receiver_style() &&
         descriptor.types() == expected;
}
// Matches the non-receiver optional-index operator `_[?_](kind1, kind2)`.
MATCHER_P2(MatchesOptionalIndex, kind1, kind2, "") {
  const FunctionDescriptor& descriptor = arg.descriptor;
  const std::vector<Kind> expected{kind1, kind2};
  return descriptor.name() == "_[?_]" && !descriptor.receiver_style() &&
         descriptor.types() == expected;
}
// EnableOptionalTypes must fail when heterogeneous equality is disabled.
TEST(EnableOptionalTypes, HeterogeneousEqualityRequired) {
  ASSERT_OK_AND_ASSIGN(auto builder,
                       CreateStandardRuntimeBuilder(RuntimeOptions{
                           .enable_qualified_type_identifiers = true,
                           .enable_heterogeneous_equality = false}));
  EXPECT_THAT(EnableOptionalTypes(builder),
              StatusIs(absl::StatusCode::kFailedPrecondition));
}
// EnableOptionalTypes must fail when qualified type identifiers are disabled.
TEST(EnableOptionalTypes, QualifiedTypeIdentifiersRequired) {
  ASSERT_OK_AND_ASSIGN(auto builder,
                       CreateStandardRuntimeBuilder(RuntimeOptions{
                           .enable_qualified_type_identifiers = false,
                           .enable_heterogeneous_equality = true}));
  EXPECT_THAT(EnableOptionalTypes(builder),
              StatusIs(absl::StatusCode::kFailedPrecondition));
}
// With both required options set, EnableOptionalTypes succeeds.
TEST(EnableOptionalTypes, PreconditionsSatisfied) {
  ASSERT_OK_AND_ASSIGN(auto builder,
                       CreateStandardRuntimeBuilder(RuntimeOptions{
                           .enable_qualified_type_identifiers = true,
                           .enable_heterogeneous_equality = true}));
  EXPECT_THAT(EnableOptionalTypes(builder), IsOk());
}
// Verifies the exact overload set EnableOptionalTypes registers: receiver
// accessors (hasValue/value), optional select `_?._`, and index `_[?_]`.
TEST(EnableOptionalTypes, Functions) {
  ASSERT_OK_AND_ASSIGN(auto builder,
                       CreateStandardRuntimeBuilder(RuntimeOptions{
                           .enable_qualified_type_identifiers = true,
                           .enable_heterogeneous_equality = true}));
  ASSERT_THAT(EnableOptionalTypes(builder), IsOk());
  EXPECT_THAT(builder.function_registry().FindStaticOverloads("hasValue", true,
                                                              {Kind::kOpaque}),
              ElementsAre(MatchesOptionalReceiver1("hasValue")));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads("value", true,
                                                              {Kind::kOpaque}),
              ElementsAre(MatchesOptionalReceiver1("value")));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads(
                  "_?._", false, {Kind::kStruct, Kind::kString}),
              ElementsAre(MatchesOptionalSelect(Kind::kStruct, Kind::kString)));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads(
                  "_?._", false, {Kind::kMap, Kind::kString}),
              ElementsAre(MatchesOptionalSelect(Kind::kMap, Kind::kString)));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads(
                  "_?._", false, {Kind::kOpaque, Kind::kString}),
              ElementsAre(MatchesOptionalSelect(Kind::kOpaque, Kind::kString)));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads(
                  "_[?_]", false, {Kind::kMap, Kind::kAny}),
              ElementsAre(MatchesOptionalIndex(Kind::kMap, Kind::kAny)));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads(
                  "_[?_]", false, {Kind::kList, Kind::kInt}),
              ElementsAre(MatchesOptionalIndex(Kind::kList, Kind::kInt)));
  EXPECT_THAT(builder.function_registry().FindStaticOverloads(
                  "_[?_]", false, {Kind::kOpaque, Kind::kAny}),
              ElementsAre(MatchesOptionalIndex(Kind::kOpaque, Kind::kAny)));
}
// One expression-evaluation case: a name for the test, the CEL expression
// text, and a matcher for the expected result value.
struct EvaluateResultTestCase {
  std::string name;
  std::string expression;
  test::ValueMatcher value_matcher;
};
// Fixture parameterized over (memory management, test case, short-circuiting
// flag) — see the std::get indices in the accessors below.
class OptionalTypesTest
    : public common_internal::ThreadCompatibleValueTest<EvaluateResultTestCase,
                                                        bool> {
 public:
  const EvaluateResultTestCase& GetTestCase() {
    return std::get<1>(GetParam());
  }
  bool EnableShortCircuiting() { return std::get<2>(GetParam()); }
};
// Makes parameterized test output print the case name.
std::ostream& operator<<(std::ostream& os,
                         const EvaluateResultTestCase& test_case) {
  return os << test_case.name;
}
// Evaluates each case through the recursive (non-stack-machine) planner —
// max_recursion_depth = -1 — and asserts the recursive impl was actually
// selected before checking the result.
TEST_P(OptionalTypesTest, RecursivePlan) {
  RuntimeOptions opts;
  opts.use_legacy_container_builders = false;
  opts.enable_qualified_type_identifiers = true;
  opts.max_recursion_depth = -1;
  opts.short_circuiting = EnableShortCircuiting();
  const EvaluateResultTestCase& test_case = GetTestCase();
  ASSERT_OK_AND_ASSIGN(auto builder, CreateStandardRuntimeBuilder(opts));
  ASSERT_OK(EnableOptionalTypes(builder));
  ASSERT_OK(
      EnableReferenceResolver(builder, ReferenceResolverEnabled::kAlways));
  ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
  ASSERT_OK_AND_ASSIGN(ParsedExpr expr,
                       Parse(test_case.expression, "<input>",
                             ParserOptions{.enable_optional_syntax = true}));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<Program> program,
                       ProtobufRuntimeAdapter::CreateProgram(*runtime, expr));
  // Guard: confirm we are exercising the recursive program implementation.
  EXPECT_TRUE(runtime_internal::TestOnly_IsRecursiveImpl(program.get()));
  cel::common_internal::LegacyValueManager value_factory(
      memory_manager(), runtime->GetTypeProvider());
  Activation activation;
  ASSERT_OK_AND_ASSIGN(Value result,
                       program->Evaluate(activation, value_factory));
  EXPECT_THAT(result, test_case.value_matcher) << test_case.expression;
}
// Evaluates each case with default planner settings (no forced recursion)
// and checks the result against the case's matcher.
TEST_P(OptionalTypesTest, Defaults) {
  RuntimeOptions opts;
  opts.use_legacy_container_builders = false;
  opts.enable_qualified_type_identifiers = true;
  opts.short_circuiting = EnableShortCircuiting();
  const EvaluateResultTestCase& test_case = GetTestCase();
  ASSERT_OK_AND_ASSIGN(auto builder, CreateStandardRuntimeBuilder(opts));
  ASSERT_OK(EnableOptionalTypes(builder));
  ASSERT_OK(
      EnableReferenceResolver(builder, ReferenceResolverEnabled::kAlways));
  ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
  ASSERT_OK_AND_ASSIGN(ParsedExpr expr,
                       Parse(test_case.expression, "<input>",
                             ParserOptions{.enable_optional_syntax = true}));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<Program> program,
                       ProtobufRuntimeAdapter::CreateProgram(*runtime, expr));
  common_internal::LegacyValueManager value_factory(this->memory_manager(),
                                                    runtime->GetTypeProvider());
  Activation activation;
  ASSERT_OK_AND_ASSIGN(Value result,
                       program->Evaluate(activation, value_factory));
  EXPECT_THAT(result, test_case.value_matcher) << test_case.expression;
}
// Cross product of {pooling, ref-counting} memory management, the optional
// expression cases below, and {short-circuiting on, off}.
INSTANTIATE_TEST_SUITE_P(
    Basic, OptionalTypesTest,
    testing::Combine(
        testing::Values(MemoryManagement::kPooling,
                        MemoryManagement::kReferenceCounting),
        testing::ValuesIn(std::vector<EvaluateResultTestCase>{
            {"optional_none_hasValue", "optional.none().hasValue()",
             BoolValueIs(false)},
            {"optional_of_hasValue", "optional.of(0).hasValue()",
             BoolValueIs(true)},
            {"optional_ofNonZeroValue_hasValue",
             "optional.ofNonZeroValue(0).hasValue()", BoolValueIs(false)},
            {"optional_or_absent",
             "optional.ofNonZeroValue(0).or(optional.ofNonZeroValue(0))",
             OptionalValueIsEmpty()},
            {"optional_or_present", "optional.of(1).or(optional.none())",
             OptionalValueIs(IntValueIs(1))},
            {"optional_orValue_absent", "optional.ofNonZeroValue(0).orValue(1)",
             IntValueIs(1)},
            {"optional_orValue_present", "optional.of(1).orValue(2)",
             IntValueIs(1)},
            {"list_of_optional", "[optional.of(1)][0].orValue(1)",
             IntValueIs(1)}}),
        testing::Bool()),
    OptionalTypesTest::ToString);
// Function that counts its invocations; used to prove a call site was never
// reached (the counter must stay zero).
class UnreachableFunction final : public cel::Function {
 public:
  explicit UnreachableFunction(int64_t* count) : count_(count) {}
  absl::StatusOr<Value> Invoke(const InvokeContext& context,
                               absl::Span<const Value> args) const override {
    ++(*count_);
    return ErrorValue{absl::CancelledError()};
  }
 private:
  int64_t* const count_;
};
// An error inside optional.of() must propagate without evaluating the
// orValue() fallback: `unreachable()` is never invoked and the final result
// is the original divide-by-zero error.
TEST(OptionalTypesTest, ErrorShortCircuiting) {
  RuntimeOptions opts{.enable_qualified_type_identifiers = true};
  google::protobuf::Arena arena;
  auto memory_manager = ProtoMemoryManagerRef(&arena);
  ASSERT_OK_AND_ASSIGN(auto builder, CreateStandardRuntimeBuilder(opts));
  int64_t unreachable_count = 0;
  ASSERT_OK(EnableOptionalTypes(builder));
  ASSERT_OK(
      EnableReferenceResolver(builder, ReferenceResolverEnabled::kAlways));
  ASSERT_OK(builder.function_registry().Register(
      cel::FunctionDescriptor("unreachable", false, {}),
      std::make_unique<UnreachableFunction>(&unreachable_count)));
  ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
  ASSERT_OK_AND_ASSIGN(
      ParsedExpr expr,
      Parse("optional.of(1 / 0).orValue(unreachable())", "<input>",
            ParserOptions{.enable_optional_syntax = true}));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<Program> program,
                       ProtobufRuntimeAdapter::CreateProgram(*runtime, expr));
  common_internal::LegacyValueManager value_factory(memory_manager,
                                                    runtime->GetTypeProvider());
  Activation activation;
  ASSERT_OK_AND_ASSIGN(Value result,
                       program->Evaluate(activation, value_factory));
  EXPECT_EQ(unreachable_count, 0);
  ASSERT_TRUE(result->Is<ErrorValue>()) << result->DebugString();
  EXPECT_THAT(result.GetError().NativeValue(),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("divide by zero")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/optional_types.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/optional_types_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
e5459b5a-8eaa-4c4a-b2c6-5a56b32239f4 | cpp | tensorflow/tensorflow | matrix | third_party/xla/xla/hlo/builder/lib/matrix.cc | third_party/xla/xla/hlo/builder/lib/matrix_test.cc | #include "xla/hlo/builder/lib/matrix.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <map>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Builds an m x n identity matrix of `type`: 1 where the row index equals
// the column index, 0 elsewhere.
XlaOp IdentityMatrix(XlaBuilder* builder, PrimitiveType type, int64_t m,
                     int64_t n) {
  XlaOp row_ids = Iota(builder, U32, m);
  XlaOp col_ids = Iota(builder, U32, n);
  XlaOp on_diagonal = Eq(row_ids, Broadcast(col_ids, {m}), {0});
  return ConvertElementType(on_diagonal, type);
}
// Returns a boolean mask with the same shape as `x` that is true exactly on
// the k-th diagonal (k = `diagonal`, positive = above the main diagonal) of
// each trailing m x n matrix; requires rank >= 2.
XlaOp GetDiagonalMask(XlaOp x, int diagonal) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    auto n_dims = static_cast<int32_t>(shape.rank());
    TF_RET_CHECK(n_dims >= 2);
    auto m = shape.dimensions(n_dims - 2);
    auto n = shape.dimensions(n_dims - 1);
    // Leading (batch) dimensions the mask is broadcast over.
    absl::Span<const int64_t> major_dims =
        shape.dimensions().subspan(0, n_dims - 2);
    auto a = Iota(builder, S32, n);
    // b[i] = i + diagonal; the mask is true where column == row + diagonal.
    auto b = Iota(builder, S32, m) + ConstantR0WithType(builder, S32, diagonal);
    auto indicator = Eq(b, Broadcast(a, {m}), {0});
    auto mask = Broadcast(indicator, major_dims);
    return mask;
  });
}
// Extracts the k-th diagonal of the trailing m x n matrices of `x` by masking
// off-diagonal entries and reducing one matrix dimension away.
XlaOp GetMatrixDiagonal(XlaOp x, int k) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    auto n_dims = static_cast<int32_t>(shape.rank());
    TF_RET_CHECK(n_dims >= 2);
    const int64_t m = shape.dimensions(n_dims - 2);
    const int64_t n = shape.dimensions(n_dims - 1);
    // k entirely outside the matrix -> zero-length diagonal dimension.
    if (k <= -m || k >= n) {
      auto zero_size_shape = shape;
      zero_size_shape.DeleteDimension(n_dims - 1);
      zero_size_shape.set_dimensions(n_dims - 2, 0);
      return ConstantLiteral(builder, Literal{zero_size_shape});
    }
    auto mask = GetDiagonalMask(x, k);
    // Reduce over rows when the diagonal is limited by the row count
    // (k < 0, or k == 0 with m >= n); otherwise reduce over columns.
    int64_t reduce_dim = n_dims - 1;
    if ((k == 0 && m >= n) || k < 0) {
      reduce_dim = n_dims - 2;
    }
    auto result = Reduce(
        Select(mask, x, Zeros(builder, shape)), ScalarLike(x, 0),
        CreateScalarIdentityWithZeroComputation(shape.element_type(), builder),
        {reduce_dim});
    if (k == 0) {
      return result;
    }
    // Trim the reduced axis to the true diagonal length for off-diagonals.
    return SliceInMinorDims(result, {0},
                            {k > 0 ? std::min(m, n - k) : std::min(n, m + k)});
  });
}
// Extracts the k-th diagonal using a Gather of (row, col) index pairs rather
// than mask-and-reduce; the result length is clamped to the diagonal's true
// extent (possibly zero).
XlaOp GetMatrixDiagonalViaGather(XlaOp x, int k) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    auto n_dims = static_cast<int32_t>(shape.rank());
    TF_RET_CHECK(n_dims >= 2);
    const int64_t m = shape.dimensions(n_dims - 2);
    const int64_t n = shape.dimensions(n_dims - 1);
    // Two index components per gathered element: (row, col).
    const int64_t num_index_dims = 2;
    const int64_t axis = n_dims - num_index_dims;
    // Length of the k-th diagonal, clamped at zero when out of range.
    const int64_t diag_len =
        std::max(std::min(m + std::min(k, 0), n - std::max(k, 0)), int64_t{0});
    // start_indices[i] = (i + max(-k,0), i + max(k,0)).
    XlaOp diag_base_indices = BroadcastInDim(Iota(builder, S32, diag_len),
                                             {diag_len, num_index_dims}, {0});
    XlaOp diag_offset =
        Broadcast(ConstantR1<int>(builder, {std::max(-k, 0), std::max(k, 0)}),
                  {diag_len});
    XlaOp start_indices = Add(diag_base_indices, diag_offset);
    xla::GatherDimensionNumbers dim_numbers;
    std::vector<int64_t> slice_sizes;
    slice_sizes.reserve(n_dims);
    for (int64_t i = 0; i < n_dims; i++) {
      int64_t window_bound;
      if (axis <= i) {
        // The two matrix dimensions are indexed and collapsed away.
        dim_numbers.add_collapsed_slice_dims(i);
        dim_numbers.add_start_index_map(i);
        window_bound = (shape.dimensions(i) != 0) ? 1 : 0;
      } else {
        // Batch dimensions are carried through whole.
        dim_numbers.add_offset_dims(i);
        window_bound = shape.dimensions(i);
      }
      slice_sizes.push_back(window_bound);
    }
    dim_numbers.set_index_vector_dim(1);
    return Gather(x, start_indices, dim_numbers, slice_sizes,
                  true);
  });
}
// Returns `matrix` with its k-th diagonal replaced by `diag`. `diag` is
// zero-padded up to the matrix dimension it is broadcast along, then written
// in through the diagonal mask; off-diagonal entries are untouched.
XlaOp SetMatrixDiagonal(XlaOp matrix, XlaOp diag, int k) {
  XlaBuilder* builder = matrix.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(matrix));
    TF_ASSIGN_OR_RETURN(Shape diag_shape, builder->GetShape(diag));
    auto n_dims = static_cast<int32_t>(shape.rank());
    TF_RET_CHECK(n_dims >= 2);
    const int64_t m = shape.dimensions(n_dims - 2);
    const int64_t n = shape.dimensions(n_dims - 1);
    const int64_t d = diag_shape.dimensions(n_dims - 2);
    std::vector<int64_t> broadcast_dims(n_dims - 1);
    absl::c_iota(broadcast_dims, 0);
    // For k >= 0 the diagonal runs along rows (pad to m); for k < 0 it runs
    // along columns (pad to n and shift the broadcast to the last dim).
    int64_t pad_high = m - d;
    if (k < 0) {
      ++(broadcast_dims.back());
      pad_high = n - d;
    }
    if (pad_high != 0) {
      PaddingConfig padding_config;
      for (int64_t i = 0; i < diag_shape.rank() - 1; ++i) {
        auto* dims = padding_config.add_dimensions();
        dims->set_edge_padding_low(0);
        dims->set_interior_padding(0);
        dims->set_edge_padding_high(0);
      }
      auto* dims = padding_config.add_dimensions();
      dims->set_edge_padding_low(0);
      dims->set_interior_padding(0);
      dims->set_edge_padding_high(pad_high);
      diag = Pad(diag, ScalarLike(diag, 0), padding_config);
    }
    return Select(GetDiagonalMask(matrix, k),
                  BroadcastInDim(diag, shape.dimensions(), broadcast_dims),
                  matrix);
  });
}
// Returns a boolean mask with the same shape as `x` that is true on and
// below the `diagonal`-th diagonal of each trailing m x n matrix (i.e. where
// row + diagonal >= column); requires rank >= 2.
XlaOp TriangleMask(XlaOp x, int diagonal) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    const int64_t n_dims = shape.rank();
    TF_RET_CHECK(n_dims >= 2);
    const int64_t m = shape.dimensions(n_dims - 2);
    const int64_t n = shape.dimensions(n_dims - 1);
    // Leading (batch) dimensions the mask is broadcast over.
    absl::Span<const int64_t> major_dims =
        shape.dimensions().subspan(0, n_dims - 2);
    auto a = Iota(builder, S32, n);
    auto b = Iota(builder, S32, m) + ConstantR0<int32_t>(builder, diagonal);
    // Direct initialization instead of declare-then-assign.
    XlaOp indicator = Ge(b, Broadcast(a, {m}), {0});
    return Broadcast(indicator, major_dims);
  });
}
// Zeroes out one triangle of `x`: keeps the lower triangle (including the
// main diagonal) when `lower` is true, the upper triangle otherwise.
XlaOp Triangle(XlaOp x, bool lower) {
  if (lower) {
    return Select(TriangleMask(x, 0), x, ZerosLike(x));
  }
  return Select(TriangleMask(x, -1), ZerosLike(x), x);
}

// Keeps the upper triangle of `x` (zeroing strictly-below-diagonal entries).
XlaOp UpperTriangle(XlaOp x) { return Triangle(x, false); }

// Keeps the lower triangle of `x` (zeroing strictly-above-diagonal entries).
XlaOp LowerTriangle(XlaOp x) { return Triangle(x, true); }
// Symmetrizes the trailing square matrices of `x` by mirroring one triangle
// (chosen by `lower`) onto the other. For complex types the mirrored
// imaginary part is negated (the mirror is the conjugate transpose) and the
// diagonal's imaginary part is zeroed.
XlaOp Symmetrize(XlaOp x, bool lower) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    if (shape.rank() < 2) {
      return InvalidArgument(
          "Argument to symmetrize must have >= 2 dimensions, got %s",
          shape.ToString());
    }
    const int64_t m = ShapeUtil::GetDimension(shape, -2);
    const int64_t n = ShapeUtil::GetDimension(shape, -1);
    if (m != n) {
      return InvalidArgument(
          "The two most minor dimensions of the argument to symmetrize must be "
          "equal size, got %s",
          shape.ToString());
    }
    // True on the triangle whose values are kept (including the diagonal).
    auto mask = lower ? TriangleMask(x, 0) : Not(TriangleMask(x, -1));
    if (primitive_util::IsComplexType(shape.element_type())) {
      auto re = Select(mask, Real(x), TransposeInMinorDims(Real(x)));
      // Strictly off-diagonal part of the kept triangle: the diagonal's
      // imaginary component is forced to zero.
      auto im_mask = lower ? TriangleMask(x, -1) : Not(TriangleMask(x, 0));
      auto im = Select(im_mask, Imag(x), ZerosLike(Imag(x)));
      im = Select(mask, im, -TransposeInMinorDims(im));
      return Complex(re, im);
    } else {
      return Select(mask, x, TransposeInMinorDims(x));
    }
  });
}
namespace {
// Given a per-dimension label config, detects repeated labels (operand
// diagonals). Returns nullopt when all labels are unique; otherwise returns
// {unique labels, dims to reduce away, dims kept for broadcast}, where a
// dimension is "reduced" if its label occurred earlier in the config.
std::optional<std::array<std::vector<int64_t>, 3>> EinsumDiagonalLabels(
    absl::Span<const int64_t> config) {
  std::vector<int64_t> unique_labels;
  std::vector<int64_t> reduce_dims;
  std::vector<int64_t> broadcast_dims;
  for (auto label = config.begin(); label != config.end(); ++label) {
    auto first_label = absl::c_find(config, *label);
    auto dim = label - config.begin();
    if (first_label == label) {
      // First occurrence of this label: keep the dimension.
      unique_labels.push_back(*label);
      broadcast_dims.push_back(dim);
    } else {
      reduce_dims.push_back(dim);
    }
  }
  if (unique_labels.size() == config.size()) {
    return std::nullopt;
  }
  return {{unique_labels, reduce_dims, broadcast_dims}};
}
// Zeroes every element of `x` that is off the generalized diagonal implied by
// repeated labels in `config` (i.e. where the indices of same-labeled
// dimensions differ).
xla::XlaOp EinsumDiagonalMask(XlaOp x, absl::Span<const int64_t> config) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
    Shape iota_shape = ShapeUtil::MakeShape(S32, x_shape.dimensions());
    XlaOp mask = ConstantR0(builder, true);
    // AND together an index-equality condition for each repeated label.
    for (auto label = config.begin(); label != config.end(); ++label) {
      const int64_t dim = label - config.begin();
      auto first_label = absl::c_find(config, *label);
      if (first_label != label) {
        const int64_t first_dim = first_label - config.begin();
        mask = And(mask, Eq(Iota(builder, iota_shape, first_dim),
                            Iota(builder, iota_shape, dim)));
      }
    }
    return Select(mask, x, ZerosLike(x));
  });
}
// Extracts the generalized diagonal of `x`: masks off-diagonal elements and
// sums away the duplicate-labeled dimensions, leaving one dimension per
// unique label. No-op when `config` has no repeated labels.
xla::XlaOp EinsumDiagonal(XlaOp x, absl::Span<const int64_t> config) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    auto labels = EinsumDiagonalLabels(config);
    if (!labels) {
      return x;
    }
    auto zero = ScalarLike(x, 0);
    TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
    return Reduce(EinsumDiagonalMask(x, config), zero,
                  CreateScalarIdentityWithZeroComputation(
                      x_shape.element_type(), builder),
                  labels->at(1));
  });
}
// Inverse of EinsumDiagonal: broadcasts `x` (which has one dimension per
// unique label) back to the full shape implied by `config` and zeroes the
// off-diagonal entries. No-op when `config` has no repeated labels.
xla::XlaOp EinsumInverseDiagonal(XlaOp x, absl::Span<const int64_t> config) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    auto labels = EinsumDiagonalLabels(config);
    if (!labels) {
      return x;
    }
    TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
    // Rebuild the full dimension list; a repeated label reuses the size of
    // its first occurrence.
    std::vector<int64_t> broadcast_sizes;
    int64_t x_dim = 0;
    for (auto label = config.begin(); label != config.end(); ++label) {
      auto first_label = absl::c_find(config, *label);
      if (first_label == label) {
        broadcast_sizes.push_back(x_shape.dimensions(x_dim));
        ++x_dim;
      } else {
        broadcast_sizes.push_back(
            broadcast_sizes[first_label - config.begin()]);
      }
    }
    x = BroadcastInDim(x, broadcast_sizes, labels->at(2));
    return EinsumDiagonalMask(x, config);
  });
}
}
namespace {
// Removes the dimensions listed in `to_delete` from `shape` and shifts the
// entries of `batch_dims` and `contracting_dims` down to account for the
// removals. Precondition: `to_delete` is sorted ascending (callers sort with
// absl::c_sort before invoking), since deletion proceeds from the back so
// earlier indices stay valid.
template <typename C>
void DeleteDimsFromContainer(absl::Span<const int64_t> to_delete, Shape* shape,
                             C* batch_dims, C* contracting_dims) {
  if (to_delete.empty()) {
    return;
  }
  for (int64_t i = to_delete.size() - 1; i >= 0; --i) {
    int64_t dim = to_delete[i];
    shape->DeleteDimension(dim);
    // Every recorded dimension above the deleted one moves down by one.
    for (auto& b : *batch_dims) {
      if (b > dim) {
        --b;
      }
    }
    for (auto& c : *contracting_dims) {
      if (c > dim) {
        --c;
      }
    }
  }
}
}
// General einsum over numeric per-dimension labels. Labels shared by x, y and
// the output are batched; labels shared by x and y only are contracted; labels
// appearing only in the output become new size-1 dimensions. Repeated labels
// within one config are handled by the diagonal helpers, then the problem is
// lowered to a single DotGeneral plus transpose/reshape.
xla::XlaOp Einsum(xla::XlaOp x, absl::Span<const int64_t> x_config,
                  xla::XlaOp y, absl::Span<const int64_t> y_config,
                  absl::Span<const int64_t> output_config,
                  xla::PrecisionConfig::Precision precision,
                  std::optional<PrimitiveType> preferred_element_type,
                  bool grad_x, bool grad_y) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    // Strip repeated labels within each operand (take the diagonal), then
    // recurse with deduplicated configs.
    auto x_diagonal_labels = EinsumDiagonalLabels(x_config);
    if (x_diagonal_labels) {
      return Einsum(EinsumDiagonal(x, x_config), x_diagonal_labels->at(0), y,
                    y_config, output_config, precision, preferred_element_type,
                    grad_x, grad_y);
    }
    auto y_diagonal_labels = EinsumDiagonalLabels(y_config);
    if (y_diagonal_labels) {
      return Einsum(x, x_config, EinsumDiagonal(y, y_config),
                    y_diagonal_labels->at(0), output_config, precision,
                    preferred_element_type, grad_x, grad_y);
    }
    // Repeated labels in the output: compute the unique-label einsum, then
    // scatter it back onto the diagonal.
    auto output_diagonal_labels = EinsumDiagonalLabels(output_config);
    if (output_diagonal_labels) {
      return EinsumInverseDiagonal(
          Einsum(x, x_config, y, y_config, output_diagonal_labels->at(0),
                 precision, preferred_element_type, grad_x, grad_y),
          output_config);
    }
    TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
    TF_ASSIGN_OR_RETURN(Shape y_shape, builder->GetShape(y));
    const int64_t x_rank = x_config.size();
    const int64_t y_rank = y_config.size();
    const int64_t output_rank = output_config.size();
    absl::flat_hash_set<int64_t> x_map;
    absl::flat_hash_set<int64_t> y_map;
    absl::flat_hash_set<int64_t> output_map;
    for (auto d : x_config) {
      x_map.insert(d);
    }
    for (auto d : y_config) {
      y_map.insert(d);
    }
    for (auto d : output_config) {
      output_map.insert(d);
    }
    DotDimensionNumbers dnums;
    // A label present in x, y and the output is a batch dimension; present in
    // both inputs but not the output, it is contracted.
    auto is_batch_dim = [&](int64_t d) {
      return x_map.contains(d) && y_map.contains(d) && output_map.contains(d);
    };
    auto is_contracting = [&](int64_t d) {
      return x_map.contains(d) && y_map.contains(d);
    };
    auto rhs_dimension_number = [&](int64_t d) {
      return absl::c_find(y_config, d) - y_config.begin();
    };
    absl::InlinedVector<int64_t, 8> rhs_outer_dims;
    absl::InlinedVector<int64_t, 8> lhs_outer_dims;
    absl::InlinedVector<int64_t, 8> rhs_delete_dims;
    absl::InlinedVector<int64_t, 8> lhs_delete_dims;
    // Classify each x dimension. A shared dimension whose sizes disagree is
    // only legal via broadcasting: the size-1 side is summed away (deleted)
    // and the other side becomes a free "outer" dimension.
    for (int64_t i = 0; i < x_rank; ++i) {
      auto dim_name = x_config[i];
      const int64_t rhs_dim = rhs_dimension_number(dim_name);
      if (is_batch_dim(dim_name)) {
        if (x_shape.dimensions(i) == y_shape.dimensions(rhs_dim)) {
          dnums.add_lhs_batch_dimensions(i);
          dnums.add_rhs_batch_dimensions(rhs_dim);
        } else if (x_shape.dimensions(i) == 1) {
          rhs_outer_dims.push_back(rhs_dim);
          lhs_delete_dims.push_back(i);
        } else {
          lhs_outer_dims.push_back(i);
          rhs_delete_dims.push_back(rhs_dim);
        }
      } else if (is_contracting(dim_name)) {
        if (x_shape.dimensions(i) == y_shape.dimensions(rhs_dim)) {
          dnums.add_lhs_contracting_dimensions(i);
          dnums.add_rhs_contracting_dimensions(rhs_dim);
        } else if (x_shape.dimensions(i) == 1) {
          rhs_outer_dims.push_back(rhs_dim);
          lhs_delete_dims.push_back(i);
        } else {
          lhs_outer_dims.push_back(i);
          rhs_delete_dims.push_back(rhs_dim);
        }
      } else {
        lhs_outer_dims.push_back(i);
      }
    }
    // Any y dimension that is neither batched nor contracted is a free rhs
    // dimension.
    for (int64_t i = 0; i < y_rank; ++i) {
      auto dim_name = y_config[i];
      if (!is_batch_dim(dim_name) && !is_contracting(dim_name)) {
        rhs_outer_dims.push_back(i);
      }
    }
    absl::c_sort(rhs_outer_dims);
    // Map each dot-result dimension (batch dims, then lhs outer, then rhs
    // outer) to its position in the requested output; dimensions absent from
    // the output are queued for deletion (summation).
    absl::InlinedVector<int64_t, 8> output_transpose_dims;
    auto output_dimension_number = [&](int64_t d) -> std::optional<int64_t> {
      auto pos = absl::c_find(output_config, d);
      if (pos == output_config.end()) {
        return std::nullopt;
      }
      return pos - output_config.begin();
    };
    for (auto d : dnums.lhs_batch_dimensions()) {
      output_transpose_dims.push_back(*output_dimension_number(x_config[d]));
    }
    for (auto d : lhs_outer_dims) {
      if (auto output_dim = output_dimension_number(x_config[d])) {
        output_transpose_dims.push_back(*output_dim);
        continue;
      }
      lhs_delete_dims.push_back(d);
    }
    for (auto d : rhs_outer_dims) {
      if (auto output_dim = output_dimension_number(y_config[d])) {
        output_transpose_dims.push_back(*output_dim);
        continue;
      }
      rhs_delete_dims.push_back(d);
    }
    const int64_t transpose_rank = output_transpose_dims.size();
    std::vector<int64_t> transpose_dims(output_rank);
    for (int64_t i = 0; i < transpose_rank; ++i) {
      transpose_dims[output_transpose_dims[i]] = i;
    }
    // Sum away the dimensions that do not survive into the output.
    // DeleteDimsFromContainer requires sorted dimension lists.
    absl::c_sort(lhs_delete_dims);
    DeleteDimsFromContainer(lhs_delete_dims, &x_shape,
                            dnums.mutable_lhs_batch_dimensions(),
                            dnums.mutable_lhs_contracting_dimensions());
    absl::c_sort(rhs_delete_dims);
    DeleteDimsFromContainer(rhs_delete_dims, &y_shape,
                            dnums.mutable_rhs_batch_dimensions(),
                            dnums.mutable_rhs_contracting_dimensions());
    if (!lhs_delete_dims.empty()) {
      x = Reduce(x, ScalarLike(x, 0),
                 CreateScalarAddComputation(x_shape.element_type(), builder),
                 lhs_delete_dims);
    }
    if (!rhs_delete_dims.empty()) {
      y = Reduce(y, ScalarLike(y, 0),
                 CreateScalarAddComputation(y_shape.element_type(), builder),
                 rhs_delete_dims);
    }
    PrecisionConfig precision_proto;
    precision_proto.add_operand_precision(precision);
    precision_proto.add_operand_precision(precision);
    auto dot =
        DotGeneral(x, y, dnums, &precision_proto, preferred_element_type);
    TF_RETURN_IF_ERROR(builder->SetInstructionFrontendAttribute(
        dot, "grad_x", (grad_x ? "true" : "false")));
    TF_RETURN_IF_ERROR(builder->SetInstructionFrontendAttribute(
        dot, "grad_y", (grad_y ? "true" : "false")));
    dot = Transpose(dot, transpose_dims);
    if (transpose_rank == output_rank) {
      return dot;
    }
    // Labels that appear only in the output become new size-1 dimensions.
    auto is_output_only = [&](int64_t d) {
      return output_map.contains(d) && !x_map.contains(d) && !y_map.contains(d);
    };
    int64_t dot_dim = 0;
    std::vector<int64_t> new_dims;
    new_dims.reserve(output_rank);
    TF_ASSIGN_OR_RETURN(Shape dot_shape, builder->GetShape(dot));
    for (auto d : output_config) {
      if (is_output_only(d)) {
        new_dims.push_back(1);
      } else {
        // Bug fix: consume the next dot dimension for each label taken from
        // the dot result. Previously dot_dim was never incremented, so
        // dimension 0 was reused for every non-output-only label.
        new_dims.push_back(dot_shape.dimensions(dot_dim++));
      }
    }
    return Reshape(dot, new_dims);
  });
}
// Batched matrix multiply of x and y with neither operand transposed.
XlaOp BatchDot(XlaOp x, XlaOp y, PrecisionConfig::Precision precision,
               std::optional<PrimitiveType> preferred_element_type) {
  return BatchDot(x, false, y, false, precision, preferred_element_type);
}
// Batched matrix multiply with optional transposition of either operand's two
// minor dimensions, implemented as an einsum.
XlaOp BatchDot(XlaOp x, bool transpose_x, XlaOp y, bool transpose_y,
               PrecisionConfig::Precision precision,
               std::optional<PrimitiveType> preferred_element_type, bool grad_x,
               bool grad_y) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    // Build the spec "...mk,...kn->...mn", flipping an operand's two minor
    // labels when that operand should be treated as transposed.
    std::string spec(transpose_x ? "...km" : "...mk");
    spec += transpose_y ? ",...nk" : ",...kn";
    spec += "->...mn";
    return Einsum(x, y, spec, precision, preferred_element_type, grad_x,
                  grad_y);
  });
}
// Parses an einsum config of the form "lhs,rhs->out" into three numeric label
// vectors (one entry per dimension). Alphabetic labels map to their character
// codes; an ellipsis ("...") expands to negative labels -k..-1 so the batch
// dimensions of both operands share labels. Returns an error for malformed
// configs (missing ",", missing "->", bad characters, too many ellipses, or
// an operand with fewer dimensions than its explicit labels).
absl::StatusOr<std::array<std::vector<int64_t>, 3>> ParseEinsumString(
    absl::string_view einsum_config, int64_t x_rank, int64_t y_rank) {
  std::array<std::vector<int64_t>, 3> einsum_config_numeric;
  std::vector<absl::string_view> main_split =
      absl::StrSplit(einsum_config, ',');
  if (main_split.size() != 2) {
    return InvalidArgument("Expected one \",\" in einsum_config.");
  }
  // Only alphabetic labels are legal; "." outside a full "..." is rejected.
  auto maybe_invalid_character = [](char d) -> absl::Status {
    if (absl::ascii_isalpha(d)) {
      return absl::OkStatus();
    }
    if (d == '.') {
      return InvalidArgument("Unsupported \".\" in einsum config.");
    }
    return InvalidArgument("Unexpected character in einsum config.");
  };
  // Converts one operand/output config into numeric labels, expanding any
  // ellipsis. For input configs the ellipsis rank is derived from the
  // operand's actual rank; for the output config it is passed in. Returns
  // the ellipsis rank so the caller can propagate it to the output.
  auto string_config_to_numeric =
      [&](absl::string_view config, bool is_input_config, int64_t input_rank,
          int64_t ellipsis_rank,
          std::vector<int64_t>* numeric_config) -> absl::StatusOr<int64_t> {
    std::vector<absl::string_view> splits = absl::StrSplit(config, "...");
    if (splits.empty()) {
      return ellipsis_rank;
    }
    if (splits.size() > 2) {
      return InvalidArgument("Too many ellipses (\"...\") in einsum config.");
    }
    const bool has_ellipsis = splits.size() > 1;
    if (is_input_config && has_ellipsis) {
      // The ellipsis absorbs whatever dimensions the explicit labels do not.
      ellipsis_rank = input_rank -
                      static_cast<int64_t>(splits[0].size() + splits[1].size());
      if (ellipsis_rank < 0) {
        return InvalidArgument(
            "Too few dimensions in the input for the given einsum config.");
      }
    }
    for (char d : splits[0]) {
      TF_RETURN_IF_ERROR(maybe_invalid_character(d));
      numeric_config->push_back(static_cast<int64_t>(d));
    }
    if (has_ellipsis) {
      // Ellipsis dimensions get labels -ellipsis_rank .. -1.
      for (int64_t i = ellipsis_rank; i > 0; --i) {
        numeric_config->push_back(-i);
      }
      for (char d : splits[1]) {
        TF_RETURN_IF_ERROR(maybe_invalid_character(d));
        numeric_config->push_back(static_cast<int64_t>(d));
      }
    }
    return ellipsis_rank;
  };
  TF_ASSIGN_OR_RETURN(
      const int64_t x_ellipsis_rank,
      string_config_to_numeric(main_split[0],
                               true, x_rank,
                               0, &einsum_config_numeric[0]));
  std::vector<absl::string_view> y_output_split =
      absl::StrSplit(main_split[1], "->");
  if (y_output_split.size() != 2) {
    return InvalidArgument("Expected one \"->\" in einsum_config.");
  }
  TF_ASSIGN_OR_RETURN(
      const int64_t y_ellipsis_rank,
      string_config_to_numeric(y_output_split[0],
                               true, y_rank,
                               0, &einsum_config_numeric[1]));
  // The output ellipsis covers the wider of the two input ellipses.
  TF_ASSIGN_OR_RETURN(
      std::ignore,
      string_config_to_numeric(
          y_output_split[1], false,
          0,
          std::max(x_ellipsis_rank, y_ellipsis_rank),
          &einsum_config_numeric[2]));
  return einsum_config_numeric;
}
// If `einsum_config` lacks an explicit output ("->"), synthesizes one: the
// output keeps any ellipsis plus every label appearing exactly once, in
// sorted order. Returns "" when the config already names an output.
std::string NormalizeEinsumString(absl::string_view einsum_config) {
  if (einsum_config.find("->") != einsum_config.npos) {
    return "";
  }
  // Count occurrences of each alphabetic label; std::map keeps them sorted.
  std::map<char, int64_t> label_counts;
  for (char label : einsum_config) {
    if (absl::ascii_isalpha(label)) {
      ++label_counts[label];
    }
  }
  std::string normalized(einsum_config);
  normalized += "->";
  if (einsum_config.find("...") != einsum_config.npos) {
    normalized += "...";
  }
  // Labels that occur exactly once survive into the output.
  for (const auto& [label, count] : label_counts) {
    if (count == 1) {
      normalized += label;
    }
  }
  return normalized;
}
// String-config einsum entry point: normalizes configs without "->", then
// translates the textual config into numeric labels and dispatches to the
// numeric overload.
XlaOp Einsum(XlaOp x, XlaOp y, absl::string_view einsum_config,
             PrecisionConfig::Precision precision,
             std::optional<PrimitiveType> preferred_element_type, bool grad_x,
             bool grad_y) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    // A config without an explicit output is rewritten first; recurse once
    // with the normalized form.
    auto normalized = NormalizeEinsumString(einsum_config);
    if (!normalized.empty()) {
      return Einsum(x, y, normalized, precision, preferred_element_type,
                    grad_x, grad_y);
    }
    TF_ASSIGN_OR_RETURN(Shape x_shape, builder->GetShape(x));
    TF_ASSIGN_OR_RETURN(Shape y_shape, builder->GetShape(y));
    TF_ASSIGN_OR_RETURN(
        auto labels,
        ParseEinsumString(einsum_config, x_shape.rank(), y_shape.rank()));
    return Einsum(x, labels[0], y, labels[1], labels[2], precision,
                  preferred_element_type, grad_x, grad_y);
  });
}
// Unary einsum: implemented as a binary einsum against the scalar one, so the
// config gains an empty first operand (leading ",").
XlaOp Einsum(XlaOp x, absl::string_view einsum_config,
             PrecisionConfig::Precision precision) {
  XlaOp one = ScalarLike(x, 1);
  return Einsum(one, x, absl::StrCat(",", einsum_config), precision);
}
// Swaps the two minor (innermost) dimensions of `x`, leaving any leading
// batch dimensions in place. Requires rank >= 2.
XlaOp TransposeInMinorDims(XlaOp x) {
  XlaBuilder* builder = x.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape shape, builder->GetShape(x));
    const int64_t rank = shape.rank();
    TF_RET_CHECK(rank >= 2);
    // Identity permutation with the last two entries exchanged.
    std::vector<int64_t> perm(rank);
    std::iota(perm.begin(), perm.end(), 0);
    std::swap(perm[rank - 2], perm[rank - 1]);
    return Transpose(x, perm);
  });
}
// Conditionally applies TransposeInMinorDims; passes `x` through untouched
// when `transpose` is false.
XlaOp MaybeTransposeInMinorDims(XlaOp x, bool transpose) {
  if (!transpose) {
    return x;
  }
  return TransposeInMinorDims(x);
}
} | #include "xla/hlo/builder/lib/matrix.h"
#include <limits>
#include <map>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/test.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/types.h"
namespace xla {
namespace {
// Test fixture for the matrix helper library (triangles, diagonals, einsum).
class MatrixTest : public ClientLibraryTestBase {
 protected:
  template <typename T>
  void TestMatrixDiagonal();
  template <typename T>
  void TestMatrixDiagonal4D();
  template <typename T>
  void TestSetMatrixDiagonal();
  // Expected GetMatrixDiagonal results, keyed by diagonal offset k, for a
  // 2x3x4 iota-filled input (positive k: above the main diagonal; negative
  // k: below; offsets past the matrix edge yield empty diagonals).
  template <typename T>
  std::map<int, Array2D<T>> k_and_expected() const {
    return std::map<int, Array2D<T>>{
        {0, {{0, 5, 10}, {12, 17, 22}}},
        {1, {{1, 6, 11}, {13, 18, 23}}},
        {2, {{2, 7}, {14, 19}}},
        {3, {{3}, {15}}},
        {4, {{}, {}}},
        {-1, {{4, 9}, {16, 21}}},
        {-2, {{8}, {20}}},
        {-3, {{}, {}}},
        {-4, {{}, {}}},
    };
  }
};
// LowerTriangle on a batched 2x3x4 iota input zeroes all strictly-upper
// entries of each 3x4 matrix.
XLA_TEST_F(MatrixTest, Triangle) {
  XlaBuilder builder(TestName());
  Array3D<int32_t> input(2, 3, 4);
  input.FillIota(0);
  XlaOp a;
  auto a_data = CreateR3Parameter<int32_t>(input, 0, "a", &builder, &a);
  LowerTriangle(a);
  Array3D<int32_t> expected({{{0, 0, 0, 0}, {4, 5, 0, 0}, {8, 9, 10, 0}},
                             {{12, 0, 0, 0}, {16, 17, 0, 0}, {20, 21, 22, 0}}});
  ComputeAndCompareR3<int32_t>(&builder, expected, {a_data.get()});
}
// Symmetrize mirrors the chosen triangle across the diagonal; the opposite
// triangle is seeded with NaN to prove it is never read.
XLA_TEST_F(MatrixTest, Symmetrize) {
  for (bool lower : {false, true}) {
    XlaBuilder builder(TestName());
    float nan = std::numeric_limits<float>::quiet_NaN();
    Array<float> input = {
        {1, nan, nan},
        {2, 3, nan},
        {4, 5, 6},
    };
    XlaOp a;
    auto a_data = CreateParameter<float>(input, 0, "a", &builder, &a);
    // For the upper case, transpose first so the data sits in the triangle
    // being read.
    Symmetrize(lower ? a : TransposeInMinorDims(a), lower);
    Array<float> expected = {
        {1, 2, 4},
        {2, 3, 5},
        {4, 5, 6},
    };
    ComputeAndCompare<float>(&builder, expected, {a_data.get()});
  }
}
// Complex Symmetrize produces a Hermitian matrix: mirrored entries are
// conjugated and the diagonal's imaginary part (seeded with NaN) is zeroed.
XLA_TEST_F(MatrixTest, SymmetrizeComplex) {
  for (bool lower : {false, true}) {
    XlaBuilder builder(TestName());
    float nan = std::numeric_limits<float>::quiet_NaN();
    Array<complex64> input = {
        {complex64{1, nan}, nan, nan},
        {complex64{2, 7}, complex64{3, nan}, nan},
        {complex64{4, 8}, complex64{5, 9}, complex64{6, nan}},
    };
    XlaOp a;
    auto a_data = CreateParameter<complex64>(input, 0, "a", &builder, &a);
    // For the upper case, conjugate-transpose first so the Hermitian result
    // matches the same expectation.
    Symmetrize(lower ? a : Conj(TransposeInMinorDims(a)), lower);
    Array<complex64> expected = {
        {1, complex64{2, -7}, complex64{4, -8}},
        {complex64{2, 7}, 3, complex64{5, -9}},
        {complex64{4, 8}, complex64{5, 9}, 6},
    };
    ComputeAndCompare<complex64>(&builder, expected, {a_data.get()});
  }
}
// Extracts each diagonal offset from a 2x3x4 iota input and compares against
// the fixture's expectations.
template <typename T>
void MatrixTest::TestMatrixDiagonal() {
  // Fixed: the builder label previously said "SetMatrixDiagonal" (swapped
  // with TestSetMatrixDiagonal's label); this test exercises
  // GetMatrixDiagonal.
  XlaBuilder builder("GetMatrixDiagonal");
  Array3D<T> input(2, 3, 4);
  input.FillIota(0);
  for (const auto& kv : k_and_expected<T>()) {
    XlaOp a;
    auto a_data = CreateR3Parameter<T>(input, 0, "a", &builder, &a);
    GetMatrixDiagonal(a, kv.first);
    ComputeAndCompareR2<T>(&builder, kv.second, {a_data.get()});
  }
}
// Round-trips each diagonal: writes expected+1 onto diagonal k, reads it
// back, subtracts 1, and checks the original expectation — verifying that
// SetMatrixDiagonal actually overwrote the diagonal.
template <typename T>
void MatrixTest::TestSetMatrixDiagonal() {
  // Fixed: the builder label previously said "GetMatrixDiagonal" (swapped
  // with TestMatrixDiagonal's label); this test exercises SetMatrixDiagonal.
  XlaBuilder builder("SetMatrixDiagonal");
  Array3D<T> input(2, 3, 4);
  input.FillIota(0);
  for (const auto& kv : k_and_expected<T>()) {
    XlaOp a;
    XlaOp b;
    auto a_data = CreateR3Parameter<T>(input, 0, "a", &builder, &a);
    auto new_diag =
        CreateR2Parameter<T>(Array2D<T>{kv.second}, 1, "d", &builder, &b);
    GetMatrixDiagonal(SetMatrixDiagonal(a, b + ScalarLike(b, 1), kv.first),
                      kv.first) -
        ScalarLike(b, 1);
    ComputeAndCompareR2<T>(&builder, kv.second, {a_data.get(), new_diag.get()});
  }
}
// Instantiate the diagonal set/get tests for each supported element type.
XLA_TEST_F(MatrixTest, SetMatrixDiagonal_S32) {
  TestSetMatrixDiagonal<int32_t>();
}
XLA_TEST_F(MatrixTest, SetMatrixDiagonal_S64) {
  TestSetMatrixDiagonal<int64_t>();
}
XLA_TEST_F(MatrixTest, SetMatrixDiagonal_F32) {
  TestSetMatrixDiagonal<float>();
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal_S32) { TestMatrixDiagonal<int32_t>(); }
XLA_TEST_F(MatrixTest, GetMatrixDiagonal_S64) { TestMatrixDiagonal<int64_t>(); }
XLA_TEST_F(MatrixTest, GetMatrixDiagonal_F32) { TestMatrixDiagonal<float>(); }
// GetMatrixDiagonal on a rank-4 (2x2x4x3) iota input: diagonals of the two
// minor dimensions, batched over the two leading dimensions.
template <typename T>
void MatrixTest::TestMatrixDiagonal4D() {
  XlaBuilder builder("GetMatrixDiagonal");
  Array4D<T> input(2, 2, 4, 3);
  input.FillIota(0);
  // Expected diagonals per offset k for each of the four 4x3 matrices.
  std::map<int, Array3D<T>> k_and_expected = {
      {0, {{{0, 4, 8}, {12, 16, 20}}, {{24, 28, 32}, {36, 40, 44}}}},
      {1, {{{1, 5}, {13, 17}}, {{25, 29}, {37, 41}}}},
      {2, {{{2}, {14}}, {{26}, {38}}}},
      {3, {{{}, {}}, {{}, {}}}},
      {4, {{{}, {}}, {{}, {}}}},
      {-1, {{{3, 7, 11}, {15, 19, 23}}, {{27, 31, 35}, {39, 43, 47}}}},
      {-2, {{{6, 10}, {18, 22}}, {{30, 34}, {42, 46}}}},
      {-3, {{{9}, {21}}, {{33}, {45}}}},
      {-4, {{{}, {}}, {{}, {}}}},
  };
  for (const auto& kv : k_and_expected) {
    XlaOp a;
    auto a_data = CreateR4Parameter<T>(input, 0, "a", &builder, &a);
    GetMatrixDiagonal(a, kv.first);
    ComputeAndCompareR3<T>(&builder, kv.second, {a_data.get()});
  }
}
// Instantiate the 4-D diagonal extraction tests per element type.
XLA_TEST_F(MatrixTest, GetMatrixDiagonal4D_S32) {
  TestMatrixDiagonal4D<int32_t>();
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal4D_S64) {
  TestMatrixDiagonal4D<int64_t>();
}
XLA_TEST_F(MatrixTest, GetMatrixDiagonal4D_F32) {
  TestMatrixDiagonal4D<float>();
}
// Shared fixture data: a batch of two 4x4 matrices used by the dot/einsum
// tests below.
Array3D<float> BatchedAValsFull() {
  return {{
              {2, 0, 1, 2},
              {3, 6, 0, 1},
              {4, 7, 9, 0},
              {5, 8, 10, 11},
          },
          {
              {16, 24, 8, 12},
              {24, 61, 82, 48},
              {8, 82, 456, 106},
              {12, 48, 106, 62},
          }};
}
// Dynamically slices row `index` from each batched matrix and dots it with a
// per-batch row vector via BatchDot.
XLA_TEST_F(MatrixTest, RowBatchDot) {
  XlaBuilder builder(TestName());
  int n = 4;
  XlaOp a, row, index;
  auto a_data =
      CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
  auto row_data = CreateR3Parameter<float>({{{9, 1, 0, 0}}, {{2, 4, 0, 0}}}, 1,
                                           "row", &builder, &row);
  auto index_data = CreateR0Parameter<int>(1, 2, "index", &builder, &index);
  // Extract the 1 x n row at `index` from each batch element.
  auto l_index = DynamicSliceInMinorDims(
      a, {index, ConstantR0<int32_t>(&builder, 0)}, {1, n});
  BatchDot(l_index, TransposeInMinorDims(row));
  ComputeAndCompareR3<float>(&builder, {{{33}}, {{292}}},
                             {a_data.get(), row_data.get(), index_data.get()});
}
// Same computation as RowBatchDot, expressed as the einsum "abc,adc->abd";
// both must agree on the expected result.
XLA_TEST_F(MatrixTest, Einsum) {
  XlaBuilder builder(TestName());
  int n = 4;
  XlaOp a, row, index;
  auto a_data =
      CreateR3Parameter<float>(BatchedAValsFull(), 0, "a", &builder, &a);
  auto row_data = CreateR3Parameter<float>({{{9, 1, 0, 0}}, {{2, 4, 0, 0}}}, 1,
                                           "row", &builder, &row);
  auto index_data = CreateR0Parameter<int>(1, 2, "index", &builder, &index);
  auto l_index = DynamicSliceInMinorDims(
      a, {index, ConstantR0<int32_t>(&builder, 0)}, {1, n});
  Einsum(l_index, row, "abc,adc->abd");
  ComputeAndCompareR3<float>(&builder, {{{33}}, {{292}}},
                             {a_data.get(), row_data.get(), index_data.get()});
}
// Exercises ParseEinsumString on valid and invalid configs: valid configs
// parse to the expected numeric labels; malformed ones return errors.
XLA_TEST_F(MatrixTest, ParseEinsumString) {
  // Converts a label string to its expected numeric form: letters map to
  // their char codes, ellipsis dots to consecutive negative labels
  // (-3, -2, -1 for a 3-dim ellipsis).
  auto to_vec = [](absl::string_view s) {
    std::vector<int64_t> v;
    v.reserve(s.size());
    int e = -3;
    for (auto c : s) {
      v.push_back(c == '.' ? e++ : int64_t{c});
    }
    return v;
  };
  auto to_string = [&](absl::string_view x, absl::string_view y,
                       absl::string_view o) {
    return absl::StrCat(x, ",", y, "->", o);
  };
  std::vector<std::vector<std::string>> good_test_cases = {
      {"ab", "bc", "ac"},
      {"Bab", "Bbc", "Bac"},
      {"ab", "cd", "dcba"},
      {"abc", "abd", "cbd"},
      {"...ab", "...bc", "...ac"},
      {"a...bc", "...abd", "cbd..."},
      {"...ab", "...bc", "ac"},
      {"...b", "...bc", "...c"},
      {"...abz", "...bc", "...ac"},
      {"...ab", "...bcz", "...ac"},
      {"abz", "bc", "ac"},
      {"ab", "bcz", "ac"},
      {"a", "b", "c"},
      {"...a", "...b", "...c"},
      {"abb", "bcc", "ac"},
      {"ab", "bc", "ad"},
  };
  for (auto test_case : good_test_cases) {
    auto parse_result_or_status =
        ParseEinsumString(to_string(test_case[0], test_case[1], test_case[2]),
                          test_case[0].size(), test_case[1].size());
    EXPECT_TRUE(parse_result_or_status.status().ok());
    auto parse_result = parse_result_or_status.value();
    for (int i = 0; i < 3; ++i) {
      EXPECT_EQ(parse_result[i], to_vec(test_case[i]));
    }
  }
  // Missing comma, missing "->", too many operands, or split ellipses must
  // all be rejected.
  std::vector<std::string> einsum_strings_that_fail_parsing = {
      "", "a", "ab->ba", "ab,bc,cd->ad", "a...b...,bc->a...c",
  };
  for (auto test_case : einsum_strings_that_fail_parsing) {
    auto parse_result_or_status = ParseEinsumString(test_case, 3, 3);
    EXPECT_FALSE(parse_result_or_status.status().ok());
  }
}
// NormalizeEinsumString returns "" for configs with an explicit output, and
// otherwise appends "->" plus ellipsis and singly-occurring labels in order.
XLA_TEST_F(MatrixTest, NormalizeEinsumString) {
  EXPECT_EQ(NormalizeEinsumString("a,b->ab"), "");
  EXPECT_EQ(NormalizeEinsumString("ba"), "ba->ab");
  EXPECT_EQ(NormalizeEinsumString("ab,dc"), "ab,dc->abcd");
  EXPECT_EQ(NormalizeEinsumString("a,b"), "a,b->ab");
  EXPECT_EQ(NormalizeEinsumString("...ba,ca..."), "...ba,ca...->...bc");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/matrix.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/matrix_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
63535286-e4a6-4b54-9d25-9e013a5d5c38 | cpp | google/quiche | quic_packet_creator | quiche/quic/core/quic_packet_creator.cc | quiche/quic/core/quic_packet_creator_test.cc | #include "quiche/quic/core/quic_packet_creator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/frames/quic_frame.h"
#include "quiche/quic/core/frames/quic_padding_frame.h"
#include "quiche/quic/core/frames/quic_path_challenge_frame.h"
#include "quiche/quic/core/frames/quic_stream_frame.h"
#include "quiche/quic/core/quic_chaos_protector.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_exported_stats.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_server_stats.h"
#include "quiche/common/print_elements.h"
namespace quic {
namespace {

// Maps a handshake encryption level to the long header packet type that
// carries it. ENCRYPTION_FORWARD_SECURE packets use the short header, so
// requesting a long header type for that level is a bug.
QuicLongHeaderType EncryptionlevelToLongHeaderType(EncryptionLevel level) {
  switch (level) {
    case ENCRYPTION_INITIAL:
      return INITIAL;
    case ENCRYPTION_HANDSHAKE:
      return HANDSHAKE;
    case ENCRYPTION_ZERO_RTT:
      return ZERO_RTT_PROTECTED;
    case ENCRYPTION_FORWARD_SECURE:
      QUIC_BUG(quic_bug_12398_1)
          << "Try to derive long header type for packet with encryption level: "
          << level;
      return INVALID_PACKET_TYPE;
    default:
      QUIC_BUG(quic_bug_10752_1) << level;
      return INVALID_PACKET_TYPE;
  }
}

// Records whether an attempt to coalesce adjacent stream frames succeeded.
void LogCoalesceStreamFrameStatus(bool success) {
  QUIC_HISTOGRAM_BOOL("QuicSession.CoalesceStreamFrameStatus", success,
                      "Success rate of coalesing stream frames attempt.");
}

// RAII helper that temporarily overrides a SerializedPacket's packet number,
// packet number length and encryption level, restoring the saved values on
// destruction.
class ScopedPacketContextSwitcher {
 public:
  ScopedPacketContextSwitcher(QuicPacketNumber packet_number,
                              QuicPacketNumberLength packet_number_length,
                              EncryptionLevel encryption_level,
                              SerializedPacket* packet)
      : saved_packet_number_(packet->packet_number),
        saved_packet_number_length_(packet->packet_number_length),
        saved_encryption_level_(packet->encryption_level),
        packet_(packet) {
    // Fixed: the first two assignments were previously joined by a comma
    // operator (a typo); plain statements are equivalent and clearer.
    packet_->packet_number = packet_number;
    packet_->packet_number_length = packet_number_length;
    packet_->encryption_level = encryption_level;
  }

  ~ScopedPacketContextSwitcher() {
    packet_->packet_number = saved_packet_number_;
    packet_->packet_number_length = saved_packet_number_length_;
    packet_->encryption_level = saved_encryption_level_;
  }

 private:
  const QuicPacketNumber saved_packet_number_;
  const QuicPacketNumberLength saved_packet_number_length_;
  const EncryptionLevel saved_encryption_level_;
  SerializedPacket* packet_;
};

}  // namespace
#define ENDPOINT \
(framer_->perspective() == Perspective::IS_SERVER ? "Server: " : "Client: ")
// Convenience constructor: delegates to the full constructor using the
// process-wide QuicRandom instance.
QuicPacketCreator::QuicPacketCreator(QuicConnectionId server_connection_id,
                                     QuicFramer* framer,
                                     DelegateInterface* delegate)
    : QuicPacketCreator(server_connection_id, framer, QuicRandom::GetInstance(),
                        delegate) {}
// Full constructor. Starts with the default max packet size; for pre-TLS
// versions the DATAGRAM frame size cap is set immediately (TLS versions learn
// it from transport parameters).
QuicPacketCreator::QuicPacketCreator(QuicConnectionId server_connection_id,
                                     QuicFramer* framer, QuicRandom* random,
                                     DelegateInterface* delegate)
    : delegate_(delegate),
      debug_delegate_(nullptr),
      framer_(framer),
      random_(random),
      have_diversification_nonce_(false),
      max_packet_length_(0),
      next_max_packet_length_(0),
      server_connection_id_included_(CONNECTION_ID_PRESENT),
      packet_size_(0),
      server_connection_id_(server_connection_id),
      client_connection_id_(EmptyQuicConnectionId()),
      packet_(QuicPacketNumber(), PACKET_1BYTE_PACKET_NUMBER, nullptr, 0, false,
              false),
      pending_padding_bytes_(0),
      needs_full_padding_(false),
      next_transmission_type_(NOT_RETRANSMISSION),
      flusher_attached_(false),
      fully_pad_crypto_handshake_packets_(true),
      latched_hard_max_packet_length_(0),
      max_datagram_frame_size_(0) {
  SetMaxPacketLength(kDefaultMaxPacketSize);
  if (!framer_->version().UsesTls()) {
    SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
  }
}
// Releases any retransmittable frames still owned by the in-progress packet.
QuicPacketCreator::~QuicPacketCreator() {
  DeleteFrames(&packet_.retransmittable_frames);
}
// Installs the encrypter for `level` and refreshes the cached max plaintext
// size, since ciphertext expansion can differ per encrypter.
void QuicPacketCreator::SetEncrypter(EncryptionLevel level,
                                     std::unique_ptr<QuicEncrypter> encrypter) {
  framer_->SetEncrypter(level, std::move(encrypter));
  max_plaintext_size_ = framer_->GetMaxPlaintextSize(max_packet_length_);
}
// The max packet length may only change while no frames are queued for the
// packet under construction.
bool QuicPacketCreator::CanSetMaxPacketLength() const {
  return queued_frames_.empty();
}
// Sets the hard maximum packet length and recomputes the max plaintext size.
// If frames are already queued, the new length is stashed in
// next_max_packet_length_ to be applied later (presumably once the current
// packet is flushed — the application site is outside this chunk).
void QuicPacketCreator::SetMaxPacketLength(QuicByteCount length) {
  if (!CanSetMaxPacketLength()) {
    next_max_packet_length_ = length;
    return;
  }
  if (length == max_packet_length_) {
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Updating packet creator max packet length from "
                << max_packet_length_ << " to " << length;
  max_packet_length_ = length;
  max_plaintext_size_ = framer_->GetMaxPlaintextSize(max_packet_length_);
  // The plaintext budget must still fit a header plus the minimum payload.
  QUIC_BUG_IF(
      quic_bug_12398_2,
      max_plaintext_size_ - PacketHeaderSize() <
          MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength()))
      << ENDPOINT << "Attempted to set max packet length too small";
}
// Stores the maximum DATAGRAM frame size, clamped so the value is
// representable both as a QuicPacketLength and as a size_t.
void QuicPacketCreator::SetMaxDatagramFrameSize(
    QuicByteCount max_datagram_frame_size) {
  constexpr QuicByteCount upper_bound =
      std::min<QuicByteCount>(std::numeric_limits<QuicPacketLength>::max(),
                              std::numeric_limits<size_t>::max());
  if (max_datagram_frame_size > upper_bound) {
    max_datagram_frame_size = upper_bound;
  }
  max_datagram_frame_size_ = max_datagram_frame_size;
}
// Temporarily lowers the packet length limit (e.g. for probing); the previous
// hard limit is latched so it can be restored. Only shrinking is allowed, and
// the soft limit is rejected when it can no longer fit a header plus the
// minimum payload.
void QuicPacketCreator::SetSoftMaxPacketLength(QuicByteCount length) {
  QUICHE_DCHECK(CanSetMaxPacketLength()) << ENDPOINT;
  if (length > max_packet_length_) {
    QUIC_BUG(quic_bug_10752_2)
        << ENDPOINT
        << "Try to increase max_packet_length_ in "
           "SetSoftMaxPacketLength, use SetMaxPacketLength instead.";
    return;
  }
  if (framer_->GetMaxPlaintextSize(length) <
      PacketHeaderSize() +
          MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength())) {
    QUIC_DLOG(INFO) << ENDPOINT << length
                    << " is too small to fit packet header";
    RemoveSoftMaxPacketLength();
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Setting soft max packet length to: " << length;
  latched_hard_max_packet_length_ = max_packet_length_;
  max_packet_length_ = length;
  max_plaintext_size_ = framer_->GetMaxPlaintextSize(length);
}
// Records the diversification nonce; may only be set once per creator.
void QuicPacketCreator::SetDiversificationNonce(
    const DiversificationNonce& nonce) {
  QUICHE_DCHECK(!have_diversification_nonce_) << ENDPOINT;
  have_diversification_nonce_ = true;
  diversification_nonce_ = nonce;
}
// Re-derives the on-the-wire packet number length. Must not be called while
// frames are queued. The length is sized to encode four times the larger of
// (gap to the peer's least-awaited packet, packets in flight), giving
// headroom against reordering/loss ambiguity.
void QuicPacketCreator::UpdatePacketNumberLength(
    QuicPacketNumber least_packet_awaited_by_peer,
    QuicPacketCount max_packets_in_flight) {
  if (!queued_frames_.empty()) {
    QUIC_BUG(quic_bug_10752_3)
        << ENDPOINT << "Called UpdatePacketNumberLength with "
        << queued_frames_.size()
        << " queued_frames.  First frame type:" << queued_frames_.front().type
        << " last frame type:" << queued_frames_.back().type;
    return;
  }
  const QuicPacketNumber next_packet_number = NextSendingPacketNumber();
  QUICHE_DCHECK_LE(least_packet_awaited_by_peer, next_packet_number)
      << ENDPOINT;
  const uint64_t current_delta =
      next_packet_number - least_packet_awaited_by_peer;
  const uint64_t delta = std::max(current_delta, max_packets_in_flight);
  const QuicPacketNumberLength packet_number_length =
      QuicFramer::GetMinPacketNumberLength(QuicPacketNumber(delta * 4));
  if (packet_.packet_number_length == packet_number_length) {
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Updating packet number length from "
                << static_cast<int>(packet_.packet_number_length) << " to "
                << static_cast<int>(packet_number_length)
                << ", least_packet_awaited_by_peer: "
                << least_packet_awaited_by_peer
                << " max_packets_in_flight: " << max_packets_in_flight
                << " next_packet_number: " << next_packet_number;
  packet_.packet_number_length = packet_number_length;
}
// Advances the creator's packet number by |count| without sending anything,
// then recomputes the packet number length for the widened gap. Refuses to
// skip when doing so would wrap the 64-bit packet number.
void QuicPacketCreator::SkipNPacketNumbers(
    QuicPacketCount count, QuicPacketNumber least_packet_awaited_by_peer,
    QuicPacketCount max_packets_in_flight) {
  if (!queued_frames_.empty()) {
    // Same restriction as UpdatePacketNumberLength: the header must not
    // change while frames are queued.
    QUIC_BUG(quic_bug_10752_4)
        << ENDPOINT << "Called SkipNPacketNumbers with "
        << queued_frames_.size()
        << " queued_frames. First frame type:" << queued_frames_.front().type
        << " last frame type:" << queued_frames_.back().type;
    return;
  }
  if (packet_.packet_number > packet_.packet_number + count) {
    // Overflow check: the sum wrapped around.
    QUIC_LOG(WARNING) << ENDPOINT << "Skipping " << count
                      << " packet numbers causes packet number wrapping "
                         "around, least_packet_awaited_by_peer: "
                      << least_packet_awaited_by_peer
                      << " packet_number:" << packet_.packet_number;
    return;
  }
  packet_.packet_number += count;
  UpdatePacketNumberLength(least_packet_awaited_by_peer, max_packets_in_flight);
}
// Creates a CRYPTO frame covering up to |write_length| bytes at |offset|
// and adds it to the packet under construction. Returns false if no crypto
// data fits or the frame cannot be added.
bool QuicPacketCreator::ConsumeCryptoDataToFillCurrentPacket(
    EncryptionLevel level, size_t write_length, QuicStreamOffset offset,
    bool needs_full_padding, TransmissionType transmission_type,
    QuicFrame* frame) {
  QUIC_DVLOG(2) << ENDPOINT << "ConsumeCryptoDataToFillCurrentPacket " << level
                << " write_length " << write_length << " offset " << offset
                << (needs_full_padding ? " needs_full_padding" : "") << " "
                << transmission_type;
  if (!CreateCryptoFrame(level, write_length, offset, frame)) {
    return false;
  }
  // Full padding is applied later, at serialization time.
  if (needs_full_padding) {
    needs_full_padding_ = true;
  }
  return AddFrame(*frame, transmission_type);
}
// Creates a stream frame for stream |id| with up to |data_size| bytes at
// |offset| and adds it to the packet under construction. Returns false when
// no data fits, when the frame cannot be added, or when a client hello
// would be split across packets (an unrecoverable error when
// quic_enforce_single_packet_chlo is enabled).
bool QuicPacketCreator::ConsumeDataToFillCurrentPacket(
    QuicStreamId id, size_t data_size, QuicStreamOffset offset, bool fin,
    bool needs_full_padding, TransmissionType transmission_type,
    QuicFrame* frame) {
  if (!HasRoomForStreamFrame(id, offset, data_size)) {
    return false;
  }
  CreateStreamFrame(id, data_size, offset, fin, frame);
  // A truncated CHLO (constructed frame shorter than the full data) is
  // rejected rather than split.
  if (GetQuicFlag(quic_enforce_single_packet_chlo) &&
      StreamFrameIsClientHello(frame->stream_frame) &&
      frame->stream_frame.data_length < data_size) {
    const std::string error_details =
        "Client hello won't fit in a single packet.";
    QUIC_BUG(quic_bug_10752_5)
        << ENDPOINT << error_details << " Constructed stream frame length: "
        << frame->stream_frame.data_length << " CHLO length: " << data_size;
    delegate_->OnUnrecoverableError(QUIC_CRYPTO_CHLO_TOO_LARGE, error_details);
    return false;
  }
  if (!AddFrame(*frame, transmission_type)) {
    return false;
  }
  if (needs_full_padding) {
    needs_full_padding_ = true;
  }
  return true;
}
// Whether a stream frame for (id, offset) would fit in the current packet.
// If it does not fit under a soft max packet length, the hard limit is
// restored and the check retried once.
bool QuicPacketCreator::HasRoomForStreamFrame(QuicStreamId id,
                                              QuicStreamOffset offset,
                                              size_t data_size) {
  // Smallest possible serialized size of such a frame, assuming it is the
  // last frame in the packet.
  const size_t min_stream_frame_size = QuicFramer::GetMinStreamFrameSize(
      framer_->transport_version(), id, offset, /*last_frame_in_packet=*/true,
      data_size);
  if (BytesFree() > min_stream_frame_size) {
    return true;
  }
  // RemoveSoftMaxPacketLength() returns false when no soft limit was in
  // effect, in which case there is genuinely no room.
  return RemoveSoftMaxPacketLength() && BytesFree() > min_stream_frame_size;
}
// Whether a message (datagram) frame carrying |length| bytes would fit in
// the current packet and within max_datagram_frame_size_.
bool QuicPacketCreator::HasRoomForMessageFrame(QuicByteCount length) {
  const size_t message_frame_size =
      QuicFramer::GetMessageFrameSize(/*last_frame_in_packet=*/true, length);
  if (static_cast<QuicByteCount>(message_frame_size) >
      max_datagram_frame_size_) {
    // The frame exceeds the configured datagram frame size limit outright.
    return false;
  }
  if (BytesFree() >= message_frame_size) {
    return true;
  }
  // Drop a soft max packet length (if one is active) and re-check.
  return RemoveSoftMaxPacketLength() && BytesFree() >= message_frame_size;
}
// Returns the total per-packet overhead (packet header plus minimal stream
// frame header) for a packet with the given header properties, assuming a
// maximally sized last-in-packet stream frame at |offset|.
size_t QuicPacketCreator::StreamFramePacketOverhead(
    QuicTransportVersion version, uint8_t destination_connection_id_length,
    uint8_t source_connection_id_length, bool include_version,
    bool include_diversification_nonce,
    QuicPacketNumberLength packet_number_length,
    quiche::QuicheVariableLengthIntegerLength retry_token_length_length,
    quiche::QuicheVariableLengthIntegerLength length_length,
    QuicStreamOffset offset) {
  // Packet header bytes; retry token length is 0 here.
  const size_t header_size = GetPacketHeaderSize(
      version, destination_connection_id_length, source_connection_id_length,
      include_version, include_diversification_nonce, packet_number_length,
      retry_token_length_length, 0, length_length);
  // Stream frame overhead for stream 1 at |offset|, sized as if the frame
  // filled the packet.
  const size_t stream_frame_size = QuicFramer::GetMinStreamFrameSize(
      version, 1u, offset, /*last_frame_in_packet=*/true,
      kMaxOutgoingPacketSize);
  return header_size + stream_frame_size;
}
// Builds a stream frame for as much of the |data_size| bytes at |offset| as
// fits in the remaining packet space. The fin bit is set only when all of
// the remaining data is consumed.
void QuicPacketCreator::CreateStreamFrame(QuicStreamId id, size_t data_size,
                                          QuicStreamOffset offset, bool fin,
                                          QuicFrame* frame) {
  // Sanity-check that a worst-case header still leaves room for stream
  // data, unless a soft max packet length is in effect
  // (latched_hard_max_packet_length_ > 0).
  QUICHE_DCHECK(
      max_packet_length_ >
      StreamFramePacketOverhead(
          framer_->transport_version(), GetDestinationConnectionIdLength(),
          GetSourceConnectionIdLength(), kIncludeVersion,
          IncludeNonceInPublicHeader(), PACKET_6BYTE_PACKET_NUMBER,
          GetRetryTokenLengthLength(), GetLengthLength(), offset) ||
      latched_hard_max_packet_length_ > 0)
      << ENDPOINT;
  QUIC_BUG_IF(quic_bug_12398_3, !HasRoomForStreamFrame(id, offset, data_size))
      << ENDPOINT << "No room for Stream frame, BytesFree: " << BytesFree()
      << " MinStreamFrameSize: "
      << QuicFramer::GetMinStreamFrameSize(framer_->transport_version(), id,
                                           offset, true, data_size);
  QUIC_BUG_IF(quic_bug_12398_4, data_size == 0 && !fin)
      << ENDPOINT << "Creating a stream frame for stream ID:" << id
      << " with no data or fin.";
  size_t min_frame_size = QuicFramer::GetMinStreamFrameSize(
      framer_->transport_version(), id, offset,
      true, data_size);
  // Consume whatever fits after the frame header, capped at data_size.
  size_t bytes_consumed =
      std::min<size_t>(BytesFree() - min_frame_size, data_size);
  bool set_fin = fin && bytes_consumed == data_size;
  *frame = QuicFrame(QuicStreamFrame(id, set_fin, offset, bytes_consumed));
}
// Builds a CRYPTO frame for as much of |write_length| bytes at |offset| as
// fits in the remaining packet space. Returns false when nothing fits even
// after dropping a soft max packet length. On success *frame owns a newly
// allocated QuicCryptoFrame.
bool QuicPacketCreator::CreateCryptoFrame(EncryptionLevel level,
                                          size_t write_length,
                                          QuicStreamOffset offset,
                                          QuicFrame* frame) {
  const size_t min_frame_size =
      QuicFramer::GetMinCryptoFrameSize(offset, write_length);
  if (BytesFree() <= min_frame_size) {
    // Not enough room under the current (possibly soft) limit; restore the
    // hard maximum packet length and re-check once.
    if (!RemoveSoftMaxPacketLength() || BytesFree() <= min_frame_size) {
      return false;
    }
  }
  const size_t max_write_length = BytesFree() - min_frame_size;
  const size_t bytes_consumed =
      std::min<size_t>(max_write_length, write_length);
  *frame = QuicFrame(new QuicCryptoFrame(level, offset, bytes_consumed));
  return true;
}
// Serializes the packet currently under construction (if any) and hands it
// to the delegate. Uses the delegate-provided buffer when available,
// otherwise a stack buffer.
void QuicPacketCreator::FlushCurrentPacket() {
  if (!HasPendingFrames() && pending_padding_bytes_ == 0) {
    // Nothing to flush.
    return;
  }
  ABSL_CACHELINE_ALIGNED char stack_buffer[kMaxOutgoingPacketSize];
  QuicOwnedPacketBuffer external_buffer(delegate_->GetPacketBuffer());
  if (external_buffer.buffer == nullptr) {
    // Fall back to the stack buffer; it needs no release callback.
    external_buffer.buffer = stack_buffer;
    external_buffer.release_buffer = nullptr;
  }
  QUICHE_DCHECK_EQ(nullptr, packet_.encrypted_buffer) << ENDPOINT;
  if (!SerializePacket(std::move(external_buffer), kMaxOutgoingPacketSize,
                       true)) {
    return;
  }
  OnSerializedPacket();
}
// Moves the just-serialized packet out of packet_, resets creator state,
// and delivers the packet to the delegate. Also applies a max packet length
// change that was deferred until the pending packet was flushed.
void QuicPacketCreator::OnSerializedPacket() {
  QUIC_BUG_IF(quic_bug_12398_5, packet_.encrypted_buffer == nullptr)
      << ENDPOINT;
  // bytes_not_retransmitted only applies to retransmissions.
  if (packet_.transmission_type == NOT_RETRANSMISSION) {
    packet_.bytes_not_retransmitted.reset();
  }
  SerializedPacket packet(std::move(packet_));
  ClearPacket();
  RemoveSoftMaxPacketLength();
  delegate_->OnSerializedPacket(std::move(packet));
  if (next_max_packet_length_ != 0) {
    QUICHE_DCHECK(CanSetMaxPacketLength()) << ENDPOINT;
    SetMaxPacketLength(next_max_packet_length_);
    next_max_packet_length_ = 0;
  }
}
// Resets per-packet state in packet_ so construction of the next packet
// starts from a clean slate. Buffer ownership must already have been
// transferred away, and all frame lists must already be empty.
void QuicPacketCreator::ClearPacket() {
  packet_.has_ack = false;
  packet_.has_stop_waiting = false;
  packet_.has_ack_ecn = false;
  packet_.has_crypto_handshake = NOT_HANDSHAKE;
  packet_.transmission_type = NOT_RETRANSMISSION;
  packet_.encrypted_buffer = nullptr;
  packet_.encrypted_length = 0;
  packet_.has_ack_frequency = false;
  packet_.has_message = false;
  packet_.fate = SEND_TO_WRITER;
  // A non-null release callback here would mean the encrypted buffer is
  // about to be leaked or double-owned.
  QUIC_BUG_IF(quic_bug_12398_6, packet_.release_encrypted_buffer != nullptr)
      << ENDPOINT << "packet_.release_encrypted_buffer should be empty";
  packet_.release_encrypted_buffer = nullptr;
  QUICHE_DCHECK(packet_.retransmittable_frames.empty()) << ENDPOINT;
  QUICHE_DCHECK(packet_.nonretransmittable_frames.empty()) << ENDPOINT;
  packet_.largest_acked.Clear();
  needs_full_padding_ = false;
  packet_.bytes_not_retransmitted.reset();
  packet_.initial_header.reset();
}
// Re-serializes an already-built ENCRYPTION_INITIAL packet, plus
// |padding_size| bytes of padding, into |buffer| so it can lead a coalesced
// packet. Returns the encrypted length, or 0 on any failure.
size_t QuicPacketCreator::ReserializeInitialPacketInCoalescedPacket(
    const SerializedPacket& packet, size_t padding_size, char* buffer,
    size_t buffer_len) {
  QUIC_BUG_IF(quic_bug_12398_7, packet.encryption_level != ENCRYPTION_INITIAL);
  QUIC_BUG_IF(quic_bug_12398_8, packet.nonretransmittable_frames.empty() &&
                                    packet.retransmittable_frames.empty())
      << ENDPOINT
      << "Attempt to serialize empty ENCRYPTION_INITIAL packet in coalesced "
         "packet";
  if (HasPendingFrames()) {
    QUIC_BUG(quic_packet_creator_unexpected_queued_frames)
        << "Unexpected queued frames: " << GetPendingFramesInfo();
    return 0;
  }
  // Temporarily adopt the original packet's number, number length and
  // encryption level; restored when |switcher| goes out of scope.
  // NOTE(review): packet_number - 1 appears to compensate for an increment
  // performed during serialization — confirm against FillPacketHeader.
  ScopedPacketContextSwitcher switcher(
      packet.packet_number -
          1,
      packet.packet_number_length, packet.encryption_level, &packet_);
  for (const QuicFrame& frame : packet.nonretransmittable_frames) {
    if (!AddFrame(frame, packet.transmission_type)) {
      QUIC_BUG(quic_bug_10752_6)
          << ENDPOINT << "Failed to serialize frame: " << frame;
      return 0;
    }
  }
  for (const QuicFrame& frame : packet.retransmittable_frames) {
    if (!AddFrame(frame, packet.transmission_type)) {
      QUIC_BUG(quic_bug_10752_7)
          << ENDPOINT << "Failed to serialize frame: " << frame;
      return 0;
    }
  }
  if (padding_size > 0) {
    QUIC_DVLOG(2) << ENDPOINT << "Add padding of size: " << padding_size;
    if (!AddFrame(QuicFrame(QuicPaddingFrame(padding_size)),
                  packet.transmission_type)) {
      QUIC_BUG(quic_bug_10752_8)
          << ENDPOINT << "Failed to add padding of size " << padding_size
          << " when serializing ENCRYPTION_INITIAL "
             "packet in coalesced packet";
      return 0;
    }
  }
  // allow_padding=false: the exact padding needed was added above.
  if (!SerializePacket(QuicOwnedPacketBuffer(buffer, nullptr), buffer_len,
                       false)) {
    return 0;
  }
  // The re-serialized header must match the header of the original
  // serialization byte for byte.
  if (!packet.initial_header.has_value() ||
      !packet_.initial_header.has_value()) {
    QUIC_BUG(missing initial packet header)
        << "initial serialized packet does not have header populated";
  } else if (*packet.initial_header != *packet_.initial_header) {
    QUIC_BUG(initial packet header changed before reserialization)
        << ENDPOINT << "original header: " << *packet.initial_header
        << ", new header: " << *packet_.initial_header;
  }
  const size_t encrypted_length = packet_.encrypted_length;
  // Clear the frame lists copied from |packet| so ClearPacket()'s DCHECKs
  // on empty frame lists hold.
  packet_.retransmittable_frames.clear();
  packet_.nonretransmittable_frames.clear();
  ClearPacket();
  return encrypted_length;
}
// Fast path that builds, serializes and encrypts a single stream frame for
// stream |id| directly into a packet buffer, bypassing the queued-frames
// machinery. Consumes up to (write_length - iov_offset) bytes of stream
// data starting at |stream_offset|; *num_bytes_consumed reports how many
// bytes were taken. Only valid for non-crypto streams with no frames
// already queued.
//
// Fix: the original carried a second, post-frame padding branch guarded by
// |needs_padding|; since |needs_padding| is unconditionally reset to false
// inside the pre-frame padding block, that branch was unreachable dead
// code and has been removed.
void QuicPacketCreator::CreateAndSerializeStreamFrame(
    QuicStreamId id, size_t write_length, QuicStreamOffset iov_offset,
    QuicStreamOffset stream_offset, bool fin,
    TransmissionType transmission_type, size_t* num_bytes_consumed) {
  QUICHE_DCHECK(queued_frames_.empty()) << ENDPOINT;
  QUICHE_DCHECK(!QuicUtils::IsCryptoStreamId(transport_version(), id))
      << ENDPOINT;
  QuicPacketHeader header;
  FillPacketHeader(&header);
  packet_.fate = delegate_->GetSerializedPacketFate(
      false, packet_.encryption_level);  // false: not an MTU discovery probe
  QUIC_DVLOG(1) << ENDPOINT << "fate of packet " << packet_.packet_number
                << ": " << SerializedPacketFateToString(packet_.fate) << " of "
                << EncryptionLevelToString(packet_.encryption_level);
  // Use the delegate's buffer if provided, otherwise a stack buffer.
  ABSL_CACHELINE_ALIGNED char stack_buffer[kMaxOutgoingPacketSize];
  QuicOwnedPacketBuffer packet_buffer(delegate_->GetPacketBuffer());
  if (packet_buffer.buffer == nullptr) {
    packet_buffer.buffer = stack_buffer;
    packet_buffer.release_buffer = nullptr;
  }
  char* encrypted_buffer = packet_buffer.buffer;
  QuicDataWriter writer(kMaxOutgoingPacketSize, encrypted_buffer);
  size_t length_field_offset = 0;
  if (!framer_->AppendIetfPacketHeader(header, &writer, &length_field_offset)) {
    QUIC_BUG(quic_bug_10752_9) << ENDPOINT << "AppendPacketHeader failed";
    return;
  }
  QUIC_BUG_IF(quic_bug_12398_9, iov_offset == write_length && !fin)
      << ENDPOINT << "Creating a stream frame with no data or fin.";
  const size_t remaining_data_size = write_length - iov_offset;
  size_t min_frame_size = QuicFramer::GetMinStreamFrameSize(
      framer_->transport_version(), id, stream_offset,
      true, remaining_data_size);
  size_t available_size =
      max_plaintext_size_ - writer.length() - min_frame_size;
  size_t bytes_consumed = std::min<size_t>(available_size, remaining_data_size);
  size_t plaintext_bytes_written = min_frame_size + bytes_consumed;
  // Undersized packets must be padded up to the version's minimum plaintext
  // packet size.
  bool needs_padding = false;
  const size_t min_plaintext_size =
      MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength());
  if (plaintext_bytes_written < min_plaintext_size) {
    needs_padding = true;
  }
  const bool set_fin = fin && (bytes_consumed == remaining_data_size);
  QuicStreamFrame frame(id, set_fin, stream_offset, bytes_consumed);
  if (debug_delegate_ != nullptr) {
    debug_delegate_->OnFrameAddedToPacket(QuicFrame(frame));
  }
  QUIC_DVLOG(1) << ENDPOINT << "Adding frame: " << frame;
  QUIC_DVLOG(2) << ENDPOINT << "Serializing stream packet " << header << frame;
  // Padding (when required) is written *before* the stream frame, so the
  // frame remains last in the packet and its length field can be omitted.
  if (needs_padding) {
    if (!writer.WritePaddingBytes(min_plaintext_size -
                                  plaintext_bytes_written)) {
      QUIC_BUG(quic_bug_10752_12) << ENDPOINT << "Unable to add padding bytes";
      return;
    }
    needs_padding = false;
  }
  // needs_padding is always false here, so the frame length is omitted.
  bool omit_frame_length = !needs_padding;
  if (!framer_->AppendTypeByte(QuicFrame(frame), omit_frame_length, &writer)) {
    QUIC_BUG(quic_bug_10752_10) << ENDPOINT << "AppendTypeByte failed";
    return;
  }
  if (!framer_->AppendStreamFrame(frame, omit_frame_length, &writer)) {
    QUIC_BUG(quic_bug_10752_11) << ENDPOINT << "AppendStreamFrame failed";
    return;
  }
  if (!framer_->WriteIetfLongHeaderLength(header, &writer, length_field_offset,
                                          packet_.encryption_level)) {
    return;
  }
  packet_.transmission_type = transmission_type;
  QUICHE_DCHECK(packet_.encryption_level == ENCRYPTION_FORWARD_SECURE ||
                packet_.encryption_level == ENCRYPTION_ZERO_RTT)
      << ENDPOINT << packet_.encryption_level;
  size_t encrypted_length = framer_->EncryptInPlace(
      packet_.encryption_level, packet_.packet_number,
      GetStartOfEncryptedData(framer_->transport_version(), header),
      writer.length(), kMaxOutgoingPacketSize, encrypted_buffer);
  if (encrypted_length == 0) {
    QUIC_BUG(quic_bug_10752_13)
        << ENDPOINT << "Failed to encrypt packet number "
        << header.packet_number;
    return;
  }
  *num_bytes_consumed = bytes_consumed;
  // Transfer buffer ownership into packet_ and deliver it.
  packet_size_ = 0;
  packet_.encrypted_buffer = encrypted_buffer;
  packet_.encrypted_length = encrypted_length;
  packet_buffer.buffer = nullptr;
  packet_.release_encrypted_buffer = std::move(packet_buffer).release_buffer;
  packet_.retransmittable_frames.push_back(QuicFrame(frame));
  OnSerializedPacket();
}
// Whether any frame is queued in the packet under construction.
bool QuicPacketCreator::HasPendingFrames() const {
  return queued_frames_.size() != 0;
}
// Human-readable description of the queued frames, for logging/bug output.
std::string QuicPacketCreator::GetPendingFramesInfo() const {
  return QuicFramesToString(queued_frames_);
}
// Whether the packet under construction already holds retransmittable
// frames.
bool QuicPacketCreator::HasPendingRetransmittableFrames() const {
  return packet_.retransmittable_frames.size() != 0;
}
// Whether the packet under construction holds a stream frame for |id|.
bool QuicPacketCreator::HasPendingStreamFramesOfStream(QuicStreamId id) const {
  for (const auto& pending : packet_.retransmittable_frames) {
    if (pending.type != STREAM_FRAME) {
      continue;
    }
    if (pending.stream_frame.stream_id == id) {
      return true;
    }
  }
  return false;
}
// Number of bytes by which the current last queued frame would grow if
// another frame were appended after it (0 when nothing is queued).
size_t QuicPacketCreator::ExpansionOnNewFrame() const {
  if (!queued_frames_.empty()) {
    return ExpansionOnNewFrameWithLastFrame(queued_frames_.back(),
                                            framer_->transport_version());
  }
  return 0;
}
// Number of bytes |last_frame| would grow by if it stopped being the last
// frame in the packet and therefore needed an explicit length field.
size_t QuicPacketCreator::ExpansionOnNewFrameWithLastFrame(
    const QuicFrame& last_frame, QuicTransportVersion version) {
  switch (last_frame.type) {
    case MESSAGE_FRAME:
      // Message frames gain a varint length field.
      return QuicDataWriter::GetVarInt62Len(
          last_frame.message_frame->message_length);
    case STREAM_FRAME:
      // Stream frames gain a varint (IETF) or fixed-size (Google QUIC)
      // data-length field.
      if (VersionHasIetfQuicFrames(version)) {
        return QuicDataWriter::GetVarInt62Len(
            last_frame.stream_frame.data_length);
      }
      return kQuicStreamPayloadLengthSize;
    default:
      // All other frame types serialize identically regardless of position.
      return 0;
  }
}
// Plaintext bytes still available in the packet under construction,
// accounting for the expansion of the current last frame if another frame
// were appended. Saturates at zero.
size_t QuicPacketCreator::BytesFree() const {
  const size_t bytes_used = PacketSize() + ExpansionOnNewFrame();
  if (bytes_used >= max_plaintext_size_) {
    return 0;
  }
  return max_plaintext_size_ - bytes_used;
}
// Plaintext bytes available for padding. Unlike BytesFree(), no
// ExpansionOnNewFrame() term is applied. Saturates at zero.
size_t QuicPacketCreator::BytesFreeForPadding() const {
  const size_t bytes_used = PacketSize();
  if (bytes_used >= max_plaintext_size_) {
    return 0;
  }
  return max_plaintext_size_ - bytes_used;
}
// Current serialized size of the packet under construction. Before any
// frame is queued this is just the header size.
size_t QuicPacketCreator::PacketSize() const {
  if (queued_frames_.empty()) {
    return PacketHeaderSize();
  }
  return packet_size_;
}
// Queues |frame| and, on success, marks the packet to be padded to full
// size at serialization time.
bool QuicPacketCreator::AddPaddedSavedFrame(
    const QuicFrame& frame, TransmissionType transmission_type) {
  const bool added = AddFrame(frame, transmission_type);
  if (added) {
    needs_full_padding_ = true;
  }
  return added;
}
// Attempts to serialize the queued frames with chaos protection. Only
// applies to a client ENCRYPTION_INITIAL packet consisting of exactly one
// CRYPTO frame followed by one non-empty padding frame. Returns the
// serialized length on success, or std::nullopt when the packet does not
// qualify (the caller then serializes normally).
std::optional<size_t>
QuicPacketCreator::MaybeBuildDataPacketWithChaosProtection(
    const QuicPacketHeader& header, char* buffer) {
  if (!GetQuicFlag(quic_enable_chaos_protection) ||
      framer_->perspective() != Perspective::IS_CLIENT ||
      packet_.encryption_level != ENCRYPTION_INITIAL ||
      !framer_->version().UsesCryptoFrames() || queued_frames_.size() != 2u ||
      queued_frames_[0].type != CRYPTO_FRAME ||
      queued_frames_[1].type != PADDING_FRAME ||
      queued_frames_[1].padding_frame.num_padding_bytes <= 0 ||
      framer_->data_producer() == nullptr) {
    return std::nullopt;
  }
  const QuicCryptoFrame& crypto_frame = *queued_frames_[0].crypto_frame;
  // The crypto frame's level must match the packet's encryption level.
  if (packet_.encryption_level != crypto_frame.level) {
    QUIC_BUG(chaos frame level)
        << ENDPOINT << packet_.encryption_level << " != " << crypto_frame.level;
    return std::nullopt;
  }
  QuicChaosProtector chaos_protector(
      crypto_frame, queued_frames_[1].padding_frame.num_padding_bytes,
      packet_size_, framer_, random_);
  return chaos_protector.BuildDataPacket(header, buffer);
}
// Serializes the queued frames into |encrypted_buffer| and encrypts them in
// place, leaving the result in packet_. Returns false on any serialization
// or encryption failure (ScopedSerializationFailureHandler performs
// cleanup on failure).
bool QuicPacketCreator::SerializePacket(QuicOwnedPacketBuffer encrypted_buffer,
                                        size_t encrypted_buffer_len,
                                        bool allow_padding) {
  if (packet_.encrypted_buffer != nullptr) {
    const std::string error_details =
        "Packet's encrypted buffer is not empty before serialization";
    QUIC_BUG(quic_bug_10752_14) << ENDPOINT << error_details;
    delegate_->OnUnrecoverableError(QUIC_FAILED_TO_SERIALIZE_PACKET,
                                    error_details);
    return false;
  }
  ScopedSerializationFailureHandler handler(this);
  QUICHE_DCHECK_LT(0u, encrypted_buffer_len) << ENDPOINT;
  QUIC_BUG_IF(quic_bug_12398_10,
              queued_frames_.empty() && pending_padding_bytes_ == 0)
      << ENDPOINT << "Attempt to serialize empty packet";
  QuicPacketHeader header;
  FillPacketHeader(&header);
  // Remember the INITIAL header so a later re-serialization (for packet
  // coalescing) can verify it did not change.
  if (packet_.encryption_level == ENCRYPTION_INITIAL) {
    packet_.initial_header = header;
  }
  if (delegate_ != nullptr) {
    packet_.fate = delegate_->GetSerializedPacketFate(
        QuicUtils::ContainsFrameType(queued_frames_,
                                     MTU_DISCOVERY_FRAME),
        packet_.encryption_level);
    QUIC_DVLOG(1) << ENDPOINT << "fate of packet " << packet_.packet_number
                  << ": " << SerializedPacketFateToString(packet_.fate)
                  << " of "
                  << EncryptionLevelToString(packet_.encryption_level);
  }
  if (allow_padding) {
    MaybeAddPadding();
  }
  QUIC_DVLOG(2) << ENDPOINT << "Serializing packet " << header
                << QuicFramesToString(queued_frames_) << " at encryption_level "
                << packet_.encryption_level
                << ", allow_padding:" << allow_padding;
  if (!framer_->HasEncrypterOfEncryptionLevel(packet_.encryption_level)) {
    QUIC_BUG(quic_bug_10752_15)
        << ENDPOINT << "Attempting to serialize " << header
        << QuicFramesToString(queued_frames_) << " at missing encryption_level "
        << packet_.encryption_level << " using " << framer_->version();
    return false;
  }
  QUICHE_DCHECK_GE(max_plaintext_size_, packet_size_) << ENDPOINT;
  size_t length;
  // When chaos protection applies it serializes the packet itself;
  // otherwise fall through to the regular framer path.
  std::optional<size_t> length_with_chaos_protection =
      MaybeBuildDataPacketWithChaosProtection(header, encrypted_buffer.buffer);
  if (length_with_chaos_protection.has_value()) {
    length = *length_with_chaos_protection;
  } else {
    length = framer_->BuildDataPacket(header, queued_frames_,
                                      encrypted_buffer.buffer, packet_size_,
                                      packet_.encryption_level);
  }
  if (length == 0) {
    QUIC_BUG(quic_bug_10752_16)
        << ENDPOINT << "Failed to serialize "
        << QuicFramesToString(queued_frames_)
        << " at encryption_level: " << packet_.encryption_level
        << ", needs_full_padding_: " << needs_full_padding_
        << ", pending_padding_bytes_: " << pending_padding_bytes_
        << ", latched_hard_max_packet_length_: "
        << latched_hard_max_packet_length_
        << ", max_packet_length_: " << max_packet_length_
        << ", header: " << header;
    return false;
  }
  // A packet-filling lone ACK frame may have been truncated, in which case
  // the built length is allowed to differ from packet_size_.
  bool possibly_truncated_by_length = packet_size_ == max_plaintext_size_ &&
                                      queued_frames_.size() == 1 &&
                                      queued_frames_.back().type == ACK_FRAME;
  if (!possibly_truncated_by_length) {
    QUICHE_DCHECK_EQ(packet_size_, length) << ENDPOINT;
  }
  const size_t encrypted_length = framer_->EncryptInPlace(
      packet_.encryption_level, packet_.packet_number,
      GetStartOfEncryptedData(framer_->transport_version(), header), length,
      encrypted_buffer_len, encrypted_buffer.buffer);
  if (encrypted_length == 0) {
    QUIC_BUG(quic_bug_10752_17)
        << ENDPOINT << "Failed to encrypt packet number "
        << packet_.packet_number;
    return false;
  }
  // Transfer buffer ownership into packet_.
  packet_size_ = 0;
  packet_.encrypted_buffer = encrypted_buffer.buffer;
  packet_.encrypted_length = encrypted_length;
  encrypted_buffer.buffer = nullptr;
  packet_.release_encrypted_buffer = std::move(encrypted_buffer).release_buffer;
  return true;
}
// Serializes a padded PING packet used as a connectivity probe. Only valid
// for non-IETF-frame versions (IETF versions use PATH_CHALLENGE /
// PATH_RESPONSE probes instead).
std::unique_ptr<SerializedPacket>
QuicPacketCreator::SerializeConnectivityProbingPacket() {
  QUIC_BUG_IF(quic_bug_12398_11,
              VersionHasIetfQuicFrames(framer_->transport_version()))
      << ENDPOINT
      << "Must not be version 99 to serialize padded ping connectivity probe";
  RemoveSoftMaxPacketLength();
  QuicPacketHeader header;
  FillPacketHeader(&header);
  QUIC_DVLOG(2) << ENDPOINT << "Serializing connectivity probing packet "
                << header;
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  size_t length = BuildConnectivityProbingPacket(
      header, buffer.get(), max_plaintext_size_, packet_.encryption_level);
  QUICHE_DCHECK(length) << ENDPOINT;
  // Probes are only serialized at the forward-secure level.
  QUICHE_DCHECK_EQ(packet_.encryption_level, ENCRYPTION_FORWARD_SECURE)
      << ENDPOINT;
  const size_t encrypted_length = framer_->EncryptInPlace(
      packet_.encryption_level, packet_.packet_number,
      GetStartOfEncryptedData(framer_->transport_version(), header), length,
      kMaxOutgoingPacketSize, buffer.get());
  QUICHE_DCHECK(encrypted_length) << ENDPOINT;
  // Ownership of the heap buffer moves into the SerializedPacket; the
  // release callback deletes it.
  std::unique_ptr<SerializedPacket> serialize_packet(new SerializedPacket(
      header.packet_number, header.packet_number_length, buffer.release(),
      encrypted_length, false, false));
  serialize_packet->release_encrypted_buffer = [](const char* p) {
    delete[] p;
  };
  serialize_packet->encryption_level = packet_.encryption_level;
  serialize_packet->transmission_type = NOT_RETRANSMISSION;
  return serialize_packet;
}
// Serializes a padded PATH_CHALLENGE packet carrying |payload|, used as an
// IETF connectivity probe. Only valid for IETF-frame versions.
std::unique_ptr<SerializedPacket>
QuicPacketCreator::SerializePathChallengeConnectivityProbingPacket(
    const QuicPathFrameBuffer& payload) {
  QUIC_BUG_IF(quic_bug_12398_12,
              !VersionHasIetfQuicFrames(framer_->transport_version()))
      << ENDPOINT
      << "Must be version 99 to serialize path challenge connectivity probe, "
         "is version "
      << framer_->transport_version();
  RemoveSoftMaxPacketLength();
  QuicPacketHeader header;
  FillPacketHeader(&header);
  QUIC_DVLOG(2) << ENDPOINT << "Serializing path challenge packet " << header;
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  size_t length =
      BuildPaddedPathChallengePacket(header, buffer.get(), max_plaintext_size_,
                                     payload, packet_.encryption_level);
  QUICHE_DCHECK(length) << ENDPOINT;
  // Probes are only serialized at the forward-secure level.
  QUICHE_DCHECK_EQ(packet_.encryption_level, ENCRYPTION_FORWARD_SECURE)
      << ENDPOINT;
  const size_t encrypted_length = framer_->EncryptInPlace(
      packet_.encryption_level, packet_.packet_number,
      GetStartOfEncryptedData(framer_->transport_version(), header), length,
      kMaxOutgoingPacketSize, buffer.get());
  QUICHE_DCHECK(encrypted_length) << ENDPOINT;
  // Ownership of the heap buffer moves into the SerializedPacket; the
  // release callback deletes it.
  std::unique_ptr<SerializedPacket> serialize_packet(
      new SerializedPacket(header.packet_number, header.packet_number_length,
                           buffer.release(), encrypted_length,
                           false, false));
  serialize_packet->release_encrypted_buffer = [](const char* p) {
    delete[] p;
  };
  serialize_packet->encryption_level = packet_.encryption_level;
  serialize_packet->transmission_type = NOT_RETRANSMISSION;
  return serialize_packet;
}
// Serializes a PATH_RESPONSE packet answering the given challenge
// |payloads|, optionally padded. Only valid for IETF-frame versions.
std::unique_ptr<SerializedPacket>
QuicPacketCreator::SerializePathResponseConnectivityProbingPacket(
    const quiche::QuicheCircularDeque<QuicPathFrameBuffer>& payloads,
    const bool is_padded) {
  QUIC_BUG_IF(quic_bug_12398_13,
              !VersionHasIetfQuicFrames(framer_->transport_version()))
      << ENDPOINT
      << "Must be version 99 to serialize path response connectivity probe, is "
         "version "
      << framer_->transport_version();
  RemoveSoftMaxPacketLength();
  QuicPacketHeader header;
  FillPacketHeader(&header);
  QUIC_DVLOG(2) << ENDPOINT << "Serializing path response packet " << header;
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  size_t length =
      BuildPathResponsePacket(header, buffer.get(), max_plaintext_size_,
                              payloads, is_padded, packet_.encryption_level);
  QUICHE_DCHECK(length) << ENDPOINT;
  // Probes are only serialized at the forward-secure level.
  QUICHE_DCHECK_EQ(packet_.encryption_level, ENCRYPTION_FORWARD_SECURE)
      << ENDPOINT;
  const size_t encrypted_length = framer_->EncryptInPlace(
      packet_.encryption_level, packet_.packet_number,
      GetStartOfEncryptedData(framer_->transport_version(), header), length,
      kMaxOutgoingPacketSize, buffer.get());
  QUICHE_DCHECK(encrypted_length) << ENDPOINT;
  // Ownership of the heap buffer moves into the SerializedPacket; the
  // release callback deletes it.
  std::unique_ptr<SerializedPacket> serialize_packet(
      new SerializedPacket(header.packet_number, header.packet_number_length,
                           buffer.release(), encrypted_length,
                           false, false));
  serialize_packet->release_encrypted_buffer = [](const char* p) {
    delete[] p;
  };
  serialize_packet->encryption_level = packet_.encryption_level;
  serialize_packet->transmission_type = NOT_RETRANSMISSION;
  return serialize_packet;
}
// Serializes a CONNECTION_CLOSE packet using a packet number 2^31 above the
// largest acked packet (or the first sending packet number when nothing was
// acked), so it cannot collide with in-flight packet numbers.
std::unique_ptr<SerializedPacket>
QuicPacketCreator::SerializeLargePacketNumberConnectionClosePacket(
    QuicPacketNumber largest_acked_packet, QuicErrorCode error,
    const std::string& error_details) {
  QUICHE_DCHECK_EQ(packet_.encryption_level, ENCRYPTION_FORWARD_SECURE)
      << ENDPOINT;
  const QuicPacketNumber largest_packet_number(
      (largest_acked_packet.IsInitialized()
           ? largest_acked_packet
           : framer_->first_sending_packet_number()) +
      (1L << 31));
  // Temporarily switch packet_ to the large packet number and 4-byte
  // encoding; restored when |switcher| goes out of scope.
  ScopedPacketContextSwitcher switcher(largest_packet_number,
                                       PACKET_4BYTE_PACKET_NUMBER,
                                       ENCRYPTION_FORWARD_SECURE, &packet_);
  QuicPacketHeader header;
  FillPacketHeader(&header);
  QUIC_DVLOG(2) << ENDPOINT << "Serializing connection close packet " << header;
  QuicFrames frames;
  QuicConnectionCloseFrame close_frame(transport_version(), error,
                                       NO_IETF_QUIC_ERROR, error_details, 0);
  frames.push_back(QuicFrame(&close_frame));
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  const size_t length =
      framer_->BuildDataPacket(header, frames, buffer.get(),
                               max_plaintext_size_, packet_.encryption_level);
  QUICHE_DCHECK(length) << ENDPOINT;
  const size_t encrypted_length = framer_->EncryptInPlace(
      packet_.encryption_level, packet_.packet_number,
      GetStartOfEncryptedData(framer_->transport_version(), header), length,
      kMaxOutgoingPacketSize, buffer.get());
  QUICHE_DCHECK(encrypted_length) << ENDPOINT;
  // Ownership of the heap buffer moves into the SerializedPacket; the
  // release callback deletes it.
  std::unique_ptr<SerializedPacket> serialize_packet(
      new SerializedPacket(header.packet_number, header.packet_number_length,
                           buffer.release(), encrypted_length,
                           false, false));
  serialize_packet->release_encrypted_buffer = [](const char* p) {
    delete[] p;
  };
  serialize_packet->encryption_level = packet_.encryption_level;
  serialize_packet->transmission_type = NOT_RETRANSMISSION;
  return serialize_packet;
}
// Writes a PATH_CHALLENGE frame carrying |payload|, followed by padding,
// into |buffer|. Returns the serialized plaintext length.
size_t QuicPacketCreator::BuildPaddedPathChallengePacket(
    const QuicPacketHeader& header, char* buffer, size_t packet_length,
    const QuicPathFrameBuffer& payload, EncryptionLevel level) {
  // PATH_CHALLENGE only exists in IETF-frame versions.
  QUICHE_DCHECK(VersionHasIetfQuicFrames(framer_->transport_version()))
      << ENDPOINT;
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, payload)));
  if (debug_delegate_ != nullptr) {
    debug_delegate_->OnFrameAddedToPacket(frames.back());
  }
  // Fill the remainder of the packet with padding.
  frames.push_back(QuicFrame(QuicPaddingFrame()));
  return framer_->BuildDataPacket(header, frames, buffer, packet_length, level);
}
// Writes one PATH_RESPONSE frame per entry in |payloads| (optionally
// followed by padding) into |buffer|. Returns the serialized plaintext
// length, or 0 when |payloads| is empty.
size_t QuicPacketCreator::BuildPathResponsePacket(
    const QuicPacketHeader& header, char* buffer, size_t packet_length,
    const quiche::QuicheCircularDeque<QuicPathFrameBuffer>& payloads,
    const bool is_padded, EncryptionLevel level) {
  if (payloads.empty()) {
    QUIC_BUG(quic_bug_12398_14)
        << ENDPOINT
        << "Attempt to generate connectivity response with no request payloads";
    return 0;
  }
  // PATH_RESPONSE only exists in IETF-frame versions.
  QUICHE_DCHECK(VersionHasIetfQuicFrames(framer_->transport_version()))
      << ENDPOINT;
  QuicFrames frames;
  for (const QuicPathFrameBuffer& response_payload : payloads) {
    frames.push_back(QuicFrame(QuicPathResponseFrame(0, response_payload)));
    if (debug_delegate_ != nullptr) {
      debug_delegate_->OnFrameAddedToPacket(frames.back());
    }
  }
  if (is_padded) {
    frames.push_back(QuicFrame(QuicPaddingFrame()));
  }
  return framer_->BuildDataPacket(header, frames, buffer, packet_length, level);
}
// Writes a PING frame followed by padding into |buffer|. Returns the
// serialized plaintext length.
size_t QuicPacketCreator::BuildConnectivityProbingPacket(
    const QuicPacketHeader& header, char* buffer, size_t packet_length,
    EncryptionLevel level) {
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPingFrame()));
  frames.push_back(QuicFrame(QuicPaddingFrame()));
  return framer_->BuildDataPacket(header, frames, buffer, packet_length, level);
}
// Writes a collected coalesced packet into |buffer|: the INITIAL packet is
// re-serialized (with padding) first, then the remaining encrypted packets
// are copied verbatim. Returns the total length written, or 0 on failure.
size_t QuicPacketCreator::SerializeCoalescedPacket(
    const QuicCoalescedPacket& coalesced, char* buffer, size_t buffer_len) {
  if (HasPendingFrames()) {
    QUIC_BUG(quic_bug_10752_18)
        << ENDPOINT << "Try to serialize coalesced packet with pending frames";
    return 0;
  }
  RemoveSoftMaxPacketLength();
  QUIC_BUG_IF(quic_bug_12398_15, coalesced.length() == 0)
      << ENDPOINT << "Attempt to serialize empty coalesced packet";
  size_t packet_length = 0;
  size_t initial_length = 0;
  size_t padding_size = 0;
  if (coalesced.initial_packet() != nullptr) {
    // Pad the INITIAL packet up to the coalesced packet's maximum length —
    // except for server connection-close packets, which are not padded.
    padding_size = coalesced.max_packet_length() - coalesced.length();
    if (framer_->perspective() == Perspective::IS_SERVER &&
        QuicUtils::ContainsFrameType(
            coalesced.initial_packet()->retransmittable_frames,
            CONNECTION_CLOSE_FRAME)) {
      padding_size = 0;
    }
    initial_length = ReserializeInitialPacketInCoalescedPacket(
        *coalesced.initial_packet(), padding_size, buffer, buffer_len);
    if (initial_length == 0) {
      QUIC_BUG(quic_bug_10752_19)
          << ENDPOINT
          << "Failed to reserialize ENCRYPTION_INITIAL packet in "
             "coalesced packet";
      return 0;
    }
    QUIC_BUG_IF(quic_reserialize_initial_packet_unexpected_size,
                coalesced.initial_packet()->encrypted_length + padding_size !=
                    initial_length)
        << "Reserialize initial packet in coalescer has unexpected size, "
           "original_length: "
        << coalesced.initial_packet()->encrypted_length
        << ", coalesced.max_packet_length: " << coalesced.max_packet_length()
        << ", coalesced.length: " << coalesced.length()
        << ", padding_size: " << padding_size
        << ", serialized_length: " << initial_length
        << ", retransmittable frames: "
        << QuicFramesToString(
               coalesced.initial_packet()->retransmittable_frames)
        << ", nonretransmittable frames: "
        << QuicFramesToString(
               coalesced.initial_packet()->nonretransmittable_frames);
    // Advance past the INITIAL packet before copying the rest.
    buffer += initial_length;
    buffer_len -= initial_length;
    packet_length += initial_length;
  }
  size_t length_copied = 0;
  if (!coalesced.CopyEncryptedBuffers(buffer, buffer_len, &length_copied)) {
    QUIC_BUG(quic_serialize_coalesced_packet_copy_failure)
        << "SerializeCoalescedPacket failed. buffer_len:" << buffer_len
        << ", initial_length:" << initial_length
        << ", padding_size: " << padding_size
        << ", length_copied:" << length_copied
        << ", coalesced.length:" << coalesced.length()
        << ", coalesced.max_packet_length:" << coalesced.max_packet_length()
        << ", coalesced.packet_lengths:"
        << absl::StrJoin(coalesced.packet_lengths(), ":");
    return 0;
  }
  packet_length += length_copied;
  QUIC_DVLOG(1) << ENDPOINT
                << "Successfully serialized coalesced packet of length: "
                << packet_length;
  return packet_length;
}
// Returns an empty placeholder SerializedPacket (no buffer, zero length)
// representing "no packet".
SerializedPacket QuicPacketCreator::NoPacket() {
  return SerializedPacket(QuicPacketNumber(), PACKET_1BYTE_PACKET_NUMBER,
                          nullptr, 0, false, false);
}
// The destination connection ID addresses the peer: servers send to the
// client's ID, clients send to the server's ID.
QuicConnectionId QuicPacketCreator::GetDestinationConnectionId() const {
  return framer_->perspective() == Perspective::IS_SERVER
             ? client_connection_id_
             : server_connection_id_;
}
// The source connection ID identifies this endpoint: clients use the
// client's ID, servers use the server's ID.
QuicConnectionId QuicPacketCreator::GetSourceConnectionId() const {
  return framer_->perspective() == Perspective::IS_CLIENT
             ? client_connection_id_
             : server_connection_id_;
}
// Clients always include a destination connection ID; servers do so only
// when the version supports client connection IDs.
QuicConnectionIdIncluded QuicPacketCreator::GetDestinationConnectionIdIncluded()
    const {
  if (framer_->perspective() == Perspective::IS_CLIENT ||
      framer_->version().SupportsClientConnectionIds()) {
    return CONNECTION_ID_PRESENT;
  }
  return CONNECTION_ID_ABSENT;
}
// IETF long headers carry a source connection ID when this endpoint is the
// server or the version supports client connection IDs. Otherwise, servers
// fall back to the configured inclusion policy and clients omit it.
QuicConnectionIdIncluded QuicPacketCreator::GetSourceConnectionIdIncluded()
    const {
  const bool is_server = framer_->perspective() == Perspective::IS_SERVER;
  if (HasIetfLongHeader() &&
      (is_server || framer_->version().SupportsClientConnectionIds())) {
    return CONNECTION_ID_PRESENT;
  }
  if (is_server) {
    return server_connection_id_included_;
  }
  return CONNECTION_ID_ABSENT;
}
// Byte length contributed to the header by the destination connection ID;
// zero when the ID is omitted.
uint8_t QuicPacketCreator::GetDestinationConnectionIdLength() const {
  QUICHE_DCHECK(QuicUtils::IsConnectionIdValidForVersion(server_connection_id_,
                                                         transport_version()))
      << ENDPOINT;
  if (GetDestinationConnectionIdIncluded() != CONNECTION_ID_PRESENT) {
    return 0;
  }
  return GetDestinationConnectionId().length();
}
// Byte length contributed to the header by the source connection ID; zero
// when the ID is omitted.
uint8_t QuicPacketCreator::GetSourceConnectionIdLength() const {
  QUICHE_DCHECK(QuicUtils::IsConnectionIdValidForVersion(server_connection_id_,
                                                         transport_version()))
      << ENDPOINT;
  if (GetSourceConnectionIdIncluded() != CONNECTION_ID_PRESENT) {
    return 0;
  }
  return GetSourceConnectionId().length();
}
// Long headers use a fixed 4-byte packet number unless the version allows a
// variable length there; otherwise use the packet's current length.
QuicPacketNumberLength QuicPacketCreator::GetPacketNumberLength() const {
  const bool fixed_long_header_number =
      HasIetfLongHeader() &&
      !framer_->version().SendsVariableLengthPacketNumberInLongHeader();
  return fixed_long_header_number ? PACKET_4BYTE_PACKET_NUMBER
                                  : packet_.packet_number_length;
}
// Size in bytes of the header the current packet will carry, derived from
// the connection IDs, version/nonce inclusion, packet-number length, retry
// token, and length-field encoding currently in effect.
size_t QuicPacketCreator::PacketHeaderSize() const {
  return GetPacketHeaderSize(
      framer_->transport_version(), GetDestinationConnectionIdLength(),
      GetSourceConnectionIdLength(), IncludeVersionInHeader(),
      IncludeNonceInPublicHeader(), GetPacketNumberLength(),
      GetRetryTokenLengthLength(), GetRetryToken().length(), GetLengthLength());
}
// Varint length of the retry-token-length field. Only INITIAL packets of
// versions with long-header length fields encode a retry token at all.
quiche::QuicheVariableLengthIntegerLength
QuicPacketCreator::GetRetryTokenLengthLength() const {
  const bool initial_with_lengths =
      QuicVersionHasLongHeaderLengths(framer_->transport_version()) &&
      HasIetfLongHeader() &&
      EncryptionlevelToLongHeaderType(packet_.encryption_level) == INITIAL;
  if (!initial_with_lengths) {
    return quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
  }
  return QuicDataWriter::GetVarInt62Len(GetRetryToken().length());
}
// The stored retry token is only surfaced for INITIAL packets of versions
// whose long headers carry length fields; otherwise an empty view.
absl::string_view QuicPacketCreator::GetRetryToken() const {
  const bool token_applies =
      QuicVersionHasLongHeaderLengths(framer_->transport_version()) &&
      HasIetfLongHeader() &&
      EncryptionlevelToLongHeaderType(packet_.encryption_level) == INITIAL;
  return token_applies ? absl::string_view(retry_token_) : absl::string_view();
}
// Copies the token; the caller's view need not outlive this object.
void QuicPacketCreator::SetRetryToken(absl::string_view retry_token) {
  retry_token_.assign(retry_token.data(), retry_token.size());
}
// Adds a retransmittable control frame to the current packet, flushing and
// retrying once if it does not fit. Returns false only when the frame could
// not be added and the delegate declined to allow a new packet.
bool QuicPacketCreator::ConsumeRetransmittableControlFrame(
    const QuicFrame& frame) {
  QUIC_BUG_IF(quic_bug_12398_16, IsControlFrame(frame.type) &&
                                     !GetControlFrameId(frame) &&
                                     frame.type != PING_FRAME)
      << ENDPOINT
      << "Adding a control frame with no control frame id: " << frame;
  QUICHE_DCHECK(QuicUtils::IsRetransmittableFrame(frame.type))
      << ENDPOINT << frame;
  MaybeBundleOpportunistically();
  // First attempt: append to the packet already under construction.
  if (HasPendingFrames()) {
    if (AddFrame(frame, next_transmission_type_)) {
      return true;
    }
  }
  // AddFrame flushes on failure, so the packet must now be empty.
  QUICHE_DCHECK(!HasPendingFrames()) << ENDPOINT;
  // PING and CONNECTION_CLOSE bypass the delegate's permission check; all
  // other control frames require it before opening a fresh packet.
  if (frame.type != PING_FRAME && frame.type != CONNECTION_CLOSE_FRAME &&
      !delegate_->ShouldGeneratePacket(HAS_RETRANSMITTABLE_DATA,
                                       NOT_HANDSHAKE)) {
    return false;
  }
  // Second attempt into an empty packet; failure here is a bug.
  const bool success = AddFrame(frame, next_transmission_type_);
  QUIC_BUG_IF(quic_bug_10752_20, !success)
      << ENDPOINT << "Failed to add frame:" << frame
      << " transmission_type:" << next_transmission_type_;
  return success;
}
void QuicPacketCreator::MaybeBundleOpportunistically() {
const TransmissionType next_transmission_type = next_transmission_type_;
delegate_->MaybeBundleOpportunistically(next_transmission_type_);
next_transmission_type_ = next_transmission_type;
}
// Consumes stream data for |id| starting at |offset|, packing it into one or
// more packets. May clamp |write_length| to the flow-control window for
// non-retransmissions, and switches to a fast serialization path for large
// non-handshake writes. Returns how many bytes were consumed and whether the
// FIN was consumed.
QuicConsumedData QuicPacketCreator::ConsumeData(QuicStreamId id,
                                                size_t write_length,
                                                QuicStreamOffset offset,
                                                StreamSendingState state) {
  QUIC_BUG_IF(quic_bug_10752_21, !flusher_attached_)
      << ENDPOINT
      << "Packet flusher is not attached when "
         "generator tries to write stream data.";
  bool has_handshake = QuicUtils::IsCryptoStreamId(transport_version(), id);
  const TransmissionType next_transmission_type = next_transmission_type_;
  MaybeBundleOpportunistically();
  const size_t original_write_length = write_length;
  // New data must respect the stream's flow-control window; if clamped, the
  // FIN cannot be sent with this (truncated) write.
  if (next_transmission_type_ == NOT_RETRANSMISSION) {
    if (QuicByteCount send_window = delegate_->GetFlowControlSendWindowSize(id);
        write_length > send_window) {
      QUIC_DLOG(INFO) << ENDPOINT
                      << "After bundled data, reducing (old) write_length:"
                      << write_length << "to (new) send_window:" << send_window;
      write_length = send_window;
      state = NO_FIN;
    }
  }
  bool fin = state != NO_FIN;
  QUIC_BUG_IF(quic_bug_12398_17, has_handshake && fin)
      << ENDPOINT << "Handshake packets should never send a fin";
  // Crypto-stream data must not share a packet with other retransmittable
  // frames.
  if (has_handshake && HasPendingRetransmittableFrames()) {
    FlushCurrentPacket();
  }
  size_t total_bytes_consumed = 0;
  bool fin_consumed = false;
  if (!HasRoomForStreamFrame(id, offset, write_length)) {
    FlushCurrentPacket();
  }
  if (!fin && (write_length == 0)) {
    QUIC_BUG_IF(quic_bug_10752_22, original_write_length == 0)
        << ENDPOINT
        << "Attempt to consume empty data without FIN. old transmission type:"
        << next_transmission_type
        << ", new transmission type:" << next_transmission_type_;
    return QuicConsumedData(0, false);
  }
  // Fast path: large, plain stream writes with nothing pending and no soft
  // max-packet-length latched can be serialized without frame bookkeeping.
  bool run_fast_path =
      !has_handshake && state != FIN_AND_PADDING && !HasPendingFrames() &&
      write_length - total_bytes_consumed > kMaxOutgoingPacketSize &&
      latched_hard_max_packet_length_ == 0;
  while (!run_fast_path &&
         (has_handshake || delegate_->ShouldGeneratePacket(
                               HAS_RETRANSMITTABLE_DATA, NOT_HANDSHAKE))) {
    QuicFrame frame;
    bool needs_full_padding =
        has_handshake && fully_pad_crypto_handshake_packets_;
    if (!ConsumeDataToFillCurrentPacket(id, write_length - total_bytes_consumed,
                                        offset + total_bytes_consumed, fin,
                                        needs_full_padding,
                                        next_transmission_type_, &frame)) {
      QUIC_BUG(quic_bug_10752_23)
          << ENDPOINT << "Failed to ConsumeData, stream:" << id;
      return QuicConsumedData(0, false);
    }
    size_t bytes_consumed = frame.stream_frame.data_length;
    total_bytes_consumed += bytes_consumed;
    fin_consumed = fin && total_bytes_consumed == write_length;
    if (fin_consumed && state == FIN_AND_PADDING) {
      AddRandomPadding();
    }
    QUICHE_DCHECK(total_bytes_consumed == write_length ||
                  (bytes_consumed > 0 && HasPendingFrames()))
        << ENDPOINT;
    if (total_bytes_consumed == write_length) {
      break;
    }
    FlushCurrentPacket();
    // Re-evaluate the fast-path condition now that the packet is empty.
    run_fast_path =
        !has_handshake && state != FIN_AND_PADDING && !HasPendingFrames() &&
        write_length - total_bytes_consumed > kMaxOutgoingPacketSize &&
        latched_hard_max_packet_length_ == 0;
  }
  if (run_fast_path) {
    return ConsumeDataFastPath(id, write_length, offset, state != NO_FIN,
                               total_bytes_consumed);
  }
  if (has_handshake) {
    FlushCurrentPacket();
  }
  return QuicConsumedData(total_bytes_consumed, fin_consumed);
}
// Fast path for large non-crypto stream writes: serializes full packets in a
// loop via CreateAndSerializeStreamFrame, bypassing the per-frame queueing
// machinery. Stops when all data is consumed, the delegate declines further
// packets, or serialization fails (which raises an unrecoverable error).
QuicConsumedData QuicPacketCreator::ConsumeDataFastPath(
    QuicStreamId id, size_t write_length, QuicStreamOffset offset, bool fin,
    size_t total_bytes_consumed) {
  QUICHE_DCHECK(!QuicUtils::IsCryptoStreamId(transport_version(), id))
      << ENDPOINT;
  // Stream data must be encrypted; bail early (raising a bug) otherwise.
  if (AttemptingToSendUnencryptedStreamData()) {
    return QuicConsumedData(total_bytes_consumed,
                            fin && (total_bytes_consumed == write_length));
  }
  while (total_bytes_consumed < write_length &&
         delegate_->ShouldGeneratePacket(HAS_RETRANSMITTABLE_DATA,
                                         NOT_HANDSHAKE)) {
    size_t bytes_consumed = 0;
    CreateAndSerializeStreamFrame(id, write_length, total_bytes_consumed,
                                  offset + total_bytes_consumed, fin,
                                  next_transmission_type_, &bytes_consumed);
    if (bytes_consumed == 0) {
      const std::string error_details =
          "Failed in CreateAndSerializeStreamFrame.";
      QUIC_BUG(quic_bug_10752_24) << ENDPOINT << error_details;
      delegate_->OnUnrecoverableError(QUIC_FAILED_TO_SERIALIZE_PACKET,
                                      error_details);
      break;
    }
    total_bytes_consumed += bytes_consumed;
  }
  return QuicConsumedData(total_bytes_consumed,
                          fin && (total_bytes_consumed == write_length));
}
// Consumes crypto data at |level|, writing one CRYPTO frame per packet and
// flushing after each so handshake data is never bundled with later frames.
// Returns the number of bytes consumed (0 on failure).
size_t QuicPacketCreator::ConsumeCryptoData(EncryptionLevel level,
                                            size_t write_length,
                                            QuicStreamOffset offset) {
  QUIC_DVLOG(2) << ENDPOINT << "ConsumeCryptoData " << level << " write_length "
                << write_length << " offset " << offset;
  QUIC_BUG_IF(quic_bug_10752_25, !flusher_attached_)
      << ENDPOINT
      << "Packet flusher is not attached when "
         "generator tries to write crypto data.";
  MaybeBundleOpportunistically();
  // Crypto data must start in a packet free of other retransmittable frames.
  if (HasPendingRetransmittableFrames()) {
    FlushCurrentPacket();
  }
  size_t total_bytes_consumed = 0;
  while (
      total_bytes_consumed < write_length &&
      delegate_->ShouldGeneratePacket(HAS_RETRANSMITTABLE_DATA, IS_HANDSHAKE)) {
    QuicFrame frame;
    if (!ConsumeCryptoDataToFillCurrentPacket(
            level, write_length - total_bytes_consumed,
            offset + total_bytes_consumed, fully_pad_crypto_handshake_packets_,
            next_transmission_type_, &frame)) {
      // Failure is tolerated only while a soft max packet length is latched.
      QUIC_BUG_IF(quic_bug_10752_26, !HasSoftMaxPacketLength()) << absl::StrCat(
          ENDPOINT, "Failed to ConsumeCryptoData at level ", level,
          ", pending_frames: ", GetPendingFramesInfo(),
          ", has_soft_max_packet_length: ", HasSoftMaxPacketLength(),
          ", max_packet_length: ", max_packet_length_, ", transmission_type: ",
          TransmissionTypeToString(next_transmission_type_),
          ", packet_number: ", packet_number().ToString());
      return 0;
    }
    total_bytes_consumed += frame.crypto_frame->data_length;
    // One crypto frame per packet: flush immediately.
    FlushCurrentPacket();
  }
  FlushCurrentPacket();
  return total_bytes_consumed;
}
// Sends a padded MTU-probe packet of size |target_mtu| by temporarily raising
// the max packet length, then restores the previous limit. Requires that no
// frames are currently queued.
void QuicPacketCreator::GenerateMtuDiscoveryPacket(QuicByteCount target_mtu) {
  if (!CanSetMaxPacketLength()) {
    QUIC_BUG(quic_bug_10752_27)
        << ENDPOINT
        << "MTU discovery packets should only be sent when no other "
        << "frames needs to be sent.";
    return;
  }
  // Save the current limit so it can be restored after the probe is flushed.
  const QuicByteCount current_mtu = max_packet_length();
  QuicMtuDiscoveryFrame mtu_discovery_frame;
  QuicFrame frame(mtu_discovery_frame);
  SetMaxPacketLength(target_mtu);
  const bool success = AddPaddedSavedFrame(frame, next_transmission_type_);
  FlushCurrentPacket();
  QUIC_BUG_IF(quic_bug_10752_28, !success)
      << ENDPOINT << "Failed to send path MTU target_mtu:" << target_mtu
      << " transmission_type:" << next_transmission_type_;
  SetMaxPacketLength(current_mtu);
}
// Adds the given ACK/STOP_WAITING frames to packets, flushing and retrying
// when a frame does not fit. Returns false if the delegate declines to allow
// a new packet for one of the frames.
bool QuicPacketCreator::FlushAckFrame(const QuicFrames& frames) {
  QUIC_BUG_IF(quic_bug_10752_30, !flusher_attached_)
      << ENDPOINT
      << "Packet flusher is not attached when "
         "generator tries to send ACK frame.";
  QUIC_BUG_IF(quic_bug_12398_18, !frames.empty() && has_ack())
      << ENDPOINT << "Trying to flush " << quiche::PrintElements(frames)
      << " when there is ACK queued";
  for (const auto& frame : frames) {
    QUICHE_DCHECK(frame.type == ACK_FRAME || frame.type == STOP_WAITING_FRAME)
        << ENDPOINT;
    // First try to append to the packet under construction.
    if (HasPendingFrames()) {
      if (AddFrame(frame, next_transmission_type_)) {
        continue;
      }
    }
    // AddFrame flushes on failure, so the packet is now empty.
    QUICHE_DCHECK(!HasPendingFrames()) << ENDPOINT;
    if (!delegate_->ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA,
                                         NOT_HANDSHAKE)) {
      return false;
    }
    const bool success = AddFrame(frame, next_transmission_type_);
    QUIC_BUG_IF(quic_bug_10752_31, !success)
        << ENDPOINT << "Failed to flush " << frame;
  }
  return true;
}
void QuicPacketCreator::AddRandomPadding() {
AddPendingPadding(random_->RandUint64() % kMaxNumRandomPaddingBytes + 1);
}
// Marks a write burst as in progress and records its starting packet number
// (unless a burst start has already been recorded).
void QuicPacketCreator::AttachPacketFlusher() {
  flusher_attached_ = true;
  if (write_start_packet_number_.IsInitialized()) {
    return;
  }
  write_start_packet_number_ = NextSendingPacketNumber();
}
// Ends the current write burst: flushes the in-progress packet, drains any
// pending padding, detaches the flusher, and (behind a flag) records how many
// packets this burst wrote.
void QuicPacketCreator::Flush() {
  FlushCurrentPacket();
  SendRemainingPendingPadding();
  flusher_attached_ = false;
  if (GetQuicFlag(quic_export_write_path_stats_at_server)) {
    if (!write_start_packet_number_.IsInitialized()) {
      QUIC_BUG(quic_bug_10752_32)
          << ENDPOINT << "write_start_packet_number is not initialized";
      return;
    }
    QUIC_SERVER_HISTOGRAM_COUNTS(
        "quic_server_num_written_packets_per_write",
        NextSendingPacketNumber() - write_start_packet_number_, 1, 200, 50,
        "Number of QUIC packets written per write operation");
  }
  write_start_packet_number_.Clear();
}
// Emits padding-only packets until the pending padding budget is drained,
// a frame becomes queued, or the delegate stops allowing packets.
void QuicPacketCreator::SendRemainingPendingPadding() {
  for (;;) {
    if (pending_padding_bytes() == 0 || HasPendingFrames() ||
        !delegate_->ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA,
                                         NOT_HANDSHAKE)) {
      return;
    }
    FlushCurrentPacket();
  }
}
// A zero length means the server connection ID is omitted from headers.
void QuicPacketCreator::SetServerConnectionIdLength(uint32_t length) {
  SetServerConnectionIdIncluded(length == 0 ? CONNECTION_ID_ABSENT
                                            : CONNECTION_ID_PRESENT);
}
// Sets the transmission type applied to frames added from now on; the packet
// already queued keeps the type it was built with.
void QuicPacketCreator::SetTransmissionType(TransmissionType type) {
  next_transmission_type_ = type;
}
// Adds a DATAGRAM/MESSAGE frame carrying |message|. Rejects payloads larger
// than the current packet can ever hold, flushing first if the current packet
// lacks room. On success the frame takes ownership of the slices.
MessageStatus QuicPacketCreator::AddMessageFrame(
    QuicMessageId message_id, absl::Span<quiche::QuicheMemSlice> message) {
  QUIC_BUG_IF(quic_bug_10752_33, !flusher_attached_)
      << ENDPOINT
      << "Packet flusher is not attached when "
         "generator tries to add message frame.";
  MaybeBundleOpportunistically();
  const QuicByteCount message_length = MemSliceSpanTotalSize(message);
  if (message_length > GetCurrentLargestMessagePayload()) {
    return MESSAGE_STATUS_TOO_LARGE;
  }
  if (!HasRoomForMessageFrame(message_length)) {
    FlushCurrentPacket();
  }
  QuicMessageFrame* frame = new QuicMessageFrame(message_id, message);
  const bool success = AddFrame(QuicFrame(frame), next_transmission_type_);
  if (!success) {
    QUIC_BUG(quic_bug_10752_34)
        << ENDPOINT << "Failed to send message " << message_id;
    delete frame;
    return MESSAGE_STATUS_INTERNAL_ERROR;
  }
  // The frame constructor consumed the slices, so the span is now empty.
  QUICHE_DCHECK_EQ(MemSliceSpanTotalSize(message),
                   0u);
  return MESSAGE_STATUS_SUCCESS;
}
// Varint size of the long-header Length field: two bytes for INITIAL, 0-RTT
// and HANDSHAKE packets of versions with long-header lengths, else zero.
quiche::QuicheVariableLengthIntegerLength QuicPacketCreator::GetLengthLength()
    const {
  if (!QuicVersionHasLongHeaderLengths(framer_->transport_version()) ||
      !HasIetfLongHeader()) {
    return quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
  }
  switch (EncryptionlevelToLongHeaderType(packet_.encryption_level)) {
    case INITIAL:
    case ZERO_RTT_PROTECTED:
    case HANDSHAKE:
      return quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
    default:
      return quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
  }
}
// Populates |header| from the creator's current state (connection IDs,
// version/nonce flags, retry token, length encodings) and assigns the next
// packet number to both the header and the packet under construction.
void QuicPacketCreator::FillPacketHeader(QuicPacketHeader* header) {
  header->destination_connection_id = GetDestinationConnectionId();
  header->destination_connection_id_included =
      GetDestinationConnectionIdIncluded();
  header->source_connection_id = GetSourceConnectionId();
  header->source_connection_id_included = GetSourceConnectionIdIncluded();
  header->reset_flag = false;
  header->version_flag = IncludeVersionInHeader();
  if (IncludeNonceInPublicHeader()) {
    // Diversification nonces are only ever written by the server.
    QUICHE_DCHECK_EQ(Perspective::IS_SERVER, framer_->perspective())
        << ENDPOINT;
    header->nonce = &diversification_nonce_;
  } else {
    header->nonce = nullptr;
  }
  // Advance to (and record) the packet number this packet will carry.
  packet_.packet_number = NextSendingPacketNumber();
  header->packet_number = packet_.packet_number;
  header->packet_number_length = GetPacketNumberLength();
  header->retry_token_length_length = GetRetryTokenLengthLength();
  header->retry_token = GetRetryToken();
  header->length_length = GetLengthLength();
  header->remaining_packet_length = 0;
  if (!HasIetfLongHeader()) {
    return;
  }
  header->long_packet_type =
      EncryptionlevelToLongHeaderType(packet_.encryption_level);
}
// Returns the serialized size of |frame| in the current packet, or 0 if it
// does not fit. With header protection, additionally verifies that enough
// room remains to pad the plaintext up to the minimum sampling size.
size_t QuicPacketCreator::GetSerializedFrameLength(const QuicFrame& frame) {
  size_t serialized_frame_length = framer_->GetSerializedFrameLength(
      frame, BytesFree(), queued_frames_.empty(),
      /*last_frame_in_packet=*/true, GetPacketNumberLength());
  if (!framer_->version().HasHeaderProtection() ||
      serialized_frame_length == 0) {
    return serialized_frame_length;
  }
  // Total frame bytes the packet would contain after adding this frame.
  const size_t frame_bytes = PacketSize() - PacketHeaderSize() +
                             ExpansionOnNewFrame() + serialized_frame_length;
  if (frame_bytes >=
      MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength())) {
    return serialized_frame_length;
  }
  if (BytesFree() < serialized_frame_length) {
    QUIC_BUG(quic_bug_10752_35) << ENDPOINT << "Frame does not fit: " << frame;
    return 0;
  }
  // Below the minimum plaintext size: ensure room for either a padding frame
  // (1 byte plus any last-frame expansion) or enough padding to reach the
  // minimum, whichever is larger.
  size_t bytes_free = BytesFree() - serialized_frame_length;
  const size_t extra_bytes_needed = std::max(
      1 + ExpansionOnNewFrameWithLastFrame(frame, framer_->transport_version()),
      MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength()) -
          frame_bytes);
  if (bytes_free < extra_bytes_needed) {
    return 0;
  }
  return serialized_frame_length;
}
// Appends |frame| to the packet under construction, updating packet size and
// per-packet metadata. Stream frames may be coalesced with the previous
// frame; a full packet is flushed and false returned so the caller retries.
bool QuicPacketCreator::AddFrame(const QuicFrame& frame,
                                 TransmissionType transmission_type) {
  QUIC_DVLOG(1) << ENDPOINT << "Adding frame with transmission type "
                << transmission_type << ": " << frame;
  // Non-crypto stream data requires encryption to be established.
  if (frame.type == STREAM_FRAME &&
      !QuicUtils::IsCryptoStreamId(framer_->transport_version(),
                                   frame.stream_frame.stream_id) &&
      AttemptingToSendUnencryptedStreamData()) {
    return false;
  }
  // Most control frames are only valid once 0-RTT or 1-RTT keys exist.
  QUICHE_DCHECK(
      packet_.encryption_level == ENCRYPTION_ZERO_RTT ||
      packet_.encryption_level == ENCRYPTION_FORWARD_SECURE ||
      (frame.type != GOAWAY_FRAME && frame.type != WINDOW_UPDATE_FRAME &&
       frame.type != HANDSHAKE_DONE_FRAME &&
       frame.type != NEW_CONNECTION_ID_FRAME &&
       frame.type != MAX_STREAMS_FRAME && frame.type != STREAMS_BLOCKED_FRAME &&
       frame.type != PATH_RESPONSE_FRAME &&
       frame.type != PATH_CHALLENGE_FRAME && frame.type != STOP_SENDING_FRAME &&
       frame.type != MESSAGE_FRAME && frame.type != NEW_TOKEN_FRAME &&
       frame.type != RETIRE_CONNECTION_ID_FRAME &&
       frame.type != ACK_FREQUENCY_FRAME))
      << ENDPOINT << frame.type << " not allowed at "
      << packet_.encryption_level;
  // Contiguous stream data may be merged into the previous stream frame.
  if (frame.type == STREAM_FRAME) {
    if (MaybeCoalesceStreamFrame(frame.stream_frame)) {
      LogCoalesceStreamFrameStatus(true);
      return true;
    } else {
      LogCoalesceStreamFrameStatus(false);
    }
  }
  QUICHE_DCHECK(frame.type != ACK_FRAME || (!frame.ack_frame->packets.Empty() &&
                                            frame.ack_frame->packets.Max() ==
                                                frame.ack_frame->largest_acked))
      << ENDPOINT << "Invalid ACK frame: " << frame;
  // A zero length means "does not fit"; dropping the soft max packet length
  // may free enough room, so retry once after removing it.
  size_t frame_len = GetSerializedFrameLength(frame);
  if (frame_len == 0 && RemoveSoftMaxPacketLength()) {
    frame_len = GetSerializedFrameLength(frame);
  }
  if (frame_len == 0) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Flushing because current open packet is full when adding "
                  << frame;
    FlushCurrentPacket();
    return false;
  }
  if (queued_frames_.empty()) {
    packet_size_ = PacketHeaderSize();
  }
  QUICHE_DCHECK_LT(0u, packet_size_) << ENDPOINT;
  packet_size_ += ExpansionOnNewFrame() + frame_len;
  // Track the frame in the retransmittable or nonretransmittable list.
  if (QuicUtils::IsRetransmittableFrame(frame.type)) {
    packet_.retransmittable_frames.push_back(frame);
    queued_frames_.push_back(frame);
    if (QuicUtils::IsHandshakeFrame(frame, framer_->transport_version())) {
      packet_.has_crypto_handshake = IS_HANDSHAKE;
    }
  } else {
    // Resolve "fill remaining space" padding (-1) to its concrete length.
    if (frame.type == PADDING_FRAME &&
        frame.padding_frame.num_padding_bytes == -1) {
      packet_.nonretransmittable_frames.push_back(
          QuicFrame(QuicPaddingFrame(frame_len)));
    } else {
      packet_.nonretransmittable_frames.push_back(frame);
    }
    queued_frames_.push_back(frame);
  }
  // Per-packet flags consumed by the sent-packet manager.
  if (frame.type == ACK_FRAME) {
    packet_.has_ack = true;
    packet_.largest_acked = LargestAcked(*frame.ack_frame);
    if (frame.ack_frame->ecn_counters.has_value()) {
      packet_.has_ack_ecn = true;
    }
  } else if (frame.type == STOP_WAITING_FRAME) {
    packet_.has_stop_waiting = true;
  } else if (frame.type == ACK_FREQUENCY_FRAME) {
    packet_.has_ack_frequency = true;
  } else if (frame.type == MESSAGE_FRAME) {
    packet_.has_message = true;
  }
  if (debug_delegate_ != nullptr) {
    debug_delegate_->OnFrameAddedToPacket(frame);
  }
  // Account bytes that are not retransmissions; otherwise let a
  // retransmittable frame dictate the packet's transmission type.
  if (transmission_type == NOT_RETRANSMISSION) {
    packet_.bytes_not_retransmitted.emplace(
        packet_.bytes_not_retransmitted.value_or(0) + frame_len);
  } else if (QuicUtils::IsRetransmittableFrame(frame.type)) {
    packet_.transmission_type = transmission_type;
  }
  return true;
}
// When header protection is in use and the packet will not already be fully
// padded, raises the pending padding so the plaintext reaches the minimum
// size required for header-protection sampling.
void QuicPacketCreator::MaybeAddExtraPaddingForHeaderProtection() {
  if (!framer_->version().HasHeaderProtection() || needs_full_padding_) {
    return;
  }
  const size_t frame_bytes = PacketSize() - PacketHeaderSize();
  if (frame_bytes >=
      MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength())) {
    return;
  }
  QuicByteCount min_header_protection_padding =
      MinPlaintextPacketSize(framer_->version(), GetPacketNumberLength()) -
      frame_bytes;
  // Only grow the pending padding; never shrink an existing request.
  pending_padding_bytes_ =
      std::max(pending_padding_bytes_, min_header_protection_padding);
}
// Merges |frame| into the packet's last queued frame when that frame is a
// stream frame for the same stream with exactly adjacent data and the packet
// has room. Keeps the parallel retransmittable-frames entry in sync.
bool QuicPacketCreator::MaybeCoalesceStreamFrame(const QuicStreamFrame& frame) {
  if (queued_frames_.empty() || queued_frames_.back().type != STREAM_FRAME) {
    return false;
  }
  QuicStreamFrame* candidate = &queued_frames_.back().stream_frame;
  // Must be the same stream, contiguous in offset, and fit in free space.
  if (candidate->stream_id != frame.stream_id ||
      candidate->offset + candidate->data_length != frame.offset ||
      frame.data_length > BytesFree()) {
    return false;
  }
  candidate->data_length += frame.data_length;
  candidate->fin = frame.fin;
  // Mirror the extension onto the copy held in retransmittable_frames.
  QUICHE_DCHECK_EQ(packet_.retransmittable_frames.back().type, STREAM_FRAME)
      << ENDPOINT;
  QuicStreamFrame* retransmittable =
      &packet_.retransmittable_frames.back().stream_frame;
  QUICHE_DCHECK_EQ(retransmittable->stream_id, frame.stream_id) << ENDPOINT;
  QUICHE_DCHECK_EQ(retransmittable->offset + retransmittable->data_length,
                   frame.offset)
      << ENDPOINT;
  retransmittable->data_length = candidate->data_length;
  retransmittable->fin = candidate->fin;
  packet_size_ += frame.data_length;
  if (debug_delegate_ != nullptr) {
    debug_delegate_->OnStreamFrameCoalesced(*candidate);
  }
  return true;
}
// Restores the latched hard max packet length, dropping the soft limit.
// Returns false when no soft limit is in force or the limit cannot be
// changed right now (frames are queued).
bool QuicPacketCreator::RemoveSoftMaxPacketLength() {
  if (latched_hard_max_packet_length_ == 0 || !CanSetMaxPacketLength()) {
    return false;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Restoring max packet length to: "
                << latched_hard_max_packet_length_;
  SetMaxPacketLength(latched_hard_max_packet_length_);
  latched_hard_max_packet_length_ = 0;
  return true;
}
// Adds padding to the packet being finalized: full padding when requested
// (except for packets that will be coalesced), otherwise up to the pending
// padding budget, plus any minimum required for header protection.
void QuicPacketCreator::MaybeAddPadding() {
  if (BytesFreeForPadding() == 0) {
    return;
  }
  // Coalesced packets are padded at the coalescing layer, not here.
  if (packet_.fate == COALESCE) {
    needs_full_padding_ = false;
  }
  MaybeAddExtraPaddingForHeaderProtection();
  QUIC_DVLOG(3) << "MaybeAddPadding for " << packet_.packet_number
                << ": transmission_type:" << packet_.transmission_type
                << ", fate:" << packet_.fate
                << ", needs_full_padding_:" << needs_full_padding_
                << ", pending_padding_bytes_:" << pending_padding_bytes_
                << ", BytesFree:" << BytesFree();
  if (!needs_full_padding_ && pending_padding_bytes_ == 0) {
    return;
  }
  // -1 means "pad to the end of the packet".
  int padding_bytes = -1;
  if (!needs_full_padding_) {
    padding_bytes =
        std::min<int16_t>(pending_padding_bytes_, BytesFreeForPadding());
    pending_padding_bytes_ -= padding_bytes;
  }
  if (!queued_frames_.empty()) {
    // Frames already queued: prepend a padding frame directly and account
    // for it, bypassing AddFrame.
    if (needs_full_padding_) {
      padding_bytes = BytesFreeForPadding();
    }
    QuicFrame frame{QuicPaddingFrame(padding_bytes)};
    queued_frames_.insert(queued_frames_.begin(), frame);
    packet_size_ += padding_bytes;
    packet_.nonretransmittable_frames.push_back(frame);
    if (packet_.transmission_type == NOT_RETRANSMISSION) {
      packet_.bytes_not_retransmitted.emplace(
          packet_.bytes_not_retransmitted.value_or(0) + padding_bytes);
    }
  } else {
    // Empty packet: go through AddFrame so header size is accounted.
    bool success = AddFrame(QuicFrame(QuicPaddingFrame(padding_bytes)),
                            packet_.transmission_type);
    QUIC_BUG_IF(quic_bug_10752_36, !success)
        << ENDPOINT << "Failed to add padding_bytes: " << padding_bytes
        << " transmission_type: " << packet_.transmission_type;
  }
}
// A diversification nonce is written only in 0-RTT packets, and only once a
// nonce has actually been provided.
bool QuicPacketCreator::IncludeNonceInPublicHeader() const {
  if (!have_diversification_nonce_) {
    return false;
  }
  return packet_.encryption_level == ENCRYPTION_ZERO_RTT;
}
// Every encryption level below forward-secure uses a long header, which
// carries the version field.
bool QuicPacketCreator::IncludeVersionInHeader() const {
  return !(packet_.encryption_level >= ENCRYPTION_FORWARD_SECURE);
}
// Accumulates padding to be emitted later by MaybeAddPadding /
// SendRemainingPendingPadding.
void QuicPacketCreator::AddPendingPadding(QuicByteCount size) {
  pending_padding_bytes_ += size;
  QUIC_DVLOG(3) << "After AddPendingPadding(" << size
                << "), pending_padding_bytes_:" << pending_padding_bytes_;
}
// A CHLO is client-sent crypto-stream data at the initial encryption level.
bool QuicPacketCreator::StreamFrameIsClientHello(
    const QuicStreamFrame& frame) const {
  const bool client_crypto_stream =
      framer_->perspective() == Perspective::IS_CLIENT &&
      QuicUtils::IsCryptoStreamId(framer_->transport_version(),
                                  frame.stream_id);
  return client_crypto_stream &&
         packet_.encryption_level == ENCRYPTION_INITIAL;
}
// Sets whether the server connection ID is serialized in short headers.
// Only servers may omit it; clients must always include it.
void QuicPacketCreator::SetServerConnectionIdIncluded(
    QuicConnectionIdIncluded server_connection_id_included) {
  QUICHE_DCHECK(server_connection_id_included == CONNECTION_ID_PRESENT ||
                server_connection_id_included == CONNECTION_ID_ABSENT)
      << ENDPOINT;
  QUICHE_DCHECK(framer_->perspective() == Perspective::IS_SERVER ||
                server_connection_id_included != CONNECTION_ID_ABSENT)
      << ENDPOINT;
  server_connection_id_included_ = server_connection_id_included;
}
// Records the server connection ID used for subsequent packet headers.
void QuicPacketCreator::SetServerConnectionId(
    QuicConnectionId server_connection_id) {
  server_connection_id_ = server_connection_id;
}
// Records the client connection ID. A non-empty ID is only legal when the
// version supports client-chosen connection IDs.
void QuicPacketCreator::SetClientConnectionId(
    QuicConnectionId client_connection_id) {
  QUICHE_DCHECK(client_connection_id.IsEmpty() ||
                framer_->version().SupportsClientConnectionIds())
      << ENDPOINT;
  client_connection_id_ = client_connection_id;
}
// Largest MESSAGE-frame payload that fits in a packet given the header the
// creator would build right now, capped by the peer's max_datagram_frame_size.
QuicPacketLength QuicPacketCreator::GetCurrentLargestMessagePayload() const {
  const size_t packet_header_size = GetPacketHeaderSize(
      framer_->transport_version(), GetDestinationConnectionIdLength(),
      GetSourceConnectionIdLength(), IncludeVersionInHeader(),
      IncludeNonceInPublicHeader(), GetPacketNumberLength(),
      // No Initial packets, so no space needed for retry tokens.
      quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0, 0, GetLengthLength());
  // Prefer the hard limit if a soft max packet length is latched.
  size_t max_plaintext_size =
      latched_hard_max_packet_length_ == 0
          ? max_plaintext_size_
          : framer_->GetMaxPlaintextSize(latched_hard_max_packet_length_);
  size_t largest_frame =
      max_plaintext_size - std::min(max_plaintext_size, packet_header_size);
  if (static_cast<QuicByteCount>(largest_frame) > max_datagram_frame_size_) {
    largest_frame = static_cast<size_t>(max_datagram_frame_size_);
  }
  // Subtract the one-byte frame type (clamped to avoid underflow).
  return largest_frame - std::min(largest_frame, kQuicFrameTypeSize);
}
// Largest MESSAGE-frame payload guaranteed to fit regardless of which header
// variant is in effect: assumes the worst-case header (version included,
// possible nonce, 4-byte packet number, client-side 2-byte length field).
QuicPacketLength QuicPacketCreator::GetGuaranteedLargestMessagePayload() const {
  // QUIC-crypto servers may need room for a diversification nonce.
  const bool may_include_nonce =
      framer_->version().handshake_protocol == PROTOCOL_QUIC_CRYPTO &&
      framer_->perspective() == Perspective::IS_SERVER;
  quiche::QuicheVariableLengthIntegerLength length_length =
      quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
  if (framer_->perspective() == Perspective::IS_CLIENT) {
    length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
  }
  if (!QuicVersionHasLongHeaderLengths(framer_->transport_version())) {
    length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0;
  }
  const size_t packet_header_size = GetPacketHeaderSize(
      framer_->transport_version(), GetDestinationConnectionIdLength(),
      GetSourceConnectionIdLength(), kIncludeVersion, may_include_nonce,
      PACKET_4BYTE_PACKET_NUMBER,
      // No Initial packets, so no space needed for retry tokens.
      quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0, 0, length_length);
  // Prefer the hard limit if a soft max packet length is latched.
  size_t max_plaintext_size =
      latched_hard_max_packet_length_ == 0
          ? max_plaintext_size_
          : framer_->GetMaxPlaintextSize(latched_hard_max_packet_length_);
  size_t largest_frame =
      max_plaintext_size - std::min(max_plaintext_size, packet_header_size);
  if (static_cast<QuicByteCount>(largest_frame) > max_datagram_frame_size_) {
    largest_frame = static_cast<size_t>(max_datagram_frame_size_);
  }
  const QuicPacketLength largest_payload =
      largest_frame - std::min(largest_frame, kQuicFrameTypeSize);
  // The guaranteed bound can never exceed the current-header bound.
  QUICHE_DCHECK_LE(largest_payload, GetCurrentLargestMessagePayload())
      << ENDPOINT;
  return largest_payload;
}
// Returns true (and raises an unrecoverable error) when stream data is being
// sent before 0-RTT or 1-RTT encryption is available; false when it is safe.
bool QuicPacketCreator::AttemptingToSendUnencryptedStreamData() {
  if (packet_.encryption_level == ENCRYPTION_ZERO_RTT ||
      packet_.encryption_level == ENCRYPTION_FORWARD_SECURE) {
    return false;
  }
  const std::string error_details =
      absl::StrCat("Cannot send stream data with level: ",
                   EncryptionLevelToString(packet_.encryption_level));
  QUIC_BUG(quic_bug_10752_37) << ENDPOINT << error_details;
  delegate_->OnUnrecoverableError(QUIC_ATTEMPT_TO_SEND_UNENCRYPTED_STREAM_DATA,
                                  error_details);
  return true;
}
// Long headers are used until the connection reaches forward security.
bool QuicPacketCreator::HasIetfLongHeader() const {
  return !(packet_.encryption_level >= ENCRYPTION_FORWARD_SECURE);
}
// Minimum plaintext size needed so header-protection sampling has enough
// ciphertext after the packet number; zero when the version has no header
// protection.
size_t QuicPacketCreator::MinPlaintextPacketSize(
    const ParsedQuicVersion& version,
    QuicPacketNumberLength packet_number_length) {
  if (!version.HasHeaderProtection()) {
    return 0;
  }
  const size_t baseline = version.UsesTls() ? 4 : 8;
  return baseline - packet_number_length;
}
// Number the next packet will carry: the framer's first sending number before
// anything has been sent, otherwise one past the current packet number.
QuicPacketNumber QuicPacketCreator::NextSendingPacketNumber() const {
  return packet_number().IsInitialized()
             ? packet_number() + 1
             : framer_->first_sending_packet_number();
}
// True between AttachPacketFlusher() and Flush().
bool QuicPacketCreator::PacketFlusherAttached() const {
  return flusher_attached_;
}
// A nonzero latched hard limit means a soft max packet length is in force.
bool QuicPacketCreator::HasSoftMaxPacketLength() const {
  return latched_hard_max_packet_length_ != 0;
}
// Records the peer address packets are built for. Changing an already-set
// address flushes pending frames first, so no packet straddles two peers.
void QuicPacketCreator::SetDefaultPeerAddress(QuicSocketAddress address) {
  if (packet_.peer_address.IsInitialized() &&
      packet_.peer_address == address) {
    return;  // No change.
  }
  if (packet_.peer_address.IsInitialized()) {
    FlushCurrentPacket();
  }
  packet_.peer_address = address;
}
#define ENDPOINT2 \
(creator_->framer_->perspective() == Perspective::IS_SERVER ? "Server: " \
: "Client: ")
// RAII scope that temporarily switches the creator's peer address and (for
// IETF versions) connection IDs, flushing when an ID length change would
// otherwise alter packet layout mid-packet. Restored by the destructor.
QuicPacketCreator::ScopedPeerAddressContext::ScopedPeerAddressContext(
    QuicPacketCreator* creator, QuicSocketAddress address,
    const QuicConnectionId& client_connection_id,
    const QuicConnectionId& server_connection_id)
    : creator_(creator),
      old_peer_address_(creator_->packet_.peer_address),
      old_client_connection_id_(creator_->GetClientConnectionId()),
      old_server_connection_id_(creator_->GetServerConnectionId()) {
  QUIC_BUG_IF(quic_bug_12398_19, !old_peer_address_.IsInitialized())
      << ENDPOINT2
      << "Context is used before serialized packet's peer address is "
         "initialized.";
  creator_->SetDefaultPeerAddress(address);
  if (creator_->version().HasIetfQuicFrames()) {
    // Same address but different connection ID lengths would change the
    // header size of queued frames, so flush before swapping IDs.
    if (address == old_peer_address_ &&
        ((client_connection_id.length() !=
          old_client_connection_id_.length()) ||
         (server_connection_id.length() !=
          old_server_connection_id_.length()))) {
      creator_->FlushCurrentPacket();
    }
    creator_->SetClientConnectionId(client_connection_id);
    creator_->SetServerConnectionId(server_connection_id);
  }
}
// Restores the peer address and, for IETF versions, the connection IDs that
// were in effect when the context was entered.
QuicPacketCreator::ScopedPeerAddressContext::~ScopedPeerAddressContext() {
  creator_->SetDefaultPeerAddress(old_peer_address_);
  if (!creator_->version().HasIetfQuicFrames()) {
    return;
  }
  creator_->SetClientConnectionId(old_client_connection_id_);
  creator_->SetServerConnectionId(old_server_connection_id_);
}
// Guard whose destructor reports an unrecoverable error if serialization
// finishes without producing an encrypted buffer.
QuicPacketCreator::ScopedSerializationFailureHandler::
    ScopedSerializationFailureHandler(QuicPacketCreator* creator)
    : creator_(creator) {}
// On scope exit: always clears the queued frames; if no encrypted buffer was
// produced, serialization failed, so raise an unrecoverable error.
QuicPacketCreator::ScopedSerializationFailureHandler::
    ~ScopedSerializationFailureHandler() {
  if (creator_ == nullptr) {
    return;
  }
  // Frames have been consumed (or abandoned) either way.
  creator_->queued_frames_.clear();
  if (creator_->packet_.encrypted_buffer == nullptr) {
    const std::string error_details = "Failed to SerializePacket.";
    QUIC_BUG(quic_bug_10752_38) << ENDPOINT2 << error_details;
    creator_->delegate_->OnUnrecoverableError(QUIC_FAILED_TO_SERIALIZE_PACKET,
                                              error_details);
  }
}
#undef ENDPOINT2
// Switches the encryption level used for subsequent packets. Changing level
// while frames are queued would encrypt them at the wrong level, so only a
// no-op change is permitted in that state.
void QuicPacketCreator::set_encryption_level(EncryptionLevel level) {
  QUICHE_DCHECK(level == packet_.encryption_level || !HasPendingFrames())
      << ENDPOINT << "Cannot update encryption level from "
      << packet_.encryption_level << " to " << level
      << " when we already have pending frames: "
      << QuicFramesToString(queued_frames_);
  packet_.encryption_level = level;
}
// Sends a padded PATH_CHALLENGE carrying |payload|. Best effort: if the
// delegate declines a new packet, the challenge is silently dropped (the
// caller's retry/timeout logic handles it).
void QuicPacketCreator::AddPathChallengeFrame(
    const QuicPathFrameBuffer& payload) {
  // TODO(danzh) Unify similar checks in other public functions into a
  // subroutine. Note: the comment below refers to the flusher requirement.
  QUIC_BUG_IF(quic_bug_10752_39, !flusher_attached_)
      << ENDPOINT
      << "Packet flusher is not attached when "
         "generator tries to write stream data.";
  QuicFrame frame(QuicPathChallengeFrame(0, payload));
  if (AddPaddedFrameWithRetry(frame)) {
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Can't send PATH_CHALLENGE now";
}
// Sends a padded PATH_RESPONSE echoing |data_buffer|. Returns false when the
// delegate declines to allow a packet right now.
bool QuicPacketCreator::AddPathResponseFrame(
    const QuicPathFrameBuffer& data_buffer) {
  QuicFrame frame(QuicPathResponseFrame(kInvalidControlFrameId, data_buffer));
  if (AddPaddedFrameWithRetry(frame)) {
    return true;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Can't send PATH_RESPONSE now";
  return false;
}
// Adds |frame| with full padding, first into the current packet and, if that
// fails (which flushes), into a fresh packet once the delegate permits one.
bool QuicPacketCreator::AddPaddedFrameWithRetry(const QuicFrame& frame) {
  if (HasPendingFrames()) {
    if (AddPaddedSavedFrame(frame, NOT_RETRANSMISSION)) {
      return true;
    }
  }
  // A failed add flushed the packet, so it must be empty now.
  QUICHE_DCHECK(!HasPendingFrames()) << ENDPOINT;
  if (!delegate_->ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA,
                                       NOT_HANDSHAKE)) {
    return false;
  }
  bool success = AddPaddedSavedFrame(frame, NOT_RETRANSMISSION);
  QUIC_BUG_IF(quic_bug_12398_20, !success) << ENDPOINT;
  return true;
}
// A retry token is held whenever the stored token string is non-empty.
bool QuicPacketCreator::HasRetryToken() const {
  return retry_token_.size() != 0;
}
#undef ENDPOINT
} | #include "quiche/quic/core/quic_packet_creator.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/frames/quic_frame.h"
#include "quiche/quic/core/frames/quic_stream_frame.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_framer_peer.h"
#include "quiche/quic/test_tools/quic_packet_creator_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_data_producer.h"
#include "quiche/quic/test_tools/simple_quic_framer.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
const QuicPacketNumber kPacketNumber = QuicPacketNumber(UINT64_C(0x12345678));
// Returns a connection ID with a fixed, easily recognizable byte pattern
// (0xFE..0x10), matching the expected packet bytes in the tests below.
QuicConnectionId CreateTestConnectionId() {
  constexpr uint64_t kTestConnectionIdValue = UINT64_C(0xFEDCBA9876543210);
  return TestConnectionId(kTestConnectionIdValue);
}
// Parameters for the test suite: the QUIC version under test and whether
// the version field is serialized into packet headers.
struct TestParams {
  TestParams(ParsedQuicVersion version, bool version_serialization)
      : version(version), version_serialization(version_serialization) {}
  ParsedQuicVersion version;
  bool version_serialization;
};
// Used by gtest (PrintToStringParamName) to label parameterized instances,
// e.g. "Q050_IncludeVersion".
std::string PrintToString(const TestParams& p) {
  const char* version_label = p.version_serialization ? "Include" : "No";
  return absl::StrCat(ParsedQuicVersionToString(p.version), "_", version_label,
                      "Version");
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
ParsedQuicVersionVector all_supported_versions = AllSupportedVersions();
for (size_t i = 0; i < all_supported_versions.size(); ++i) {
params.push_back(TestParams(all_supported_versions[i], true));
params.push_back(TestParams(all_supported_versions[i], false));
}
return params;
}
// Mock observer for per-frame events emitted by QuicPacketCreator.
class MockDebugDelegate : public QuicPacketCreator::DebugDelegate {
 public:
  ~MockDebugDelegate() override = default;
  MOCK_METHOD(void, OnFrameAddedToPacket, (const QuicFrame& frame), (override));
  MOCK_METHOD(void, OnStreamFrameCoalesced, (const QuicStreamFrame& frame),
              (override));
};
// QuicPacketCreator subclass that records stream payload bytes into a
// SimpleDataProducer before consuming them, so the framer can later re-read
// the data when serializing/parsing.
class TestPacketCreator : public QuicPacketCreator {
 public:
  TestPacketCreator(QuicConnectionId connection_id, QuicFramer* framer,
                    DelegateInterface* delegate, SimpleDataProducer* producer)
      : QuicPacketCreator(connection_id, framer, delegate),
        producer_(producer),
        version_(framer->version()) {}
  // Saves |data| into |producer_|, then delegates to the base class passing
  // only the data length (the base reads bytes back through the producer).
  bool ConsumeDataToFillCurrentPacket(QuicStreamId id, absl::string_view data,
                                      QuicStreamOffset offset, bool fin,
                                      bool needs_full_padding,
                                      TransmissionType transmission_type,
                                      QuicFrame* frame) {
    if (!data.empty()) {
      producer_->SaveStreamData(id, data);
    }
    return QuicPacketCreator::ConsumeDataToFillCurrentPacket(
        id, data.length(), offset, fin, needs_full_padding, transmission_type,
        frame);
  }
  // Switches to forward-secure keys, after which the version is not sent.
  void StopSendingVersion() { set_encryption_level(ENCRYPTION_FORWARD_SECURE); }
  SimpleDataProducer* producer_;
  ParsedQuicVersion version_;
};
// Test fixture: drives a TestPacketCreator with a client-side framer and
// parses the serialized output back with a server-side framer whose events
// go to a strict mock visitor.
class QuicPacketCreatorTest : public QuicTestWithParam<TestParams> {
 public:
  // Delegate callback target that simply discards the packet.
  void ClearSerializedPacketForTests(SerializedPacket /*serialized_packet*/) {
  }
  // Delegate callback target that deep-copies the packet into
  // |serialized_packet_| for later inspection.
  void SaveSerializedPacket(SerializedPacket serialized_packet) {
    serialized_packet_.reset(CopySerializedPacket(
        serialized_packet, &allocator_, /*copy_buffer=*/true));
  }
  void DeleteSerializedPacket() { serialized_packet_ = nullptr; }
 protected:
  QuicPacketCreatorTest()
      : connection_id_(TestConnectionId(2)),
        server_framer_(SupportedVersions(GetParam().version), QuicTime::Zero(),
                       Perspective::IS_SERVER, connection_id_.length()),
        client_framer_(SupportedVersions(GetParam().version), QuicTime::Zero(),
                       Perspective::IS_CLIENT, connection_id_.length()),
        data_("foo"),
        creator_(connection_id_, &client_framer_, &delegate_, &producer_) {
    // Default delegate behavior: no external buffer, send straight to writer.
    EXPECT_CALL(delegate_, GetPacketBuffer())
        .WillRepeatedly(Return(QuicPacketBuffer()));
    EXPECT_CALL(delegate_, GetSerializedPacketFate(_, _))
        .WillRepeatedly(Return(SEND_TO_WRITER));
    // Install tagging encrypters for every level so packets can be
    // round-tripped between the client and server framers.
    creator_.SetEncrypter(
        ENCRYPTION_INITIAL,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_INITIAL));
    creator_.SetEncrypter(
        ENCRYPTION_HANDSHAKE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
    creator_.SetEncrypter(
        ENCRYPTION_ZERO_RTT,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
    creator_.SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
    client_framer_.set_visitor(&framer_visitor_);
    server_framer_.set_visitor(&framer_visitor_);
    client_framer_.set_data_producer(&producer_);
    // Matching decrypter installation; the API differs by version.
    if (server_framer_.version().KnowsWhichDecrypterToUse()) {
      server_framer_.InstallDecrypter(ENCRYPTION_INITIAL,
                                      std::make_unique<TaggingDecrypter>());
      server_framer_.InstallDecrypter(ENCRYPTION_ZERO_RTT,
                                      std::make_unique<TaggingDecrypter>());
      server_framer_.InstallDecrypter(ENCRYPTION_HANDSHAKE,
                                      std::make_unique<TaggingDecrypter>());
      server_framer_.InstallDecrypter(ENCRYPTION_FORWARD_SECURE,
                                      std::make_unique<TaggingDecrypter>());
    } else {
      server_framer_.SetDecrypter(ENCRYPTION_INITIAL,
                                  std::make_unique<TaggingDecrypter>());
      server_framer_.SetAlternativeDecrypter(
          ENCRYPTION_FORWARD_SECURE, std::make_unique<TaggingDecrypter>(),
          false);
    }
  }
  ~QuicPacketCreatorTest() override {}
  // Serializes |frames| into |buffer_| and sanity-checks the reported
  // encryption level.
  SerializedPacket SerializeAllFrames(const QuicFrames& frames) {
    SerializedPacket packet = QuicPacketCreatorPeer::SerializeAllFrames(
        &creator_, frames, buffer_, kMaxOutgoingPacketSize);
    EXPECT_EQ(QuicPacketCreatorPeer::GetEncryptionLevel(&creator_),
              packet.encryption_level);
    return packet;
  }
  // Feeds an encrypted packet to the server framer; individual tests set
  // the expectations on |framer_visitor_| beforehand.
  void ProcessPacket(const SerializedPacket& packet) {
    QuicEncryptedPacket encrypted_packet(packet.encrypted_buffer,
                                         packet.encrypted_length);
    server_framer_.ProcessPacket(encrypted_packet);
  }
  // Checks the id/data/offset/fin of a parsed stream frame, re-reading the
  // payload bytes through |producer_|.
  void CheckStreamFrame(const QuicFrame& frame, QuicStreamId stream_id,
                        const std::string& data, QuicStreamOffset offset,
                        bool fin) {
    EXPECT_EQ(STREAM_FRAME, frame.type);
    EXPECT_EQ(stream_id, frame.stream_frame.stream_id);
    char buf[kMaxOutgoingPacketSize];
    QuicDataWriter writer(kMaxOutgoingPacketSize, buf, quiche::HOST_BYTE_ORDER);
    if (frame.stream_frame.data_length > 0) {
      producer_.WriteStreamData(stream_id, frame.stream_frame.offset,
                                frame.stream_frame.data_length, &writer);
    }
    EXPECT_EQ(data, absl::string_view(buf, frame.stream_frame.data_length));
    EXPECT_EQ(offset, frame.stream_frame.offset);
    EXPECT_EQ(fin, frame.stream_frame.fin);
  }
  // Bytes consumed by the packet header for the creator's current settings.
  size_t GetPacketHeaderOverhead(QuicTransportVersion version) {
    return GetPacketHeaderSize(
        version, creator_.GetDestinationConnectionIdLength(),
        creator_.GetSourceConnectionIdLength(),
        QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
        !kIncludeDiversificationNonce,
        QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
        QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_), 0,
        QuicPacketCreatorPeer::GetLengthLength(&creator_));
  }
  // Ciphertext expansion added by encryption at the current packet length.
  size_t GetEncryptionOverhead() {
    return creator_.max_packet_length() -
           client_framer_.GetMaxPlaintextSize(creator_.max_packet_length());
  }
  // Minimum serialized size of a stream frame at kOffset.
  size_t GetStreamFrameOverhead(QuicTransportVersion version) {
    return QuicFramer::GetMinStreamFrameSize(
        version, GetNthClientInitiatedStreamId(1), kOffset, true,
        0);
  }
  bool IsDefaultTestConfiguration() {
    TestParams p = GetParam();
    return p.version == AllSupportedVersions()[0] && p.version_serialization;
  }
  // Bidirectional client-initiated stream ids increase by 2 per stream.
  QuicStreamId GetNthClientInitiatedStreamId(int n) const {
    return QuicUtils::GetFirstBidirectionalStreamId(
               creator_.transport_version(), Perspective::IS_CLIENT) +
           n * 2;
  }
  void TestChaosProtection(bool enabled);
  static constexpr QuicStreamOffset kOffset = 0u;
  char buffer_[kMaxOutgoingPacketSize];
  QuicConnectionId connection_id_;
  QuicFrames frames_;
  QuicFramer server_framer_;
  QuicFramer client_framer_;
  StrictMock<MockFramerVisitor> framer_visitor_;
  StrictMock<MockPacketCreatorDelegate> delegate_;
  std::string data_;
  TestPacketCreator creator_;
  std::unique_ptr<SerializedPacket> serialized_packet_;
  SimpleDataProducer producer_;
  quiche::SimpleBufferAllocator allocator_;
};
// Run the suite over every supported version, with and without version
// serialization (see GetTestParams()/PrintToString()).
INSTANTIATE_TEST_SUITE_P(QuicPacketCreatorTests, QuicPacketCreatorTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// Serializes ACK and/or stream frames at every encryption level and checks
// the server framer parses back exactly those frames (plus padding when the
// payload is below the header-protection minimum).
TEST_P(QuicPacketCreatorTest, SerializeFrames) {
  ParsedQuicVersion version = client_framer_.version();
  for (int i = ENCRYPTION_INITIAL; i < NUM_ENCRYPTION_LEVELS; ++i) {
    EncryptionLevel level = static_cast<EncryptionLevel>(i);
    bool has_ack = false, has_stream = false;
    creator_.set_encryption_level(level);
    size_t payload_len = 0;
    // ACK frames are not sent at 0-RTT.
    if (level != ENCRYPTION_ZERO_RTT) {
      frames_.push_back(QuicFrame(new QuicAckFrame(InitAckFrame(1))));
      has_ack = true;
      payload_len += version.UsesTls() ? 12 : 6;
    }
    // Stream data requires at least 0-RTT keys.
    if (level != ENCRYPTION_INITIAL && level != ENCRYPTION_HANDSHAKE) {
      QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
          client_framer_.transport_version(), Perspective::IS_CLIENT);
      frames_.push_back(QuicFrame(
          QuicStreamFrame(stream_id, false, 0u, absl::string_view())));
      has_stream = true;
      payload_len += 2;
    }
    SerializedPacket serialized = SerializeAllFrames(frames_);
    EXPECT_EQ(level, serialized.encryption_level);
    if (level != ENCRYPTION_ZERO_RTT) {
      delete frames_[0].ack_frame;
    }
    frames_.clear();
    ASSERT_GT(payload_len, 0);
    // Versions with header protection pad packets up to a minimum plaintext
    // size; expect a padding frame when the payload falls short of it.
    size_t min_payload = version.UsesTls() ? 3 : 7;
    bool need_padding =
        (version.HasHeaderProtection() && (payload_len < min_payload));
    {
      InSequence s;
      EXPECT_CALL(framer_visitor_, OnPacket());
      EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
      EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
      EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
      EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
      if (need_padding) {
        EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
      }
      if (has_ack) {
        EXPECT_CALL(framer_visitor_, OnAckFrameStart(_, _))
            .WillOnce(Return(true));
        EXPECT_CALL(framer_visitor_,
                    OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2)))
            .WillOnce(Return(true));
        EXPECT_CALL(framer_visitor_, OnAckFrameEnd(QuicPacketNumber(1), _))
            .WillOnce(Return(true));
      }
      if (has_stream) {
        EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
      }
      EXPECT_CALL(framer_visitor_, OnPacketComplete());
    }
    ProcessPacket(serialized);
  }
}
// A CONNECTION_CLOSE frame serializes at ENCRYPTION_INITIAL and parses back
// correctly; the first serialized packet consumes packet number 1.
TEST_P(QuicPacketCreatorTest, SerializeConnectionClose) {
  QuicConnectionCloseFrame* frame = new QuicConnectionCloseFrame(
      creator_.transport_version(), QUIC_NO_ERROR, NO_IETF_QUIC_ERROR, "error",
      0);
  QuicFrames frames;
  frames.push_back(QuicFrame(frame));
  SerializedPacket serialized = SerializeAllFrames(frames);
  EXPECT_EQ(ENCRYPTION_INITIAL, serialized.encryption_level);
  ASSERT_EQ(QuicPacketNumber(1u), serialized.packet_number);
  ASSERT_EQ(QuicPacketNumber(1u), creator_.packet_number());
  InSequence s;
  EXPECT_CALL(framer_visitor_, OnPacket());
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
  EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
  EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
  EXPECT_CALL(framer_visitor_, OnConnectionCloseFrame(_));
  EXPECT_CALL(framer_visitor_, OnPacketComplete());
  ProcessPacket(serialized);
}
// An explicit padding frame pads the packet out to the full default size.
TEST_P(QuicPacketCreatorTest, SerializePacketWithPadding) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  creator_.AddFrame(QuicFrame(QuicWindowUpdateFrame()), NOT_RETRANSMISSION);
  creator_.AddFrame(QuicFrame(QuicPaddingFrame()), NOT_RETRANSMISSION);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  creator_.FlushCurrentPacket();
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  EXPECT_EQ(kDefaultMaxPacketSize, serialized_packet_->encrypted_length);
  DeleteSerializedPacket();
}
// Padding fills up to the configured (enlarged) max packet length, not just
// the default.
TEST_P(QuicPacketCreatorTest, SerializeLargerPacketWithPadding) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  const QuicByteCount packet_size = 100 + kDefaultMaxPacketSize;
  creator_.SetMaxPacketLength(packet_size);
  creator_.AddFrame(QuicFrame(QuicWindowUpdateFrame()), NOT_RETRANSMISSION);
  creator_.AddFrame(QuicFrame(QuicPaddingFrame()), NOT_RETRANSMISSION);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  creator_.FlushCurrentPacket();
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  EXPECT_EQ(packet_size, serialized_packet_->encrypted_length);
  DeleteSerializedPacket();
}
// Enlarging the max packet length while frames are pending is deferred: the
// in-progress packet keeps the old limit; only the next packet uses the new
// one. (Fix: removed a verbatim duplicated EXPECT_EQ assertion at the end.)
TEST_P(QuicPacketCreatorTest, IncreaseMaxPacketLengthWithFramesPending) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  const QuicByteCount packet_size = 100 + kDefaultMaxPacketSize;
  // Add a frame first, then raise the limit while the packet is open.
  creator_.AddFrame(QuicFrame(QuicWindowUpdateFrame()), NOT_RETRANSMISSION);
  creator_.SetMaxPacketLength(packet_size);
  creator_.AddFrame(QuicFrame(QuicPaddingFrame()), NOT_RETRANSMISSION);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  creator_.FlushCurrentPacket();
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  // The first packet still uses the old (default) limit.
  EXPECT_EQ(kDefaultMaxPacketSize, serialized_packet_->encrypted_length);
  DeleteSerializedPacket();
  creator_.AddFrame(QuicFrame(QuicWindowUpdateFrame()), NOT_RETRANSMISSION);
  creator_.AddFrame(QuicFrame(QuicPaddingFrame()), NOT_RETRANSMISSION);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  creator_.FlushCurrentPacket();
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  // The second packet picks up the enlarged limit.
  EXPECT_EQ(packet_size, serialized_packet_->encrypted_length);
  DeleteSerializedPacket();
}
// Consuming crypto data creates a CRYPTO frame covering all of the data and
// leaves it pending in the current packet.
TEST_P(QuicPacketCreatorTest, ConsumeCryptoDataToFillCurrentPacket) {
  std::string data = "crypto data";
  QuicFrame frame;
  ASSERT_TRUE(creator_.ConsumeCryptoDataToFillCurrentPacket(
      ENCRYPTION_INITIAL, data.length(), 0,
      true, NOT_RETRANSMISSION, &frame));
  EXPECT_EQ(frame.crypto_frame->data_length, data.length());
  EXPECT_TRUE(creator_.HasPendingFrames());
}
// Consuming small stream data produces a single stream frame holding all of
// the data (no fin).
TEST_P(QuicPacketCreatorTest, ConsumeDataToFillCurrentPacket) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  QuicFrame frame;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  const std::string data("test");
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, data, 0u, false, false, NOT_RETRANSMISSION, &frame));
  size_t consumed = frame.stream_frame.data_length;
  EXPECT_EQ(4u, consumed);
  CheckStreamFrame(frame, stream_id, "test", 0u, false);
  EXPECT_TRUE(creator_.HasPendingFrames());
}
// Same as ConsumeDataToFillCurrentPacket but with the fin bit set.
TEST_P(QuicPacketCreatorTest, ConsumeDataFin) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  QuicFrame frame;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  const std::string data("test");
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, data, 0u, true, false, NOT_RETRANSMISSION, &frame));
  size_t consumed = frame.stream_frame.data_length;
  EXPECT_EQ(4u, consumed);
  CheckStreamFrame(frame, stream_id, "test", 0u, true);
  EXPECT_TRUE(creator_.HasPendingFrames());
}
// A fin with no data still produces a (zero-length) stream frame, and the
// pending-frames debug string reports it.
TEST_P(QuicPacketCreatorTest, ConsumeDataFinOnly) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  QuicFrame frame;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, {}, 0u, true, false, NOT_RETRANSMISSION, &frame));
  size_t consumed = frame.stream_frame.data_length;
  EXPECT_EQ(0u, consumed);
  CheckStreamFrame(frame, stream_id, std::string(), 0u, true);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(absl::StartsWith(creator_.GetPendingFramesInfo(),
                               "type { STREAM_FRAME }"));
}
// Sweeps the max packet length around the minimum viable size and checks
// HasRoomForStreamFrame() agrees exactly with whether the length exceeds
// header + encryption + stream-frame overhead.
TEST_P(QuicPacketCreatorTest, CreateAllFreeBytesForStreamFrames) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  const size_t overhead =
      GetPacketHeaderOverhead(client_framer_.transport_version()) +
      GetEncryptionOverhead();
  for (size_t i = overhead +
                  QuicPacketCreator::MinPlaintextPacketSize(
                      client_framer_.version(),
                      QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
       i < overhead + 100; ++i) {
    SCOPED_TRACE(i);
    creator_.SetMaxPacketLength(i);
    const bool should_have_room =
        i >
        overhead + GetStreamFrameOverhead(client_framer_.transport_version());
    ASSERT_EQ(should_have_room,
              creator_.HasRoomForStreamFrame(GetNthClientInitiatedStreamId(1),
                                             kOffset, 0xffff));
    if (should_have_room) {
      // When there is room, consuming data must make progress.
      QuicFrame frame;
      const std::string data("testdata");
      EXPECT_CALL(delegate_, OnSerializedPacket(_))
          .WillRepeatedly(Invoke(
              this, &QuicPacketCreatorTest::ClearSerializedPacketForTests));
      ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
          GetNthClientInitiatedStreamId(1), data, kOffset, false, false,
          NOT_RETRANSMISSION, &frame));
      size_t bytes_consumed = frame.stream_frame.data_length;
      EXPECT_LT(0u, bytes_consumed);
      creator_.FlushCurrentPacket();
    }
  }
}
// Straddles the packet capacity by +/-5 bytes of stream data and verifies
// BytesFree() accounting (2 bytes are reserved for frame expansion).
TEST_P(QuicPacketCreatorTest, StreamFrameConsumption) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  const size_t overhead =
      GetPacketHeaderOverhead(client_framer_.transport_version()) +
      GetEncryptionOverhead() +
      GetStreamFrameOverhead(client_framer_.transport_version());
  size_t capacity = kDefaultMaxPacketSize - overhead;
  for (int delta = -5; delta <= 5; ++delta) {
    std::string data(capacity + delta, 'A');
    // Overfilled packets (delta > 0) leave no free bytes.
    size_t bytes_free = delta > 0 ? 0 : 0 - delta;
    QuicFrame frame;
    ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
        GetNthClientInitiatedStreamId(1), data, kOffset, false, false,
        NOT_RETRANSMISSION, &frame));
    EXPECT_EQ(2u, creator_.ExpansionOnNewFrame());
    size_t expected_bytes_free = bytes_free < 3 ? 0 : bytes_free - 2;
    EXPECT_EQ(expected_bytes_free, creator_.BytesFree()) << "delta: " << delta;
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
    creator_.FlushCurrentPacket();
    ASSERT_TRUE(serialized_packet_->encrypted_buffer);
    DeleteSerializedPacket();
  }
}
// Handshake/crypto packets get full padding; when coalescing is possible
// the padding is deferred, so the packet may be shorter by the free bytes.
TEST_P(QuicPacketCreatorTest, CryptoStreamFramePacketPadding) {
  SetQuicFlag(quic_enforce_single_packet_chlo, false);
  size_t overhead =
      GetPacketHeaderOverhead(client_framer_.transport_version()) +
      GetEncryptionOverhead();
  // Crypto data travels in CRYPTO frames (IETF) or on the crypto stream
  // (Google QUIC); account for whichever frame header applies.
  if (QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
    overhead +=
        QuicFramer::GetMinCryptoFrameSize(kOffset, kMaxOutgoingPacketSize);
  } else {
    overhead += QuicFramer::GetMinStreamFrameSize(
        client_framer_.transport_version(), GetNthClientInitiatedStreamId(1),
        kOffset, false, 0);
  }
  ASSERT_GT(kMaxOutgoingPacketSize, overhead);
  size_t capacity = kDefaultMaxPacketSize - overhead;
  // Straddle the capacity boundary by a few bytes in each direction.
  for (int delta = -5; delta <= 5; ++delta) {
    SCOPED_TRACE(delta);
    std::string data(capacity + delta, 'A');
    size_t bytes_free = delta > 0 ? 0 : 0 - delta;
    QuicFrame frame;
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillRepeatedly(
            Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
    if (client_framer_.version().CanSendCoalescedPackets()) {
      EXPECT_CALL(delegate_, GetSerializedPacketFate(_, _))
          .WillRepeatedly(Return(COALESCE));
    }
    if (!QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
      ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
          QuicUtils::GetCryptoStreamId(client_framer_.transport_version()),
          data, kOffset, false, true, NOT_RETRANSMISSION, &frame));
      size_t bytes_consumed = frame.stream_frame.data_length;
      EXPECT_LT(0u, bytes_consumed);
    } else {
      producer_.SaveCryptoData(ENCRYPTION_INITIAL, kOffset, data);
      ASSERT_TRUE(creator_.ConsumeCryptoDataToFillCurrentPacket(
          ENCRYPTION_INITIAL, data.length(), kOffset,
          true, NOT_RETRANSMISSION, &frame));
      size_t bytes_consumed = frame.crypto_frame->data_length;
      EXPECT_LT(0u, bytes_consumed);
    }
    creator_.FlushCurrentPacket();
    ASSERT_TRUE(serialized_packet_->encrypted_buffer);
    if (client_framer_.version().CanSendCoalescedPackets()) {
      // Padding is deferred so coalesced packets can share the space.
      EXPECT_EQ(kDefaultMaxPacketSize - bytes_free,
                serialized_packet_->encrypted_length);
    } else {
      EXPECT_EQ(kDefaultMaxPacketSize, serialized_packet_->encrypted_length);
    }
    DeleteSerializedPacket();
  }
}
// Non-crypto stream packets are NOT padded: an underfilled packet is shorter
// by exactly the unused bytes.
TEST_P(QuicPacketCreatorTest, NonCryptoStreamFramePacketNonPadding) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  const size_t overhead =
      GetPacketHeaderOverhead(client_framer_.transport_version()) +
      GetEncryptionOverhead() +
      GetStreamFrameOverhead(client_framer_.transport_version());
  ASSERT_GT(kDefaultMaxPacketSize, overhead);
  size_t capacity = kDefaultMaxPacketSize - overhead;
  for (int delta = -5; delta <= 5; ++delta) {
    std::string data(capacity + delta, 'A');
    size_t bytes_free = delta > 0 ? 0 : 0 - delta;
    QuicFrame frame;
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
    ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
        GetNthClientInitiatedStreamId(1), data, kOffset, false, false,
        NOT_RETRANSMISSION, &frame));
    size_t bytes_consumed = frame.stream_frame.data_length;
    EXPECT_LT(0u, bytes_consumed);
    creator_.FlushCurrentPacket();
    ASSERT_TRUE(serialized_packet_->encrypted_buffer);
    if (bytes_free > 0) {
      EXPECT_EQ(kDefaultMaxPacketSize - bytes_free,
                serialized_packet_->encrypted_length);
    } else {
      EXPECT_EQ(kDefaultMaxPacketSize, serialized_packet_->encrypted_length);
    }
    DeleteSerializedPacket();
  }
}
// Builds a padded PATH_CHALLENGE packet by hand and compares it
// byte-for-byte against the expected wire image (IETF versions only).
TEST_P(QuicPacketCreatorTest, BuildPathChallengePacket) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPacketHeader header;
  header.destination_connection_id = CreateTestConnectionId();
  header.reset_flag = false;
  header.version_flag = false;
  header.packet_number = kPacketNumber;
  MockRandom randomizer;
  QuicPathFrameBuffer payload;
  randomizer.RandBytes(payload.data(), payload.size());
  unsigned char packet[] = {
      // type byte (short header, 4-byte packet number)
      0x43,
      // destination connection id (CreateTestConnectionId())
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      // packet number (kPacketNumber)
      0x12, 0x34, 0x56, 0x78,
      // PATH_CHALLENGE frame type
      0x1a,
      // 8-byte challenge payload — matches MockRandom's deterministic
      // 'r'-byte output written into |payload| above
      'r', 'r', 'r', 'r', 'r', 'r', 'r', 'r',
      // padding frame plus padding bytes
      0x00,
      0x00, 0x00, 0x00, 0x00
  };
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  size_t length = creator_.BuildPaddedPathChallengePacket(
      header, buffer.get(), ABSL_ARRAYSIZE(packet), payload,
      ENCRYPTION_INITIAL);
  EXPECT_EQ(length, ABSL_ARRAYSIZE(packet));
  EXPECT_EQ(kQuicPathFrameBufferSize, payload.size());
  QuicPacket data(creator_.transport_version(), buffer.release(), length, true,
                  header);
  quiche::test::CompareCharArraysWithHexError(
      "constructed packet", data.data(), data.length(),
      reinterpret_cast<char*>(packet), ABSL_ARRAYSIZE(packet));
}
// Builds a connectivity-probing (ping + padding) packet and compares it
// against the version-specific expected bytes.
TEST_P(QuicPacketCreatorTest, BuildConnectivityProbingPacket) {
  QuicPacketHeader header;
  header.destination_connection_id = CreateTestConnectionId();
  header.reset_flag = false;
  header.version_flag = false;
  header.packet_number = kPacketNumber;
  // Expected bytes for pre-IETF versions (the 0x07 byte is the ping frame
  // type in these versions; 0x00 bytes are padding).
  unsigned char packet[] = {
      0x43,
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      0x12, 0x34, 0x56, 0x78,
      0x07,
      0x00,
      0x00, 0x00, 0x00, 0x00
  };
  // Expected bytes for IETF-frame versions (0x01 is the IETF PING frame).
  unsigned char packet99[] = {
      0x43,
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      0x12, 0x34, 0x56, 0x78,
      0x01,
      0x00,
      0x00, 0x00, 0x00, 0x00
  };
  unsigned char* p = packet;
  size_t packet_size = ABSL_ARRAYSIZE(packet);
  if (creator_.version().HasIetfQuicFrames()) {
    p = packet99;
    packet_size = ABSL_ARRAYSIZE(packet99);
  }
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  size_t length = creator_.BuildConnectivityProbingPacket(
      header, buffer.get(), packet_size, ENCRYPTION_INITIAL);
  EXPECT_NE(0u, length);
  QuicPacket data(creator_.transport_version(), buffer.release(), length, true,
                  header);
  quiche::test::CompareCharArraysWithHexError(
      "constructed packet", data.data(), data.length(),
      reinterpret_cast<char*>(p), packet_size);
}
// One PATH_RESPONSE, no padding requested: the packet contains exactly the
// header plus one PATH_RESPONSE frame (type 0x1b + 8-byte payload).
TEST_P(QuicPacketCreatorTest, BuildPathResponsePacket1ResponseUnpadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPacketHeader header;
  header.destination_connection_id = CreateTestConnectionId();
  header.reset_flag = false;
  header.version_flag = false;
  header.packet_number = kPacketNumber;
  QuicPathFrameBuffer payload0 = {
      {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}};
  unsigned char packet[] = {
      // short header type byte, connection id, packet number
      0x43,
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      0x12, 0x34, 0x56, 0x78,
      // PATH_RESPONSE frame type followed by the echoed payload
      0x1b,
      0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
  };
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  size_t length = creator_.BuildPathResponsePacket(
      header, buffer.get(), ABSL_ARRAYSIZE(packet), payloads,
      false, ENCRYPTION_INITIAL);
  EXPECT_EQ(length, ABSL_ARRAYSIZE(packet));
  QuicPacket data(creator_.transport_version(), buffer.release(), length, true,
                  header);
  quiche::test::CompareCharArraysWithHexError(
      "constructed packet", data.data(), data.length(),
      reinterpret_cast<char*>(packet), ABSL_ARRAYSIZE(packet));
}
// One PATH_RESPONSE with padding requested: padding bytes fill the packet
// out to the provided buffer size.
TEST_P(QuicPacketCreatorTest, BuildPathResponsePacket1ResponsePadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPacketHeader header;
  header.destination_connection_id = CreateTestConnectionId();
  header.reset_flag = false;
  header.version_flag = false;
  header.packet_number = kPacketNumber;
  QuicPathFrameBuffer payload0 = {
      {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}};
  unsigned char packet[] = {
      // short header type byte, connection id, packet number
      0x43,
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      0x12, 0x34, 0x56, 0x78,
      // PATH_RESPONSE frame type followed by the echoed payload
      0x1b,
      0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
      // padding
      0x00, 0x00, 0x00, 0x00, 0x00
  };
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  size_t length = creator_.BuildPathResponsePacket(
      header, buffer.get(), ABSL_ARRAYSIZE(packet), payloads,
      true, ENCRYPTION_INITIAL);
  EXPECT_EQ(length, ABSL_ARRAYSIZE(packet));
  QuicPacket data(creator_.transport_version(), buffer.release(), length, true,
                  header);
  quiche::test::CompareCharArraysWithHexError(
      "constructed packet", data.data(), data.length(),
      reinterpret_cast<char*>(packet), ABSL_ARRAYSIZE(packet));
}
// Three queued PATH_RESPONSE payloads, no padding: the packet carries three
// back-to-back PATH_RESPONSE frames in queue order.
TEST_P(QuicPacketCreatorTest, BuildPathResponsePacket3ResponsesUnpadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPacketHeader header;
  header.destination_connection_id = CreateTestConnectionId();
  header.reset_flag = false;
  header.version_flag = false;
  header.packet_number = kPacketNumber;
  QuicPathFrameBuffer payload0 = {
      {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}};
  QuicPathFrameBuffer payload1 = {
      {0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}};
  QuicPathFrameBuffer payload2 = {
      {0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28}};
  unsigned char packet[] = {
      // short header type byte, connection id, packet number
      0x43,
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      0x12, 0x34, 0x56, 0x78,
      // three PATH_RESPONSE frames (type 0x1b + 8-byte payload each)
      0x1b, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
      0x1b, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
      0x1b, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
  };
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  payloads.push_back(payload1);
  payloads.push_back(payload2);
  size_t length = creator_.BuildPathResponsePacket(
      header, buffer.get(), ABSL_ARRAYSIZE(packet), payloads,
      false, ENCRYPTION_INITIAL);
  EXPECT_EQ(length, ABSL_ARRAYSIZE(packet));
  QuicPacket data(creator_.transport_version(), buffer.release(), length, true,
                  header);
  quiche::test::CompareCharArraysWithHexError(
      "constructed packet", data.data(), data.length(),
      reinterpret_cast<char*>(packet), ABSL_ARRAYSIZE(packet));
}
// Three queued PATH_RESPONSE payloads with padding: three frames in order
// followed by padding bytes up to the buffer size.
TEST_P(QuicPacketCreatorTest, BuildPathResponsePacket3ResponsesPadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPacketHeader header;
  header.destination_connection_id = CreateTestConnectionId();
  header.reset_flag = false;
  header.version_flag = false;
  header.packet_number = kPacketNumber;
  QuicPathFrameBuffer payload0 = {
      {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}};
  QuicPathFrameBuffer payload1 = {
      {0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}};
  QuicPathFrameBuffer payload2 = {
      {0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28}};
  unsigned char packet[] = {
      // short header type byte, connection id, packet number
      0x43,
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
      0x12, 0x34, 0x56, 0x78,
      // three PATH_RESPONSE frames (type 0x1b + 8-byte payload each)
      0x1b, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
      0x1b, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
      0x1b, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
      // padding
      0x00, 0x00, 0x00, 0x00, 0x00
  };
  std::unique_ptr<char[]> buffer(new char[kMaxOutgoingPacketSize]);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  payloads.push_back(payload1);
  payloads.push_back(payload2);
  size_t length = creator_.BuildPathResponsePacket(
      header, buffer.get(), ABSL_ARRAYSIZE(packet), payloads,
      true, ENCRYPTION_INITIAL);
  EXPECT_EQ(length, ABSL_ARRAYSIZE(packet));
  QuicPacket data(creator_.transport_version(), buffer.release(), length, true,
                  header);
  quiche::test::CompareCharArraysWithHexError(
      "constructed packet", data.data(), data.length(),
      reinterpret_cast<char*>(packet), ABSL_ARRAYSIZE(packet));
}
// Serializes a connectivity-probing packet and re-parses it with the server
// framer: IETF versions probe with PATH_CHALLENGE + padding, pre-IETF
// versions with PING + padding.
TEST_P(QuicPacketCreatorTest, SerializeConnectivityProbingPacket) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  std::unique_ptr<SerializedPacket> encrypted;
  if (VersionHasIetfQuicFrames(creator_.transport_version())) {
    QuicPathFrameBuffer payload = {
        {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xfe}};
    encrypted =
        creator_.SerializePathChallengeConnectivityProbingPacket(payload);
  } else {
    encrypted = creator_.SerializeConnectivityProbingPacket();
  }
  {
    // Expect the framer callbacks in exact parse order.
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    if (VersionHasIetfQuicFrames(creator_.transport_version())) {
      EXPECT_CALL(framer_visitor_, OnPathChallengeFrame(_));
      EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
    } else {
      EXPECT_CALL(framer_visitor_, OnPingFrame(_));
      EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
    }
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes a PATH_CHALLENGE probing packet (IETF only) and verifies the
// server framer parses it as PATH_CHALLENGE followed by padding.
TEST_P(QuicPacketCreatorTest, SerializePathChallengeProbePacket) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathChallengeConnectivityProbingPacket(payload));
  {
    // Expect the framer callbacks in exact parse order.
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    EXPECT_CALL(framer_visitor_, OnPathChallengeFrame(_));
    EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes a PATH_RESPONSE probing packet with one payload and padding
// enabled; the framer should see one PATH_RESPONSE frame plus a padding
// frame.
TEST_P(QuicPacketCreatorTest, SerializePathResponseProbePacket1PayloadPadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload0 = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathResponseConnectivityProbingPacket(payloads, true));
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_));
    EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes a PATH_RESPONSE probing packet with one payload and padding
// disabled; no OnPaddingFrame callback is expected.
TEST_P(QuicPacketCreatorTest,
       SerializePathResponseProbePacket1PayloadUnPadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload0 = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathResponseConnectivityProbingPacket(payloads, false));
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_));
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes a padded PATH_RESPONSE probing packet with two payloads; the
// framer should see two PATH_RESPONSE frames plus one padding frame.
TEST_P(QuicPacketCreatorTest, SerializePathResponseProbePacket2PayloadsPadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload0 = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  QuicPathFrameBuffer payload1 = {
      {0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee, 0xde}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  payloads.push_back(payload1);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathResponseConnectivityProbingPacket(payloads, true));
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_)).Times(2);
    EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes an unpadded PATH_RESPONSE probing packet with two payloads;
// two PATH_RESPONSE frames and no padding frame are expected.
TEST_P(QuicPacketCreatorTest,
       SerializePathResponseProbePacket2PayloadsUnPadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload0 = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  QuicPathFrameBuffer payload1 = {
      {0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee, 0xde}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  payloads.push_back(payload1);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathResponseConnectivityProbingPacket(payloads, false));
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_)).Times(2);
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes a padded PATH_RESPONSE probing packet with three payloads;
// three PATH_RESPONSE frames plus one padding frame are expected.
TEST_P(QuicPacketCreatorTest, SerializePathResponseProbePacket3PayloadsPadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload0 = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  QuicPathFrameBuffer payload1 = {
      {0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee, 0xde}};
  QuicPathFrameBuffer payload2 = {
      {0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee, 0xde, 0xad}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  payloads.push_back(payload1);
  payloads.push_back(payload2);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathResponseConnectivityProbingPacket(payloads, true));
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_)).Times(3);
    EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes an unpadded PATH_RESPONSE probing packet with three payloads;
// three PATH_RESPONSE frames and no padding frame are expected.
TEST_P(QuicPacketCreatorTest,
       SerializePathResponseProbePacket3PayloadsUnpadded) {
  if (!VersionHasIetfQuicFrames(creator_.transport_version())) {
    return;
  }
  QuicPathFrameBuffer payload0 = {
      {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee}};
  QuicPathFrameBuffer payload1 = {
      {0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee, 0xde}};
  QuicPathFrameBuffer payload2 = {
      {0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xee, 0xde, 0xad}};
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  quiche::QuicheCircularDeque<QuicPathFrameBuffer> payloads;
  payloads.push_back(payload0);
  payloads.push_back(payload1);
  payloads.push_back(payload2);
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializePathResponseConnectivityProbingPacket(payloads, false));
  // Expectations are ordered; note this test sets them at function scope
  // rather than inside a nested block like the sibling tests.
  InSequence s;
  EXPECT_CALL(framer_visitor_, OnPacket());
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
  EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
  EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
  EXPECT_CALL(framer_visitor_, OnPathResponseFrame(_)).Times(3);
  EXPECT_CALL(framer_visitor_, OnPacketComplete());
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Serializes a connection-close packet via
// SerializeLargePacketNumberConnectionClosePacket and verifies the server
// framer parses a single CONNECTION_CLOSE frame.
TEST_P(QuicPacketCreatorTest, SerializeLargePacketNumberConnectionClosePacket) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE)
;
  std::unique_ptr<SerializedPacket> encrypted(
      creator_.SerializeLargePacketNumberConnectionClosePacket(
          QuicPacketNumber(1), QUIC_CLIENT_LOST_NETWORK_ACCESS,
          "QuicPacketCreatorTest"));
  InSequence s;
  EXPECT_CALL(framer_visitor_, OnPacket());
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
  EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
  EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
  EXPECT_CALL(framer_visitor_, OnConnectionCloseFrame(_));
  EXPECT_CALL(framer_visitor_, OnPacketComplete());
  server_framer_.ProcessPacket(QuicEncryptedPacket(
      encrypted->encrypted_buffer, encrypted->encrypted_length));
}
// Verifies that UpdatePacketNumberLength grows the packet number length
// (1 -> 2 -> 4 -> 6 bytes) as the creator's current packet number grows,
// with the least-awaiting packet number held fixed at 2.
TEST_P(QuicPacketCreatorTest, UpdatePacketSequenceNumberLengthLeastAwaiting) {
  if (!GetParam().version.SendsVariableLengthPacketNumberInLongHeader()) {
    EXPECT_EQ(PACKET_4BYTE_PACKET_NUMBER,
              QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
    // Switch to short headers so the variable-length logic applies.
    creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  } else {
    EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER,
              QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  }
  QuicPacketCreatorPeer::SetPacketNumber(&creator_, 64);
  creator_.UpdatePacketNumberLength(QuicPacketNumber(2),
                                    10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  QuicPacketCreatorPeer::SetPacketNumber(&creator_, 64 * 256);
  creator_.UpdatePacketNumberLength(QuicPacketNumber(2),
                                    10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_2BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  QuicPacketCreatorPeer::SetPacketNumber(&creator_, 64 * 256 * 256);
  creator_.UpdatePacketNumberLength(QuicPacketNumber(2),
                                    10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_4BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  // UINT64_C avoids 32-bit overflow when computing 64 * 256^4.
  QuicPacketCreatorPeer::SetPacketNumber(&creator_,
                                         UINT64_C(64) * 256 * 256 * 256 * 256);
  creator_.UpdatePacketNumberLength(QuicPacketNumber(2),
                                    10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_6BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
}
// Verifies that UpdatePacketNumberLength grows the packet number length as
// the congestion-window argument (in packets) grows, with the packet number
// held fixed at 1.
TEST_P(QuicPacketCreatorTest, UpdatePacketSequenceNumberLengthCwnd) {
  QuicPacketCreatorPeer::SetPacketNumber(&creator_, 1);
  if (!GetParam().version.SendsVariableLengthPacketNumberInLongHeader()) {
    EXPECT_EQ(PACKET_4BYTE_PACKET_NUMBER,
              QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
    // Switch to short headers so the variable-length logic applies.
    creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  } else {
    EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER,
              QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  }
  creator_.UpdatePacketNumberLength(QuicPacketNumber(1),
                                    10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  creator_.UpdatePacketNumberLength(QuicPacketNumber(1),
                                    10000 * 256 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_2BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  creator_.UpdatePacketNumberLength(QuicPacketNumber(1),
                                    10000 * 256 * 256 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_4BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  // UINT64_C avoids 32-bit overflow in the cwnd computation.
  creator_.UpdatePacketNumberLength(
      QuicPacketNumber(1),
      UINT64_C(1000) * 256 * 256 * 256 * 256 / kDefaultMaxPacketSize);
  EXPECT_EQ(PACKET_6BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
}
// Verifies SkipNPacketNumbers both advances the creator's packet number by
// the requested count and widens the packet number length accordingly.
TEST_P(QuicPacketCreatorTest, SkipNPacketNumbers) {
  QuicPacketCreatorPeer::SetPacketNumber(&creator_, 1);
  if (!GetParam().version.SendsVariableLengthPacketNumberInLongHeader()) {
    EXPECT_EQ(PACKET_4BYTE_PACKET_NUMBER,
              QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
    // Switch to short headers so the variable-length logic applies.
    creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  } else {
    EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER,
              QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  }
  creator_.SkipNPacketNumbers(63, QuicPacketNumber(2),
                              10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(QuicPacketNumber(64), creator_.packet_number());
  EXPECT_EQ(PACKET_1BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  creator_.SkipNPacketNumbers(64 * 255, QuicPacketNumber(2),
                              10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(QuicPacketNumber(64 * 256), creator_.packet_number());
  EXPECT_EQ(PACKET_2BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
  creator_.SkipNPacketNumbers(64 * 256 * 255, QuicPacketNumber(2),
                              10000 / kDefaultMaxPacketSize);
  EXPECT_EQ(QuicPacketNumber(64 * 256 * 256), creator_.packet_number());
  EXPECT_EQ(PACKET_4BYTE_PACKET_NUMBER,
            QuicPacketCreatorPeer::GetPacketNumberLength(&creator_));
}
// Serializes a single crypto-carrying frame (stream frame on the crypto
// stream pre-crypto-frames, CRYPTO frame otherwise), re-parses it, and
// checks the header's version flag matches the test parameter.
TEST_P(QuicPacketCreatorTest, SerializeFrame) {
  if (!GetParam().version_serialization) {
    creator_.StopSendingVersion();
  }
  std::string data("test data");
  if (!QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
    QuicStreamFrame stream_frame(
        QuicUtils::GetCryptoStreamId(client_framer_.transport_version()),
        false, 0u, absl::string_view());
    frames_.push_back(QuicFrame(stream_frame));
  } else {
    producer_.SaveCryptoData(ENCRYPTION_INITIAL, 0, data);
    frames_.push_back(
        QuicFrame(new QuicCryptoFrame(ENCRYPTION_INITIAL, 0, data.length())));
  }
  SerializedPacket serialized = SerializeAllFrames(frames_);
  QuicPacketHeader header;
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    // Capture the parsed header so the version flag can be checked below.
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_))
        .WillOnce(DoAll(SaveArg<0>(&header), Return(true)));
    if (QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
      EXPECT_CALL(framer_visitor_, OnCryptoFrame(_));
    } else {
      EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
    }
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  ProcessPacket(serialized);
  EXPECT_EQ(GetParam().version_serialization, header.version_flag);
}
// Same as SerializeFrame but with a shorter data payload ("Hello World!").
TEST_P(QuicPacketCreatorTest, SerializeFrameShortData) {
  if (!GetParam().version_serialization) {
    creator_.StopSendingVersion();
  }
  std::string data("Hello World!");
  if (!QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
    QuicStreamFrame stream_frame(
        QuicUtils::GetCryptoStreamId(client_framer_.transport_version()),
        false, 0u, absl::string_view());
    frames_.push_back(QuicFrame(stream_frame));
  } else {
    producer_.SaveCryptoData(ENCRYPTION_INITIAL, 0, data);
    frames_.push_back(
        QuicFrame(new QuicCryptoFrame(ENCRYPTION_INITIAL, 0, data.length())));
  }
  SerializedPacket serialized = SerializeAllFrames(frames_);
  QuicPacketHeader header;
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    // Capture the parsed header so the version flag can be checked below.
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_))
        .WillOnce(DoAll(SaveArg<0>(&header), Return(true)));
    if (QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
      EXPECT_CALL(framer_visitor_, OnCryptoFrame(_));
    } else {
      EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
    }
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  ProcessPacket(serialized);
  EXPECT_EQ(GetParam().version_serialization, header.version_flag);
}
// Shared helper for the chaos-protection tests: serializes a CRYPTO frame
// plus a padding frame at ENCRYPTION_INITIAL with deterministic randomness,
// then checks whether the parsed packet shows the chaos-protected layout
// (multiple CRYPTO/padding frames and at least one PING) or the plain
// single-CRYPTO-frame layout.
void QuicPacketCreatorTest::TestChaosProtection(bool enabled) {
  // Chaos protection applies only to versions that use CRYPTO frames.
  if (!GetParam().version.UsesCryptoFrames()) {
    return;
  }
  // Fixed-seed random makes the frame-splitting deterministic per run.
  MockRandom mock_random(2);
  QuicPacketCreatorPeer::SetRandom(&creator_, &mock_random);
  std::string data("ChAoS_ThEoRy!");
  producer_.SaveCryptoData(ENCRYPTION_INITIAL, 0, data);
  frames_.push_back(
      QuicFrame(new QuicCryptoFrame(ENCRYPTION_INITIAL, 0, data.length())));
  frames_.push_back(QuicFrame(QuicPaddingFrame(33)));
  SerializedPacket serialized = SerializeAllFrames(frames_);
  EXPECT_CALL(framer_visitor_, OnPacket());
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
  EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
  EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
  EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
  if (enabled) {
    // Chaos protection splits the CRYPTO data and interleaves padding/PING.
    EXPECT_CALL(framer_visitor_, OnCryptoFrame(_)).Times(AtLeast(2));
    EXPECT_CALL(framer_visitor_, OnPaddingFrame(_)).Times(AtLeast(2));
    EXPECT_CALL(framer_visitor_, OnPingFrame(_)).Times(AtLeast(1));
  } else {
    EXPECT_CALL(framer_visitor_, OnCryptoFrame(_)).Times(1);
    EXPECT_CALL(framer_visitor_, OnPaddingFrame(_)).Times(1);
    EXPECT_CALL(framer_visitor_, OnPingFrame(_)).Times(0);
  }
  EXPECT_CALL(framer_visitor_, OnPacketComplete());
  ProcessPacket(serialized);
}
// Chaos protection on by default: expect the multi-frame layout.
TEST_P(QuicPacketCreatorTest, ChaosProtectionEnabled) {
  TestChaosProtection(true);
}
// With the flag off, expect the plain single-CRYPTO-frame layout.
TEST_P(QuicPacketCreatorTest, ChaosProtectionDisabled) {
  SetQuicFlag(quic_enable_chaos_protection, false);
  TestChaosProtection(false);
}
// Offers a payload as large as the max packet length; the creator must
// consume strictly less than that (header/frame overhead leaves no room for
// the full payload in one packet).
TEST_P(QuicPacketCreatorTest, ConsumeDataLargerThanOneStreamFrame) {
  if (!GetParam().version_serialization) {
    creator_.StopSendingVersion();
  }
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  QuicFrame frame;
  size_t payload_length = creator_.max_packet_length();
  const std::string too_long_payload(payload_length, 'a');
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, too_long_payload, 0u, true, false, NOT_RETRANSMISSION,
      &frame));
  size_t consumed = frame.stream_frame.data_length;
  // Only part of the payload fits in a single packet.
  EXPECT_GT(payload_length, consumed);
  creator_.FlushCurrentPacket();
  DeleteSerializedPacket();
}
// Exercises AddFrame/ConsumeDataToFillCurrentPacket end to end: fills a
// packet with an ACK frame, a stream frame, and padding until BytesFree()
// is zero, then verifies the next AddFrame triggers a flush and that the
// serialized packet carries the expected retransmittable frames, ack state,
// and that BytesFree() is restored afterwards.
TEST_P(QuicPacketCreatorTest, AddFrameAndFlush) {
  if (!GetParam().version_serialization) {
    creator_.StopSendingVersion();
  }
  const size_t max_plaintext_size =
      client_framer_.GetMaxPlaintextSize(creator_.max_packet_length());
  EXPECT_FALSE(creator_.HasPendingFrames());
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  if (!QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
    stream_id =
        QuicUtils::GetCryptoStreamId(client_framer_.transport_version());
  }
  EXPECT_FALSE(creator_.HasPendingStreamFramesOfStream(stream_id));
  // An empty packet's free space equals max plaintext minus header size.
  EXPECT_EQ(max_plaintext_size -
                GetPacketHeaderSize(
                    client_framer_.transport_version(),
                    creator_.GetDestinationConnectionIdLength(),
                    creator_.GetSourceConnectionIdLength(),
                    QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
                    !kIncludeDiversificationNonce,
                    QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
                    QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_),
                    0, QuicPacketCreatorPeer::GetLengthLength(&creator_)),
            creator_.BytesFree());
  StrictMock<MockDebugDelegate> debug;
  creator_.set_debug_delegate(&debug);
  QuicAckFrame ack_frame(InitAckFrame(10u));
  EXPECT_CALL(debug, OnFrameAddedToPacket(_));
  EXPECT_TRUE(creator_.AddFrame(QuicFrame(&ack_frame), NOT_RETRANSMISSION));
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingStreamFramesOfStream(stream_id));
  QuicFrame frame;
  const std::string data("test");
  EXPECT_CALL(debug, OnFrameAddedToPacket(_));
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, data, 0u, false, false, NOT_RETRANSMISSION, &frame));
  size_t consumed = frame.stream_frame.data_length;
  EXPECT_EQ(4u, consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingStreamFramesOfStream(stream_id));
  // Padding fills the remaining space in the packet.
  QuicPaddingFrame padding_frame;
  EXPECT_CALL(debug, OnFrameAddedToPacket(_));
  EXPECT_TRUE(creator_.AddFrame(QuicFrame(padding_frame), NOT_RETRANSMISSION));
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_EQ(0u, creator_.BytesFree());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket))
;
  // The packet is full, so this AddFrame fails and flushes the packet.
  EXPECT_FALSE(creator_.AddFrame(QuicFrame(&ack_frame), NOT_RETRANSMISSION));
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  ASSERT_FALSE(serialized_packet_->retransmittable_frames.empty());
  const QuicFrames& retransmittable =
      serialized_packet_->retransmittable_frames;
  ASSERT_EQ(1u, retransmittable.size());
  EXPECT_EQ(STREAM_FRAME, retransmittable[0].type);
  EXPECT_TRUE(serialized_packet_->has_ack);
  EXPECT_EQ(QuicPacketNumber(10u), serialized_packet_->largest_acked);
  DeleteSerializedPacket();
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingStreamFramesOfStream(stream_id));
  // After the flush the creator is empty again with full free space.
  EXPECT_EQ(max_plaintext_size -
                GetPacketHeaderSize(
                    client_framer_.transport_version(),
                    creator_.GetDestinationConnectionIdLength(),
                    creator_.GetSourceConnectionIdLength(),
                    QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
                    !kIncludeDiversificationNonce,
                    QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
                    QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_),
                    0, QuicPacketCreatorPeer::GetLengthLength(&creator_)),
            creator_.BytesFree());
}
// Uses CreateAndSerializeStreamFrame to serialize a 4-byte stream frame in
// one step and verifies the bytes consumed and the retransmittable-frame
// bookkeeping on the serialized packet.
TEST_P(QuicPacketCreatorTest, SerializeAndSendStreamFrame) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  if (!GetParam().version_serialization) {
    creator_.StopSendingVersion();
  }
  EXPECT_FALSE(creator_.HasPendingFrames());
  const std::string data("test");
  producer_.SaveStreamData(GetNthClientInitiatedStreamId(0), data);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  size_t num_bytes_consumed;
  StrictMock<MockDebugDelegate> debug;
  creator_.set_debug_delegate(&debug);
  EXPECT_CALL(debug, OnFrameAddedToPacket(_));
  creator_.CreateAndSerializeStreamFrame(
      GetNthClientInitiatedStreamId(0), data.length(), 0, 0, true,
      NOT_RETRANSMISSION, &num_bytes_consumed);
  EXPECT_EQ(4u, num_bytes_consumed);
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  ASSERT_FALSE(serialized_packet_->retransmittable_frames.empty());
  const QuicFrames& retransmittable =
      serialized_packet_->retransmittable_frames;
  ASSERT_EQ(1u, retransmittable.size());
  EXPECT_EQ(STREAM_FRAME, retransmittable[0].type);
  DeleteSerializedPacket();
  EXPECT_FALSE(creator_.HasPendingFrames());
}
// Serializes a zero-length FIN-only stream frame and verifies that versions
// with header protection emit a padding frame before the stream frame
// (needed with a 1-byte packet number).
TEST_P(QuicPacketCreatorTest, SerializeStreamFrameWithPadding) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  if (!GetParam().version_serialization) {
    creator_.StopSendingVersion();
  }
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  size_t num_bytes_consumed;
  // Zero-length write with FIN set: consumes no payload bytes.
  creator_.CreateAndSerializeStreamFrame(GetNthClientInitiatedStreamId(0), 0, 0,
                                         0, true, NOT_RETRANSMISSION,
                                         &num_bytes_consumed);
  EXPECT_EQ(0u, num_bytes_consumed);
  ASSERT_TRUE(serialized_packet_->encrypted_buffer);
  ASSERT_FALSE(serialized_packet_->retransmittable_frames.empty());
  ASSERT_EQ(serialized_packet_->packet_number_length,
            PACKET_1BYTE_PACKET_NUMBER);
  {
    InSequence s;
    EXPECT_CALL(framer_visitor_, OnPacket());
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
    EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
    EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
    EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
    if (client_framer_.version().HasHeaderProtection()) {
      EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
      EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
    } else {
      EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
    }
    EXPECT_CALL(framer_visitor_, OnPacketComplete());
  }
  ProcessPacket(*serialized_packet_);
}
// Adding stream data at ENCRYPTION_INITIAL must trip a QUIC_BUG and report
// an unrecoverable error to the delegate.
TEST_P(QuicPacketCreatorTest, AddUnencryptedStreamDataClosesConnection) {
  // This test only works when all versions are enabled.
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  creator_.set_encryption_level(ENCRYPTION_INITIAL);
  QuicStreamFrame stream_frame(GetNthClientInitiatedStreamId(0),
                               false, 0u, absl::string_view());
  EXPECT_QUIC_BUG(
      {
        EXPECT_CALL(delegate_, OnUnrecoverableError(_, _));
        creator_.AddFrame(QuicFrame(stream_frame), NOT_RETRANSMISSION);
      },
      "Cannot send stream data with level: ENCRYPTION_INITIAL");
}
// Adding stream data at ENCRYPTION_HANDSHAKE must likewise trip a QUIC_BUG
// and report an unrecoverable error to the delegate.
TEST_P(QuicPacketCreatorTest, SendStreamDataWithEncryptionHandshake) {
  // This test only works when all versions are enabled.
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  creator_.set_encryption_level(ENCRYPTION_HANDSHAKE);
  QuicStreamFrame stream_frame(GetNthClientInitiatedStreamId(0),
                               false, 0u, absl::string_view());
  EXPECT_QUIC_BUG(
      {
        EXPECT_CALL(delegate_, OnUnrecoverableError(_, _));
        creator_.AddFrame(QuicFrame(stream_frame), NOT_RETRANSMISSION);
      },
      "Cannot send stream data with level: ENCRYPTION_HANDSHAKE");
}
// A CHLO handshake message sized to fill a whole packet cannot fit into a
// single stream frame: expect QUIC_CRYPTO_CHLO_TOO_LARGE and a QUIC_BUG.
// Only applies to versions that carry the handshake on the crypto stream.
TEST_P(QuicPacketCreatorTest, ChloTooLarge) {
  // This test only works when all versions are enabled.
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  if (QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
    return;
  }
  CryptoHandshakeMessage message;
  message.set_tag(kCHLO);
  // Pad the CHLO up to the maximum outgoing packet size.
  message.set_minimum_size(kMaxOutgoingPacketSize);
  CryptoFramer framer;
  std::unique_ptr<QuicData> message_data;
  message_data = framer.ConstructHandshakeMessage(message);
  QuicFrame frame;
  EXPECT_CALL(delegate_, OnUnrecoverableError(QUIC_CRYPTO_CHLO_TOO_LARGE, _));
  EXPECT_QUIC_BUG(
      creator_.ConsumeDataToFillCurrentPacket(
          QuicUtils::GetCryptoStreamId(client_framer_.transport_version()),
          absl::string_view(message_data->data(), message_data->length()), 0u,
          false, false, NOT_RETRANSMISSION, &frame),
      "Client hello won't fit in a single packet.");
}
// Queues a large amount of pending padding and flushes packets until it is
// drained, verifying each flushed packet parses as a padding frame.
TEST_P(QuicPacketCreatorTest, PendingPadding) {
  EXPECT_EQ(0u, creator_.pending_padding_bytes());
  creator_.AddPendingPadding(kMaxNumRandomPaddingBytes * 10);
  EXPECT_EQ(kMaxNumRandomPaddingBytes * 10, creator_.pending_padding_bytes());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  // Each flush consumes some pending padding; loop until exhausted.
  while (creator_.pending_padding_bytes() > 0) {
    creator_.FlushCurrentPacket();
    {
      InSequence s;
      EXPECT_CALL(framer_visitor_, OnPacket());
      EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
      EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
      EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
      EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
      EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
      EXPECT_CALL(framer_visitor_, OnPacketComplete());
    }
    ProcessPacket(*serialized_packet_)
;
  }
  EXPECT_EQ(0u, creator_.pending_padding_bytes());
}
// A fully-padded stream-frame packet must not draw down the separately
// queued pending (random) padding bytes.
TEST_P(QuicPacketCreatorTest, FullPaddingDoesNotConsumePendingPadding) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  creator_.AddPendingPadding(kMaxNumRandomPaddingBytes);
  QuicFrame frame;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  const std::string data("test");
  // needs_full_padding == true for this stream frame.
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, data, 0u, false,
      true, NOT_RETRANSMISSION, &frame));
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  creator_.FlushCurrentPacket();
  // Pending padding is untouched by the full padding above.
  EXPECT_EQ(kMaxNumRandomPaddingBytes, creator_.pending_padding_bytes());
}
// Mixes stream data with queued random padding: when a stream frame leaves
// one spare byte, one pending padding byte rides along; a frame that fills
// the packet exactly consumes none. The remainder drains via empty flushes.
TEST_P(QuicPacketCreatorTest, ConsumeDataAndRandomPadding) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  const QuicByteCount kStreamFramePayloadSize = 100u;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  // Size the packet so it holds exactly kStreamFramePayloadSize + 1 bytes
  // of stream payload.
  size_t length =
      GetPacketHeaderOverhead(client_framer_.transport_version()) +
      GetEncryptionOverhead() +
      QuicFramer::GetMinStreamFrameSize(
          client_framer_.transport_version(), stream_id, 0,
          true, kStreamFramePayloadSize + 1) +
      kStreamFramePayloadSize + 1;
  creator_.SetMaxPacketLength(length);
  creator_.AddPendingPadding(kMaxNumRandomPaddingBytes);
  QuicByteCount pending_padding_bytes = creator_.pending_padding_bytes();
  QuicFrame frame;
  char buf[kStreamFramePayloadSize + 1] = {};
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
  // 100-byte write leaves one free byte, so one padding byte is consumed.
  creator_.ConsumeDataToFillCurrentPacket(
      stream_id, absl::string_view(buf, kStreamFramePayloadSize), 0u, false,
      false, NOT_RETRANSMISSION, &frame);
  creator_.FlushCurrentPacket();
  EXPECT_EQ(pending_padding_bytes - 1, creator_.pending_padding_bytes());
  // 101-byte write fills the packet exactly; no padding byte is consumed.
  creator_.ConsumeDataToFillCurrentPacket(
      stream_id, absl::string_view(buf, kStreamFramePayloadSize + 1),
      kStreamFramePayloadSize, false, false, NOT_RETRANSMISSION, &frame);
  creator_.FlushCurrentPacket();
  EXPECT_EQ(pending_padding_bytes - 1, creator_.pending_padding_bytes());
  // Drain the rest of the pending padding with empty flushes.
  while (creator_.pending_padding_bytes() > 0) {
    creator_.FlushCurrentPacket();
  }
  EXPECT_EQ(0u, creator_.pending_padding_bytes());
}
// Supplies an externally owned packet buffer via GetPacketBuffer() and
// verifies the serialized packet is written into exactly that buffer.
TEST_P(QuicPacketCreatorTest, FlushWithExternalBuffer) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  // The release function frees the buffer once the packet is done with it.
  char* buffer = new char[kMaxOutgoingPacketSize];
  QuicPacketBuffer external_buffer = {buffer,
                                      [](const char* p) { delete[] p; }};
  EXPECT_CALL(delegate_, GetPacketBuffer()).WillOnce(Return(external_buffer));
  QuicFrame frame;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  const std::string data("test");
  ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, data, 0u, false,
      true, NOT_RETRANSMISSION, &frame));
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(Invoke([&external_buffer](SerializedPacket serialized_packet) {
        EXPECT_EQ(external_buffer.buffer, serialized_packet.encrypted_buffer);
      }));
  creator_.FlushCurrentPacket();
}
// Regression test: serializing an ACK frame with a large gap between ranges
// must not fail.
TEST_P(QuicPacketCreatorTest, IetfAckGapErrorRegression) {
  QuicAckFrame ack_frame =
      InitAckFrame({{QuicPacketNumber(60), QuicPacketNumber(61)},
                    {QuicPacketNumber(125), QuicPacketNumber(126)}});
  frames_.push_back(QuicFrame(&ack_frame));
  SerializeAllFrames(frames_);
}
// Exercises AddFrame with MESSAGE frames: a maximum-size message, small
// messages with the expected one-byte expansion, mixing with stream data,
// and rejection of a message that no longer fits in the current packet.
TEST_P(QuicPacketCreatorTest, AddMessageFrame) {
  if (client_framer_.version().UsesTls()) {
    creator_.SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
  }
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .Times(3)
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorTest::ClearSerializedPacketForTests));
  EXPECT_TRUE(creator_.HasRoomForMessageFrame(
      creator_.GetCurrentLargestMessagePayload()));
  // A message of exactly the largest payload fits in an empty packet.
  std::string large_message(creator_.GetCurrentLargestMessagePayload(), 'a');
  QuicMessageFrame* message_frame =
      new QuicMessageFrame(1, MemSliceFromString(large_message));
  EXPECT_TRUE(creator_.AddFrame(QuicFrame(message_frame), NOT_RETRANSMISSION));
  EXPECT_TRUE(creator_.HasPendingFrames());
  creator_.FlushCurrentPacket();
  QuicMessageFrame* frame2 =
      new QuicMessageFrame(2, MemSliceFromString("message"));
  EXPECT_TRUE(creator_.AddFrame(QuicFrame(frame2), NOT_RETRANSMISSION));
  EXPECT_TRUE(creator_.HasPendingFrames());
  // A short message needs a 1-byte length expansion for a following frame.
  EXPECT_EQ(1u, creator_.ExpansionOnNewFrame());
  QuicMessageFrame* frame3 =
      new QuicMessageFrame(3, MemSliceFromString("message2"));
  EXPECT_TRUE(creator_.AddFrame(QuicFrame(frame3), NOT_RETRANSMISSION));
  EXPECT_EQ(1u, creator_.ExpansionOnNewFrame());
  creator_.FlushCurrentPacket();
  QuicFrame frame;
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      client_framer_.transport_version(), Perspective::IS_CLIENT);
  const std::string data("test");
  EXPECT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
      stream_id, data, 0u, false, false, NOT_RETRANSMISSION, &frame));
  QuicMessageFrame* frame4 =
      new QuicMessageFrame(4, MemSliceFromString("message"));
  EXPECT_TRUE(creator_.AddFrame(QuicFrame(frame4), NOT_RETRANSMISSION));
  EXPECT_TRUE(creator_.HasPendingFrames());
  // The packet already has frames, so the largest message no longer fits.
  EXPECT_FALSE(creator_.HasRoomForMessageFrame(
      creator_.GetCurrentLargestMessagePayload()));
  QuicMessageFrame frame5(5, MemSliceFromString(large_message));
  EXPECT_FALSE(creator_.AddFrame(QuicFrame(&frame5), NOT_RETRANSMISSION));
  // The failed AddFrame flushed the packet, leaving the creator empty.
  EXPECT_FALSE(creator_.HasPendingFrames());
}
TEST_P(QuicPacketCreatorTest, MessageFrameConsumption) {
if (client_framer_.version().UsesTls()) {
creator_.SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
}
std::string message_data(kDefaultMaxPacketSize, 'a');
for (EncryptionLevel level :
{ENCRYPTION_ZERO_RTT, ENCRYPTION_FORWARD_SECURE}) {
creator_.set_encryption_level(level);
for (size_t message_size = 0;
message_size <= creator_.GetCurrentLargestMessagePayload();
++message_size) {
QuicMessageFrame* frame =
new QuicMessageFrame(0, MemSliceFromString(absl::string_view(
message_data.data(), message_size)));
EXPECT_TRUE(creator_.AddFrame(QuicFrame(frame), NOT_RETRANSMISSION));
EXPECT_TRUE(creator_.HasPendingFrames());
size_t expansion_bytes = message_size >= 64 ? 2 : 1;
EXPECT_EQ(expansion_bytes, creator_.ExpansionOnNewFrame());
size_t expected_bytes_free =
creator_.GetCurrentLargestMessagePayload() - message_size <
expansion_bytes
? 0
: creator_.GetCurrentLargestMessagePayload() - expansion_bytes -
message_size;
EXPECT_EQ(expected_bytes_free, creator_.BytesFree());
EXPECT_LE(creator_.GetGuaranteedLargestMessagePayload(),
creator_.GetCurrentLargestMessagePayload());
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
DeleteSerializedPacket();
}
}
}
TEST_P(QuicPacketCreatorTest, GetGuaranteedLargestMessagePayload) {
ParsedQuicVersion version = GetParam().version;
if (version.UsesTls()) {
creator_.SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
}
QuicPacketLength expected_largest_payload = 1215;
if (version.HasLongHeaderLengths()) {
expected_largest_payload -= 2;
}
if (version.HasLengthPrefixedConnectionIds()) {
expected_largest_payload -= 1;
}
EXPECT_EQ(expected_largest_payload,
creator_.GetGuaranteedLargestMessagePayload());
EXPECT_TRUE(creator_.HasRoomForMessageFrame(
creator_.GetGuaranteedLargestMessagePayload()));
creator_.SetMaxDatagramFrameSize(expected_largest_payload + 1 +
kQuicFrameTypeSize);
EXPECT_EQ(expected_largest_payload,
creator_.GetGuaranteedLargestMessagePayload());
EXPECT_TRUE(creator_.HasRoomForMessageFrame(
creator_.GetGuaranteedLargestMessagePayload()));
creator_.SetMaxDatagramFrameSize(expected_largest_payload +
kQuicFrameTypeSize);
EXPECT_EQ(expected_largest_payload,
creator_.GetGuaranteedLargestMessagePayload());
EXPECT_TRUE(creator_.HasRoomForMessageFrame(
creator_.GetGuaranteedLargestMessagePayload()));
creator_.SetMaxDatagramFrameSize(expected_largest_payload - 1 +
kQuicFrameTypeSize);
EXPECT_EQ(expected_largest_payload - 1,
creator_.GetGuaranteedLargestMessagePayload());
EXPECT_TRUE(creator_.HasRoomForMessageFrame(
creator_.GetGuaranteedLargestMessagePayload()));
constexpr QuicPacketLength kFrameSizeLimit = 1000;
constexpr QuicPacketLength kPayloadSizeLimit =
kFrameSizeLimit - kQuicFrameTypeSize;
creator_.SetMaxDatagramFrameSize(kFrameSizeLimit);
EXPECT_EQ(creator_.GetGuaranteedLargestMessagePayload(), kPayloadSizeLimit);
EXPECT_TRUE(creator_.HasRoomForMessageFrame(kPayloadSizeLimit));
EXPECT_FALSE(creator_.HasRoomForMessageFrame(kPayloadSizeLimit + 1));
}
TEST_P(QuicPacketCreatorTest, GetCurrentLargestMessagePayload) {
ParsedQuicVersion version = GetParam().version;
if (version.UsesTls()) {
creator_.SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
}
QuicPacketLength expected_largest_payload = 1215;
if (version.SendsVariableLengthPacketNumberInLongHeader()) {
expected_largest_payload += 3;
}
if (version.HasLongHeaderLengths()) {
expected_largest_payload -= 2;
}
if (version.HasLengthPrefixedConnectionIds()) {
expected_largest_payload -= 1;
}
EXPECT_EQ(expected_largest_payload,
creator_.GetCurrentLargestMessagePayload());
creator_.SetMaxDatagramFrameSize(expected_largest_payload + 1 +
kQuicFrameTypeSize);
EXPECT_EQ(expected_largest_payload,
creator_.GetCurrentLargestMessagePayload());
creator_.SetMaxDatagramFrameSize(expected_largest_payload +
kQuicFrameTypeSize);
EXPECT_EQ(expected_largest_payload,
creator_.GetCurrentLargestMessagePayload());
creator_.SetMaxDatagramFrameSize(expected_largest_payload - 1 +
kQuicFrameTypeSize);
EXPECT_EQ(expected_largest_payload - 1,
creator_.GetCurrentLargestMessagePayload());
}
TEST_P(QuicPacketCreatorTest, PacketTransmissionType) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
QuicAckFrame temp_ack_frame = InitAckFrame(1);
QuicFrame ack_frame(&temp_ack_frame);
ASSERT_FALSE(QuicUtils::IsRetransmittableFrame(ack_frame.type));
QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
client_framer_.transport_version(), Perspective::IS_CLIENT);
QuicFrame stream_frame(QuicStreamFrame(stream_id,
false, 0u,
absl::string_view()));
ASSERT_TRUE(QuicUtils::IsRetransmittableFrame(stream_frame.type));
QuicFrame stream_frame_2(QuicStreamFrame(stream_id,
false, 1u,
absl::string_view()));
QuicFrame padding_frame{QuicPaddingFrame()};
ASSERT_FALSE(QuicUtils::IsRetransmittableFrame(padding_frame.type));
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
EXPECT_TRUE(creator_.AddFrame(ack_frame, LOSS_RETRANSMISSION));
ASSERT_EQ(serialized_packet_, nullptr);
EXPECT_TRUE(creator_.AddFrame(stream_frame, PTO_RETRANSMISSION));
ASSERT_EQ(serialized_packet_, nullptr);
EXPECT_TRUE(creator_.AddFrame(stream_frame_2, PATH_RETRANSMISSION));
ASSERT_EQ(serialized_packet_, nullptr);
EXPECT_TRUE(creator_.AddFrame(padding_frame, PTO_RETRANSMISSION));
creator_.FlushCurrentPacket();
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
EXPECT_EQ(serialized_packet_->transmission_type, PATH_RETRANSMISSION);
DeleteSerializedPacket();
}
TEST_P(QuicPacketCreatorTest,
PacketBytesRetransmitted_AddFrame_Retransmission) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
QuicAckFrame temp_ack_frame = InitAckFrame(1);
QuicFrame ack_frame(&temp_ack_frame);
EXPECT_TRUE(creator_.AddFrame(ack_frame, LOSS_RETRANSMISSION));
QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
client_framer_.transport_version(), Perspective::IS_CLIENT);
QuicFrame stream_frame;
const std::string data("data");
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id, data, 0u, false, false, PTO_RETRANSMISSION, &stream_frame));
EXPECT_EQ(4u, stream_frame.stream_frame.data_length);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
ASSERT_FALSE(serialized_packet_->bytes_not_retransmitted.has_value());
DeleteSerializedPacket();
}
TEST_P(QuicPacketCreatorTest,
PacketBytesRetransmitted_AddFrame_NotRetransmission) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
QuicAckFrame temp_ack_frame = InitAckFrame(1);
QuicFrame ack_frame(&temp_ack_frame);
EXPECT_TRUE(creator_.AddFrame(ack_frame, NOT_RETRANSMISSION));
QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
client_framer_.transport_version(), Perspective::IS_CLIENT);
QuicFrame stream_frame;
const std::string data("data");
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id, data, 0u, false, false, NOT_RETRANSMISSION, &stream_frame));
EXPECT_EQ(4u, stream_frame.stream_frame.data_length);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
ASSERT_FALSE(serialized_packet_->bytes_not_retransmitted.has_value());
DeleteSerializedPacket();
}
TEST_P(QuicPacketCreatorTest, PacketBytesRetransmitted_AddFrame_MixedFrames) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
QuicAckFrame temp_ack_frame = InitAckFrame(1);
QuicFrame ack_frame(&temp_ack_frame);
EXPECT_TRUE(creator_.AddFrame(ack_frame, NOT_RETRANSMISSION));
QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
client_framer_.transport_version(), Perspective::IS_CLIENT);
QuicFrame stream_frame;
const std::string data("data");
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id, data, 0u, false, false, NOT_RETRANSMISSION, &stream_frame));
EXPECT_EQ(4u, stream_frame.stream_frame.data_length);
QuicFrame stream_frame2;
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id, data, 0u, false, false, LOSS_RETRANSMISSION, &stream_frame2));
EXPECT_EQ(4u, stream_frame2.stream_frame.data_length);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
ASSERT_TRUE(serialized_packet_->bytes_not_retransmitted.has_value());
ASSERT_GE(serialized_packet_->bytes_not_retransmitted.value(), 4u);
DeleteSerializedPacket();
}
TEST_P(QuicPacketCreatorTest,
PacketBytesRetransmitted_CreateAndSerializeStreamFrame_Retransmission) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
const std::string data("test");
producer_.SaveStreamData(GetNthClientInitiatedStreamId(0), data);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
size_t num_bytes_consumed;
creator_.CreateAndSerializeStreamFrame(
GetNthClientInitiatedStreamId(0), data.length(), 0, 0, true,
LOSS_RETRANSMISSION, &num_bytes_consumed);
EXPECT_EQ(4u, num_bytes_consumed);
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
ASSERT_FALSE(serialized_packet_->bytes_not_retransmitted.has_value());
DeleteSerializedPacket();
EXPECT_FALSE(creator_.HasPendingFrames());
}
TEST_P(
QuicPacketCreatorTest,
PacketBytesRetransmitted_CreateAndSerializeStreamFrame_NotRetransmission) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
const std::string data("test");
producer_.SaveStreamData(GetNthClientInitiatedStreamId(0), data);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
size_t num_bytes_consumed;
creator_.CreateAndSerializeStreamFrame(
GetNthClientInitiatedStreamId(0), data.length(), 0, 0, true,
NOT_RETRANSMISSION, &num_bytes_consumed);
EXPECT_EQ(4u, num_bytes_consumed);
ASSERT_TRUE(serialized_packet_->encrypted_buffer);
ASSERT_FALSE(serialized_packet_->bytes_not_retransmitted.has_value());
DeleteSerializedPacket();
EXPECT_FALSE(creator_.HasPendingFrames());
}
TEST_P(QuicPacketCreatorTest, RetryToken) {
if (!GetParam().version_serialization ||
!QuicVersionHasLongHeaderLengths(client_framer_.transport_version())) {
return;
}
char retry_token_bytes[] = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
creator_.SetRetryToken(
std::string(retry_token_bytes, sizeof(retry_token_bytes)));
frames_.push_back(QuicFrame(QuicPingFrame()));
SerializedPacket serialized = SerializeAllFrames(frames_);
QuicPacketHeader header;
{
InSequence s;
EXPECT_CALL(framer_visitor_, OnPacket());
EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
EXPECT_CALL(framer_visitor_, OnPacketHeader(_))
.WillOnce(DoAll(SaveArg<0>(&header), Return(true)));
if (client_framer_.version().HasHeaderProtection()) {
EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
}
EXPECT_CALL(framer_visitor_, OnPingFrame(_));
EXPECT_CALL(framer_visitor_, OnPacketComplete());
}
ProcessPacket(serialized);
ASSERT_TRUE(header.version_flag);
ASSERT_EQ(header.long_packet_type, INITIAL);
ASSERT_EQ(header.retry_token.length(), sizeof(retry_token_bytes));
quiche::test::CompareCharArraysWithHexError(
"retry token", header.retry_token.data(), header.retry_token.length(),
retry_token_bytes, sizeof(retry_token_bytes));
}
TEST_P(QuicPacketCreatorTest, GetConnectionId) {
EXPECT_EQ(TestConnectionId(2), creator_.GetDestinationConnectionId());
EXPECT_EQ(EmptyQuicConnectionId(), creator_.GetSourceConnectionId());
}
TEST_P(QuicPacketCreatorTest, ClientConnectionId) {
if (!client_framer_.version().SupportsClientConnectionIds()) {
return;
}
EXPECT_EQ(TestConnectionId(2), creator_.GetDestinationConnectionId());
EXPECT_EQ(EmptyQuicConnectionId(), creator_.GetSourceConnectionId());
creator_.SetClientConnectionId(TestConnectionId(0x33));
EXPECT_EQ(TestConnectionId(2), creator_.GetDestinationConnectionId());
EXPECT_EQ(TestConnectionId(0x33), creator_.GetSourceConnectionId());
}
TEST_P(QuicPacketCreatorTest, CoalesceStreamFrames) {
InSequence s;
if (!GetParam().version_serialization) {
creator_.StopSendingVersion();
}
const size_t max_plaintext_size =
client_framer_.GetMaxPlaintextSize(creator_.max_packet_length());
EXPECT_FALSE(creator_.HasPendingFrames());
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
QuicStreamId stream_id1 = QuicUtils::GetFirstBidirectionalStreamId(
client_framer_.transport_version(), Perspective::IS_CLIENT);
QuicStreamId stream_id2 = GetNthClientInitiatedStreamId(1);
EXPECT_FALSE(creator_.HasPendingStreamFramesOfStream(stream_id1));
EXPECT_EQ(max_plaintext_size -
GetPacketHeaderSize(
client_framer_.transport_version(),
creator_.GetDestinationConnectionIdLength(),
creator_.GetSourceConnectionIdLength(),
QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
!kIncludeDiversificationNonce,
QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_),
0, QuicPacketCreatorPeer::GetLengthLength(&creator_)),
creator_.BytesFree());
StrictMock<MockDebugDelegate> debug;
creator_.set_debug_delegate(&debug);
QuicFrame frame;
const std::string data1("test");
EXPECT_CALL(debug, OnFrameAddedToPacket(_));
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id1, data1, 0u, false, false, NOT_RETRANSMISSION, &frame));
EXPECT_TRUE(creator_.HasPendingFrames());
EXPECT_TRUE(creator_.HasPendingStreamFramesOfStream(stream_id1));
const std::string data2("coalesce");
const auto previous_size = creator_.PacketSize();
QuicStreamFrame target(stream_id1, true, 0, data1.length() + data2.length());
EXPECT_CALL(debug, OnStreamFrameCoalesced(target));
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id1, data2, 4u, true, false, NOT_RETRANSMISSION, &frame));
EXPECT_EQ(frame.stream_frame.data_length,
creator_.PacketSize() - previous_size);
const auto length = creator_.BytesFree() - 10u;
const std::string data3(length, 'x');
EXPECT_CALL(debug, OnFrameAddedToPacket(_));
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id2, data3, 0u, false, false, NOT_RETRANSMISSION, &frame));
EXPECT_TRUE(creator_.HasPendingStreamFramesOfStream(stream_id2));
EXPECT_CALL(debug, OnStreamFrameCoalesced(_));
const std::string data4("somerandomdata");
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
stream_id2, data4, length, false, false, NOT_RETRANSMISSION, &frame));
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
EXPECT_CALL(framer_visitor_, OnPacket());
EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
EXPECT_CALL(framer_visitor_, OnPacketComplete());
ProcessPacket(*serialized_packet_);
}
TEST_P(QuicPacketCreatorTest, SaveNonRetransmittableFrames) {
QuicAckFrame ack_frame(InitAckFrame(1));
frames_.push_back(QuicFrame(&ack_frame));
frames_.push_back(QuicFrame(QuicPaddingFrame(-1)));
SerializedPacket serialized = SerializeAllFrames(frames_);
ASSERT_EQ(2u, serialized.nonretransmittable_frames.size());
EXPECT_EQ(ACK_FRAME, serialized.nonretransmittable_frames[0].type);
EXPECT_EQ(PADDING_FRAME, serialized.nonretransmittable_frames[1].type);
EXPECT_LT(
0,
serialized.nonretransmittable_frames[1].padding_frame.num_padding_bytes);
frames_.clear();
SerializedPacket packet = QuicPacketCreatorPeer::SerializeAllFrames(
&creator_, serialized.nonretransmittable_frames, buffer_,
kMaxOutgoingPacketSize);
EXPECT_EQ(serialized.encrypted_length, packet.encrypted_length);
}
TEST_P(QuicPacketCreatorTest, SerializeCoalescedPacket) {
QuicCoalescedPacket coalesced;
quiche::SimpleBufferAllocator allocator;
QuicSocketAddress self_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress peer_address(QuicIpAddress::Loopback4(), 2);
for (size_t i = ENCRYPTION_INITIAL; i < NUM_ENCRYPTION_LEVELS; ++i) {
EncryptionLevel level = static_cast<EncryptionLevel>(i);
creator_.set_encryption_level(level);
QuicAckFrame ack_frame(InitAckFrame(1));
if (level != ENCRYPTION_ZERO_RTT) {
frames_.push_back(QuicFrame(&ack_frame));
}
if (level != ENCRYPTION_INITIAL && level != ENCRYPTION_HANDSHAKE) {
frames_.push_back(
QuicFrame(QuicStreamFrame(1, false, 0u, absl::string_view())));
}
SerializedPacket serialized = SerializeAllFrames(frames_);
EXPECT_EQ(level, serialized.encryption_level);
frames_.clear();
ASSERT_TRUE(coalesced.MaybeCoalescePacket(
serialized, self_address, peer_address, &allocator,
creator_.max_packet_length(), ECN_NOT_ECT));
}
char buffer[kMaxOutgoingPacketSize];
size_t coalesced_length = creator_.SerializeCoalescedPacket(
coalesced, buffer, kMaxOutgoingPacketSize);
ASSERT_EQ(coalesced.max_packet_length(), coalesced_length);
if (!QuicVersionHasLongHeaderLengths(server_framer_.transport_version())) {
return;
}
std::unique_ptr<QuicEncryptedPacket> packets[NUM_ENCRYPTION_LEVELS];
packets[ENCRYPTION_INITIAL] =
std::make_unique<QuicEncryptedPacket>(buffer, coalesced_length);
for (size_t i = ENCRYPTION_INITIAL; i < NUM_ENCRYPTION_LEVELS; ++i) {
InSequence s;
EXPECT_CALL(framer_visitor_, OnPacket());
EXPECT_CALL(framer_visitor_, OnUnauthenticatedPublicHeader(_));
if (i < ENCRYPTION_FORWARD_SECURE) {
EXPECT_CALL(framer_visitor_, OnCoalescedPacket(_))
.WillOnce(Invoke([i, &packets](const QuicEncryptedPacket& packet) {
packets[i + 1] = packet.Clone();
}));
}
EXPECT_CALL(framer_visitor_, OnUnauthenticatedHeader(_));
EXPECT_CALL(framer_visitor_, OnDecryptedPacket(_, _));
EXPECT_CALL(framer_visitor_, OnPacketHeader(_));
if (i != ENCRYPTION_ZERO_RTT) {
if (i != ENCRYPTION_INITIAL) {
EXPECT_CALL(framer_visitor_, OnPaddingFrame(_))
.Times(testing::AtMost(1));
}
EXPECT_CALL(framer_visitor_, OnAckFrameStart(_, _))
.WillOnce(Return(true));
EXPECT_CALL(framer_visitor_,
OnAckRange(QuicPacketNumber(1), QuicPacketNumber(2)))
.WillOnce(Return(true));
EXPECT_CALL(framer_visitor_, OnAckFrameEnd(_, _)).WillOnce(Return(true));
}
if (i == ENCRYPTION_INITIAL) {
EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
}
if (i == ENCRYPTION_ZERO_RTT) {
EXPECT_CALL(framer_visitor_, OnPaddingFrame(_));
}
if (i != ENCRYPTION_INITIAL && i != ENCRYPTION_HANDSHAKE) {
EXPECT_CALL(framer_visitor_, OnStreamFrame(_));
}
EXPECT_CALL(framer_visitor_, OnPacketComplete());
server_framer_.ProcessPacket(*packets[i]);
}
}
TEST_P(QuicPacketCreatorTest, SoftMaxPacketLength) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
QuicByteCount previous_max_packet_length = creator_.max_packet_length();
const size_t overhead =
GetPacketHeaderOverhead(client_framer_.transport_version()) +
QuicPacketCreator::MinPlaintextPacketSize(
client_framer_.version(),
QuicPacketCreatorPeer::GetPacketNumberLength(&creator_)) +
GetEncryptionOverhead();
creator_.SetSoftMaxPacketLength(overhead - 1);
EXPECT_EQ(previous_max_packet_length, creator_.max_packet_length());
creator_.SetSoftMaxPacketLength(overhead);
EXPECT_EQ(overhead, creator_.max_packet_length());
ASSERT_TRUE(creator_.HasRoomForStreamFrame(
GetNthClientInitiatedStreamId(1), kMaxIetfVarInt,
std::numeric_limits<uint32_t>::max()));
EXPECT_EQ(previous_max_packet_length, creator_.max_packet_length());
creator_.SetSoftMaxPacketLength(overhead);
if (client_framer_.version().UsesTls()) {
creator_.SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
}
EXPECT_LT(1u, creator_.GetCurrentLargestMessagePayload());
EXPECT_EQ(overhead, creator_.max_packet_length());
ASSERT_TRUE(creator_.HasRoomForMessageFrame(
creator_.GetCurrentLargestMessagePayload()));
EXPECT_EQ(previous_max_packet_length, creator_.max_packet_length());
creator_.SetSoftMaxPacketLength(overhead);
EXPECT_EQ(overhead, creator_.max_packet_length());
const std::string data = "crypto data";
QuicFrame frame;
if (!QuicVersionUsesCryptoFrames(client_framer_.transport_version())) {
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
QuicUtils::GetCryptoStreamId(client_framer_.transport_version()), data,
kOffset, false, true, NOT_RETRANSMISSION, &frame));
size_t bytes_consumed = frame.stream_frame.data_length;
EXPECT_LT(0u, bytes_consumed);
} else {
producer_.SaveCryptoData(ENCRYPTION_INITIAL, kOffset, data);
ASSERT_TRUE(creator_.ConsumeCryptoDataToFillCurrentPacket(
ENCRYPTION_INITIAL, data.length(), kOffset,
true, NOT_RETRANSMISSION, &frame));
size_t bytes_consumed = frame.crypto_frame->data_length;
EXPECT_LT(0u, bytes_consumed);
}
EXPECT_TRUE(creator_.HasPendingFrames());
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
creator_.SetSoftMaxPacketLength(overhead);
EXPECT_EQ(overhead, creator_.max_packet_length());
QuicAckFrame ack_frame(InitAckFrame(10u));
EXPECT_TRUE(creator_.AddFrame(QuicFrame(&ack_frame), NOT_RETRANSMISSION));
EXPECT_TRUE(creator_.HasPendingFrames());
}
TEST_P(QuicPacketCreatorTest,
ChangingEncryptionLevelRemovesSoftMaxPacketLength) {
if (!client_framer_.version().CanSendCoalescedPackets()) {
return;
}
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
const QuicByteCount previous_max_packet_length = creator_.max_packet_length();
const size_t min_acceptable_packet_size =
GetPacketHeaderOverhead(client_framer_.transport_version()) +
QuicPacketCreator::MinPlaintextPacketSize(
client_framer_.version(),
QuicPacketCreatorPeer::GetPacketNumberLength(&creator_)) +
GetEncryptionOverhead();
creator_.SetSoftMaxPacketLength(min_acceptable_packet_size);
EXPECT_EQ(creator_.max_packet_length(), min_acceptable_packet_size);
creator_.set_encryption_level(ENCRYPTION_HANDSHAKE);
QuicAckFrame ack_frame(InitAckFrame(1));
frames_.push_back(QuicFrame(&ack_frame));
SerializedPacket serialized = SerializeAllFrames(frames_);
EXPECT_EQ(serialized.encryption_level, ENCRYPTION_HANDSHAKE);
EXPECT_EQ(creator_.max_packet_length(), previous_max_packet_length);
}
TEST_P(QuicPacketCreatorTest, MinPayloadLength) {
ParsedQuicVersion version = client_framer_.version();
for (QuicPacketNumberLength pn_length :
{PACKET_1BYTE_PACKET_NUMBER, PACKET_2BYTE_PACKET_NUMBER,
PACKET_3BYTE_PACKET_NUMBER, PACKET_4BYTE_PACKET_NUMBER}) {
if (!version.HasHeaderProtection()) {
EXPECT_EQ(creator_.MinPlaintextPacketSize(version, pn_length), 0);
} else {
EXPECT_EQ(creator_.MinPlaintextPacketSize(version, pn_length),
(version.UsesTls() ? 4 : 8) - pn_length);
}
}
}
TEST_P(QuicPacketCreatorTest, PadWhenAlmostMaxLength) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
const size_t overhead =
GetPacketHeaderOverhead(client_framer_.transport_version()) +
GetEncryptionOverhead() +
GetStreamFrameOverhead(client_framer_.transport_version());
size_t capacity = kDefaultMaxPacketSize - overhead;
for (size_t bytes_free = 1; bytes_free <= 2; bytes_free++) {
std::string data(capacity - bytes_free, 'A');
QuicFrame frame;
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
GetNthClientInitiatedStreamId(1), data, kOffset, false,
true, NOT_RETRANSMISSION, &frame));
EXPECT_EQ(2u, creator_.ExpansionOnNewFrame());
EXPECT_EQ(0u, creator_.BytesFree());
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
EXPECT_EQ(serialized_packet_->encrypted_length, kDefaultMaxPacketSize);
DeleteSerializedPacket();
}
}
TEST_P(QuicPacketCreatorTest, MorePendingPaddingThanBytesFree) {
creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
const size_t overhead =
GetPacketHeaderOverhead(client_framer_.transport_version()) +
GetEncryptionOverhead() +
GetStreamFrameOverhead(client_framer_.transport_version());
size_t capacity = kDefaultMaxPacketSize - overhead;
const size_t pending_padding = 10;
std::string data(capacity - pending_padding, 'A');
QuicFrame frame;
ASSERT_TRUE(creator_.ConsumeDataToFillCurrentPacket(
GetNthClientInitiatedStreamId(1), data, kOffset, false,
false, NOT_RETRANSMISSION, &frame));
creator_.AddPendingPadding(pending_padding);
EXPECT_EQ(2u, creator_.ExpansionOnNewFrame());
EXPECT_EQ(pending_padding - 2u, creator_.BytesFree());
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke(this, &QuicPacketCreatorTest::SaveSerializedPacket));
creator_.FlushCurrentPacket();
EXPECT_EQ(serialized_packet_->encrypted_length, kDefaultMaxPacketSize);
DeleteSerializedPacket();
}
class MockDelegate : public QuicPacketCreator::DelegateInterface {
public:
MockDelegate() {}
MockDelegate(const MockDelegate&) = delete;
MockDelegate& operator=(const MockDelegate&) = delete;
~MockDelegate() override {}
MOCK_METHOD(bool, ShouldGeneratePacket,
(HasRetransmittableData retransmittable, IsHandshake handshake),
(override));
MOCK_METHOD(void, MaybeBundleOpportunistically,
(TransmissionType transmission_type), (override));
MOCK_METHOD(QuicByteCount, GetFlowControlSendWindowSize, (QuicStreamId),
(override));
MOCK_METHOD(QuicPacketBuffer, GetPacketBuffer, (), (override));
MOCK_METHOD(void, OnSerializedPacket, (SerializedPacket), (override));
MOCK_METHOD(void, OnUnrecoverableError, (QuicErrorCode, const std::string&),
(override));
MOCK_METHOD(SerializedPacketFate, GetSerializedPacketFate,
(bool, EncryptionLevel), (override));
void SetCanWriteAnything() {
EXPECT_CALL(*this, ShouldGeneratePacket(_, _)).WillRepeatedly(Return(true));
EXPECT_CALL(*this, ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA, _))
.WillRepeatedly(Return(true));
}
void SetCanNotWrite() {
EXPECT_CALL(*this, ShouldGeneratePacket(_, _))
.WillRepeatedly(Return(false));
EXPECT_CALL(*this, ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA, _))
.WillRepeatedly(Return(false));
}
void SetCanWriteOnlyNonRetransmittable() {
EXPECT_CALL(*this, ShouldGeneratePacket(_, _))
.WillRepeatedly(Return(false));
EXPECT_CALL(*this, ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA, _))
.WillRepeatedly(Return(true));
}
};
struct PacketContents {
PacketContents()
: num_ack_frames(0),
num_connection_close_frames(0),
num_goaway_frames(0),
num_rst_stream_frames(0),
num_stop_waiting_frames(0),
num_stream_frames(0),
num_crypto_frames(0),
num_ping_frames(0),
num_mtu_discovery_frames(0),
num_padding_frames(0) {}
size_t num_ack_frames;
size_t num_connection_close_frames;
size_t num_goaway_frames;
size_t num_rst_stream_frames;
size_t num_stop_waiting_frames;
size_t num_stream_frames;
size_t num_crypto_frames;
size_t num_ping_frames;
size_t num_mtu_discovery_frames;
size_t num_padding_frames;
};
class MultiplePacketsTestPacketCreator : public QuicPacketCreator {
public:
MultiplePacketsTestPacketCreator(
QuicConnectionId connection_id, QuicFramer* framer,
QuicRandom* random_generator,
QuicPacketCreator::DelegateInterface* delegate,
SimpleDataProducer* producer)
: QuicPacketCreator(connection_id, framer, random_generator, delegate),
ack_frame_(InitAckFrame(1)),
delegate_(static_cast<MockDelegate*>(delegate)),
producer_(producer) {}
bool ConsumeRetransmittableControlFrame(const QuicFrame& frame,
bool bundle_ack) {
QuicFrames frames;
if (bundle_ack) {
frames.push_back(QuicFrame(&ack_frame_));
}
EXPECT_CALL(*delegate_, MaybeBundleOpportunistically(_))
.WillOnce(Invoke([this, frames = std::move(frames)] {
FlushAckFrame(frames);
return QuicFrames();
}));
return QuicPacketCreator::ConsumeRetransmittableControlFrame(frame);
}
QuicConsumedData ConsumeDataFastPath(QuicStreamId id,
absl::string_view data) {
if (!data.empty()) {
producer_->SaveStreamData(id, data);
}
return QuicPacketCreator::ConsumeDataFastPath(id, data.length(),
0,
true, 0);
}
QuicConsumedData ConsumeData(QuicStreamId id, absl::string_view data,
QuicStreamOffset offset,
StreamSendingState state) {
if (!data.empty()) {
producer_->SaveStreamData(id, data);
}
EXPECT_CALL(*delegate_, MaybeBundleOpportunistically(_)).Times(1);
return QuicPacketCreator::ConsumeData(id, data.length(), offset, state);
}
MessageStatus AddMessageFrame(QuicMessageId message_id,
quiche::QuicheMemSlice message) {
if (!has_ack() && delegate_->ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA,
NOT_HANDSHAKE)) {
EXPECT_CALL(*delegate_, MaybeBundleOpportunistically(_)).Times(1);
}
return QuicPacketCreator::AddMessageFrame(message_id,
absl::MakeSpan(&message, 1));
}
size_t ConsumeCryptoData(EncryptionLevel level, absl::string_view data,
QuicStreamOffset offset) {
producer_->SaveCryptoData(level, offset, data);
EXPECT_CALL(*delegate_, MaybeBundleOpportunistically(_)).Times(1);
return QuicPacketCreator::ConsumeCryptoData(level, data.length(), offset);
}
QuicAckFrame ack_frame_;
MockDelegate* delegate_;
SimpleDataProducer* producer_;
};
// Test fixture: wires a MultiplePacketsTestPacketCreator to a StrictMock
// delegate, a real QuicFramer and a SimpleQuicFramer so tests can drive
// multi-packet serialization and then parse and inspect each packet.
class QuicPacketCreatorMultiplePacketsTest : public QuicTest {
 public:
  QuicPacketCreatorMultiplePacketsTest()
      : framer_(AllSupportedVersions(), QuicTime::Zero(),
                Perspective::IS_CLIENT, kQuicDefaultConnectionIdLength),
        creator_(TestConnectionId(), &framer_, &random_creator_, &delegate_,
                 &producer_),
        ack_frame_(InitAckFrame(1)) {
    // Benign defaults for the strict mock; individual tests add the
    // expectations that actually matter (e.g. OnSerializedPacket).
    EXPECT_CALL(delegate_, GetPacketBuffer())
        .WillRepeatedly(Return(QuicPacketBuffer()));
    EXPECT_CALL(delegate_, GetSerializedPacketFate(_, _))
        .WillRepeatedly(Return(SEND_TO_WRITER));
    EXPECT_CALL(delegate_, GetFlowControlSendWindowSize(_))
        .WillRepeatedly(Return(std::numeric_limits<QuicByteCount>::max()));
    // Tagging encrypter/decrypter pair lets SimpleQuicFramer parse what the
    // creator serializes at ENCRYPTION_FORWARD_SECURE.
    creator_.SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
    creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
    framer_.set_data_producer(&producer_);
    if (simple_framer_.framer()->version().KnowsWhichDecrypterToUse()) {
      simple_framer_.framer()->InstallDecrypter(
          ENCRYPTION_FORWARD_SECURE, std::make_unique<TaggingDecrypter>());
    }
    creator_.AttachPacketFlusher();
  }
  ~QuicPacketCreatorMultiplePacketsTest() override {}
  // Copies the serialized packet's buffer into heap storage owned by
  // packets_ so it remains valid after the creator reuses its buffer.
  void SavePacket(SerializedPacket packet) {
    QUICHE_DCHECK(packet.release_encrypted_buffer == nullptr);
    packet.encrypted_buffer = CopyBuffer(packet);
    packet.release_encrypted_buffer = [](const char* p) { delete[] p; };
    packets_.push_back(std::move(packet));
  }
 protected:
  // Heap-allocated frames: ownership passes to the creator on successful
  // consumption; tests delete them manually on rejection.
  QuicRstStreamFrame* CreateRstStreamFrame() {
    return new QuicRstStreamFrame(1, 1, QUIC_STREAM_NO_ERROR, 0);
  }
  QuicGoAwayFrame* CreateGoAwayFrame() {
    return new QuicGoAwayFrame(2, QUIC_NO_ERROR, 1, std::string());
  }
  // Parses packets_[packet_index] and asserts it holds exactly the frame
  // counts in |contents|. When contents.num_padding_frames is 0, whatever
  // padding the parser found is tolerated (treated as implicit).
  void CheckPacketContains(const PacketContents& contents,
                           size_t packet_index) {
    ASSERT_GT(packets_.size(), packet_index);
    const SerializedPacket& packet = packets_[packet_index];
    size_t num_retransmittable_frames =
        contents.num_connection_close_frames + contents.num_goaway_frames +
        contents.num_rst_stream_frames + contents.num_stream_frames +
        contents.num_crypto_frames + contents.num_ping_frames;
    size_t num_frames =
        contents.num_ack_frames + contents.num_stop_waiting_frames +
        contents.num_mtu_discovery_frames + contents.num_padding_frames +
        num_retransmittable_frames;
    if (num_retransmittable_frames == 0) {
      ASSERT_TRUE(packet.retransmittable_frames.empty());
    } else {
      EXPECT_EQ(num_retransmittable_frames,
                packet.retransmittable_frames.size());
    }
    ASSERT_TRUE(packet.encrypted_buffer != nullptr);
    ASSERT_TRUE(simple_framer_.ProcessPacket(
        QuicEncryptedPacket(packet.encrypted_buffer, packet.encrypted_length)));
    size_t num_padding_frames = 0;
    if (contents.num_padding_frames == 0) {
      num_padding_frames = simple_framer_.padding_frames().size();
    }
    EXPECT_EQ(num_frames + num_padding_frames, simple_framer_.num_frames());
    EXPECT_EQ(contents.num_ack_frames, simple_framer_.ack_frames().size());
    EXPECT_EQ(contents.num_connection_close_frames,
              simple_framer_.connection_close_frames().size());
    EXPECT_EQ(contents.num_goaway_frames,
              simple_framer_.goaway_frames().size());
    EXPECT_EQ(contents.num_rst_stream_frames,
              simple_framer_.rst_stream_frames().size());
    EXPECT_EQ(contents.num_stream_frames,
              simple_framer_.stream_frames().size());
    EXPECT_EQ(contents.num_crypto_frames,
              simple_framer_.crypto_frames().size());
    EXPECT_EQ(contents.num_stop_waiting_frames,
              simple_framer_.stop_waiting_frames().size());
    if (contents.num_padding_frames != 0) {
      EXPECT_EQ(contents.num_padding_frames,
                simple_framer_.padding_frames().size());
    }
    // MTU probes are serialized as ping frames, so they count together here.
    EXPECT_EQ(contents.num_ping_frames + contents.num_mtu_discovery_frames,
              simple_framer_.ping_frames().size());
  }
  // Asserts packets_[packet_index] parses to exactly one stream frame and
  // nothing else.
  void CheckPacketHasSingleStreamFrame(size_t packet_index) {
    ASSERT_GT(packets_.size(), packet_index);
    const SerializedPacket& packet = packets_[packet_index];
    ASSERT_FALSE(packet.retransmittable_frames.empty());
    EXPECT_EQ(1u, packet.retransmittable_frames.size());
    ASSERT_TRUE(packet.encrypted_buffer != nullptr);
    ASSERT_TRUE(simple_framer_.ProcessPacket(
        QuicEncryptedPacket(packet.encrypted_buffer, packet.encrypted_length)));
    EXPECT_EQ(1u, simple_framer_.num_frames());
    EXPECT_EQ(1u, simple_framer_.stream_frames().size());
  }
  void CheckAllPacketsHaveSingleStreamFrame() {
    for (size_t i = 0; i < packets_.size(); i++) {
      CheckPacketHasSingleStreamFrame(i);
    }
  }
  QuicFramer framer_;
  MockRandom random_creator_;
  StrictMock<MockDelegate> delegate_;
  MultiplePacketsTestPacketCreator creator_;
  SimpleQuicFramer simple_framer_;
  std::vector<SerializedPacket> packets_;  // packets saved via SavePacket()
  QuicAckFrame ack_frame_;
  struct iovec iov_;
  quiche::SimpleBufferAllocator allocator_;
 private:
  std::unique_ptr<char[]> data_array_;
  SimpleDataProducer producer_;
};
// A retransmittable control frame offered while the delegate cannot write
// must be rejected and leave nothing pending; ownership of the frame stays
// with the caller.
TEST_F(QuicPacketCreatorMultiplePacketsTest, AddControlFrame_NotWritable) {
  delegate_.SetCanNotWrite();
  QuicRstStreamFrame* reset = CreateRstStreamFrame();
  const bool frame_added =
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(reset), false);
  EXPECT_FALSE(frame_added);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  // The creator did not take ownership, so free the frame here.
  delete reset;
}
// Stream data must not take the fast path at ENCRYPTION_HANDSHAKE: the
// attempt trips a QUIC_BUG, reports an unrecoverable error, and serializes
// no packet.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       WrongEncryptionLevelForStreamDataFastPath) {
  creator_.set_encryption_level(ENCRYPTION_HANDSHAKE);
  delegate_.SetCanWriteAnything();
  const std::string data(10000, '?');
  // No packet may reach the delegate at the wrong encryption level.
  EXPECT_CALL(delegate_, OnSerializedPacket(_)).Times(0);
  EXPECT_QUIC_BUG(
      {
        EXPECT_CALL(delegate_, OnUnrecoverableError(_, _));
        creator_.ConsumeDataFastPath(
            QuicUtils::GetFirstBidirectionalStreamId(
                framer_.transport_version(), Perspective::IS_CLIENT),
            data);
      },
      "");
}
// When only non-retransmittable data may be written, a retransmittable
// control frame is rejected and the caller keeps ownership.
TEST_F(QuicPacketCreatorMultiplePacketsTest, AddControlFrame_OnlyAckWritable) {
  delegate_.SetCanWriteOnlyNonRetransmittable();
  QuicRstStreamFrame* reset = CreateRstStreamFrame();
  const bool frame_added =
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(reset), false);
  EXPECT_FALSE(frame_added);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  // Rejected frame was never adopted by the creator; free it ourselves.
  delete reset;
}
// A writable delegate accepts the control frame; without a flush it stays
// queued as a pending retransmittable frame.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       AddControlFrame_WritableAndShouldNotFlush) {
  delegate_.SetCanWriteAnything();
  creator_.ConsumeRetransmittableControlFrame(QuicFrame(CreateRstStreamFrame()),
                                              false);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
}
// With a non-writable delegate the frame is rejected and nothing is queued.
// NOTE(review): despite the "BatchThenFlush" name, this body is identical to
// AddControlFrame_NotWritable above — it never re-enables writing or flushes;
// confirm whether a batching/flush phase was intended here.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       AddControlFrame_NotWritableBatchThenFlush) {
  delegate_.SetCanNotWrite();
  QuicRstStreamFrame* rst_frame = CreateRstStreamFrame();
  const bool consumed =
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                                  false);
  EXPECT_FALSE(consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  // Rejected frame: the test still owns it.
  delete rst_frame;
}
// A consumed control frame followed by Flush() produces exactly one packet
// containing a single RST_STREAM frame.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       AddControlFrame_WritableAndShouldFlush) {
  delegate_.SetCanWriteAnything();
  // Expectation must be set before Flush() triggers serialization.
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  creator_.ConsumeRetransmittableControlFrame(QuicFrame(CreateRstStreamFrame()),
                                              false);
  creator_.Flush();
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_rst_stream_frames = 1;
  CheckPacketContains(contents, 0);
}
// Crypto data at ENCRYPTION_INITIAL is fully consumed and serialized into a
// packet holding one CRYPTO frame plus padding.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeCryptoData) {
  delegate_.SetCanWriteAnything();
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  std::string data = "crypto data";
  size_t consumed_bytes =
      creator_.ConsumeCryptoData(ENCRYPTION_INITIAL, data, 0);
  creator_.Flush();
  EXPECT_EQ(data.length(), consumed_bytes);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_crypto_frames = 1;
  // Handshake packets are fully padded by default in this fixture.
  contents.num_padding_frames = 1;
  CheckPacketContains(contents, 0);
}
// When the delegate cannot write, ConsumeCryptoData consumes nothing and no
// packet is generated.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeCryptoDataCheckShouldGeneratePacket) {
  delegate_.SetCanNotWrite();
  EXPECT_CALL(delegate_, OnSerializedPacket(_)).Times(0);
  std::string data = "crypto data";
  size_t consumed_bytes =
      creator_.ConsumeCryptoData(ENCRYPTION_INITIAL, data, 0);
  creator_.Flush();
  EXPECT_EQ(0u, consumed_bytes);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
}
// If the flow-control window is one byte short of the data, only
// data.length() - 1 bytes are consumed and the FIN is not consumed.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeDataAdjustWriteLengthAfterBundledData) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  creator_.SetTransmissionType(NOT_RETRANSMISSION);
  delegate_.SetCanWriteAnything();
  const std::string data(1000, 'D');
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      framer_.transport_version(), Perspective::IS_CLIENT);
  // Window is one byte smaller than the write, capping consumption.
  EXPECT_CALL(delegate_, GetFlowControlSendWindowSize(stream_id))
      .WillOnce(Return(data.length() - 1));
  QuicConsumedData consumed = creator_.ConsumeData(stream_id, data, 0u, FIN);
  EXPECT_EQ(consumed.bytes_consumed, data.length() - 1);
  EXPECT_FALSE(consumed.fin_consumed);
}
// If the flow-control window exactly covers the data, everything (including
// the FIN) is consumed.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeDataDoesNotAdjustWriteLengthAfterBundledData) {
  creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  creator_.SetTransmissionType(NOT_RETRANSMISSION);
  delegate_.SetCanWriteAnything();
  const std::string data(1000, 'D');
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      framer_.transport_version(), Perspective::IS_CLIENT);
  EXPECT_CALL(delegate_, GetFlowControlSendWindowSize(stream_id))
      .WillOnce(Return(data.length()));
  QuicConsumedData consumed = creator_.ConsumeData(stream_id, data, 0u, FIN);
  EXPECT_EQ(consumed.bytes_consumed, data.length());
  EXPECT_TRUE(consumed.fin_consumed);
}
// With a non-writable delegate, ConsumeData consumes zero bytes, does not
// consume the FIN, and queues nothing.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeData_NotWritable) {
  delegate_.SetCanNotWrite();
  const QuicConsumedData result = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      "foo", 0, FIN);
  EXPECT_EQ(0u, result.bytes_consumed);
  EXPECT_FALSE(result.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
}
// A writable delegate accepts the data and FIN; without a flush, the stream
// frame remains pending in the creator.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeData_WritableAndShouldNotFlush) {
  delegate_.SetCanWriteAnything();
  const QuicConsumedData result = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      "foo", 0, FIN);
  EXPECT_EQ(3u, result.bytes_consumed);
  EXPECT_TRUE(result.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
}
// ConsumeData followed by Flush() emits one packet with a single stream
// frame; nothing remains pending afterwards.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeData_WritableAndShouldFlush) {
  delegate_.SetCanWriteAnything();
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      "foo", 0, FIN);
  creator_.Flush();
  EXPECT_EQ(3u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
}
// Handshake data (CRYPTO frame or crypto-stream frame, depending on
// version) is consumed and the packet is padded up to the full default
// packet size.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeData_Handshake) {
  delegate_.SetCanWriteAnything();
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  const std::string data = "foo bar";
  size_t consumed_bytes = 0;
  // IETF versions carry handshake data in CRYPTO frames; older versions use
  // the dedicated crypto stream.
  if (QuicVersionUsesCryptoFrames(framer_.transport_version())) {
    consumed_bytes = creator_.ConsumeCryptoData(ENCRYPTION_INITIAL, data, 0);
  } else {
    consumed_bytes =
        creator_
            .ConsumeData(
                QuicUtils::GetCryptoStreamId(framer_.transport_version()), data,
                0, NO_FIN)
            .bytes_consumed;
  }
  EXPECT_EQ(7u, consumed_bytes);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  if (QuicVersionUsesCryptoFrames(framer_.transport_version())) {
    contents.num_crypto_frames = 1;
  } else {
    contents.num_stream_frames = 1;
  }
  contents.num_padding_frames = 1;
  CheckPacketContains(contents, 0);
  ASSERT_EQ(1u, packets_.size());
  ASSERT_EQ(kDefaultMaxPacketSize, creator_.max_packet_length());
  // Full padding: the handshake packet occupies the entire packet size.
  EXPECT_EQ(kDefaultMaxPacketSize, packets_[0].encrypted_length);
}
// With full crypto-handshake padding disabled, the handshake packet is only
// as large as its contents require.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeData_Handshake_PaddingDisabled) {
  creator_.set_fully_pad_crypto_handshake_packets(false);
  delegate_.SetCanWriteAnything();
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  const std::string data = "foo";
  size_t bytes_consumed = 0;
  if (QuicVersionUsesCryptoFrames(framer_.transport_version())) {
    bytes_consumed = creator_.ConsumeCryptoData(ENCRYPTION_INITIAL, data, 0);
  } else {
    bytes_consumed =
        creator_
            .ConsumeData(
                QuicUtils::GetCryptoStreamId(framer_.transport_version()), data,
                0, NO_FIN)
            .bytes_consumed;
  }
  EXPECT_EQ(3u, bytes_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  if (QuicVersionUsesCryptoFrames(framer_.transport_version())) {
    contents.num_crypto_frames = 1;
  } else {
    contents.num_stream_frames = 1;
  }
  contents.num_padding_frames = 0;
  CheckPacketContains(contents, 0);
  ASSERT_EQ(1u, packets_.size());
  ASSERT_EQ(kDefaultMaxPacketSize, creator_.max_packet_length());
  // CRYPTO-frame encoding is one byte larger than the stream-frame encoding
  // for this payload.
  size_t expected_packet_length = 31;
  if (QuicVersionUsesCryptoFrames(framer_.transport_version())) {
    expected_packet_length = 32;
  }
  EXPECT_EQ(expected_packet_length, packets_[0].encrypted_length);
}
// Consuming zero bytes without a FIN is a caller error and must trip a
// QUIC_BUG rather than silently produce an empty stream frame.
// (Fix: the stream-id helper was spelled with a redundant
// `QuicUtils::QuicUtils::` qualification — legal via the injected class
// name, but confusing; a single qualifier is used here.)
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeData_EmptyData) {
  delegate_.SetCanWriteAnything();
  EXPECT_QUIC_BUG(creator_.ConsumeData(
                      QuicUtils::GetFirstBidirectionalStreamId(
                          framer_.transport_version(), Perspective::IS_CLIENT),
                      {}, 0, NO_FIN),
                  "Attempt to consume empty data without FIN.");
}
// Two ConsumeData calls (different streams) batch into pending frames; the
// second call's result is what is checked here.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeDataMultipleTimes_WritableAndShouldNotFlush) {
  delegate_.SetCanWriteAnything();
  creator_.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
                           framer_.transport_version(), Perspective::IS_CLIENT),
                       "foo", 0, FIN);
  QuicConsumedData consumed = creator_.ConsumeData(3, "quux", 3, NO_FIN);
  EXPECT_EQ(4u, consumed.bytes_consumed);
  EXPECT_FALSE(consumed.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
}
// Consecutive writes to the same stream coalesce: after flushing, the
// packet carries a single stream frame covering both writes.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeData_BatchOperations) {
  delegate_.SetCanWriteAnything();
  creator_.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
                           framer_.transport_version(), Perspective::IS_CLIENT),
                       "foo", 0, NO_FIN);
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      "quux", 3, FIN);
  EXPECT_EQ(4u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  creator_.Flush();
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  // One frame only: the two writes were merged.
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
}
// Sizes the max packet length so the first 3-byte write plus part of the
// second forces a packet boundary, producing two packets of one stream
// frame each.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       ConsumeData_FramesPreviouslyQueued) {
  // Packet length budget: ciphertext overhead + header + a 3-byte stream
  // frame + a 1-byte (last) stream frame.
  size_t length =
      TaggingEncrypter(0x00).GetCiphertextSize(0) +
      GetPacketHeaderSize(
          framer_.transport_version(),
          creator_.GetDestinationConnectionIdLength(),
          creator_.GetSourceConnectionIdLength(),
          QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
          !kIncludeDiversificationNonce,
          QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
          QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_), 0,
          QuicPacketCreatorPeer::GetLengthLength(&creator_)) +
      QuicFramer::GetMinStreamFrameSize(framer_.transport_version(), 1, 0,
                                        false, 3) +
      3 +
      QuicFramer::GetMinStreamFrameSize(framer_.transport_version(), 1, 0, true,
                                        1) +
      1;
  creator_.SetMaxPacketLength(length);
  delegate_.SetCanWriteAnything();
  {
    InSequence dummy;
    // Exactly two packets, serialized in order.
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillOnce(
            Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillOnce(
            Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  }
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      "foo", 0, NO_FIN);
  EXPECT_EQ(3u, consumed.bytes_consumed);
  EXPECT_FALSE(consumed.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
  consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      "bar", 3, FIN);
  EXPECT_EQ(3u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
  creator_.FlushCurrentPacket();
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
  CheckPacketContains(contents, 1);
}
// The fast path consumes a large write (with implicit FIN) across multiple
// packets and stamps each packet with the current transmission type.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeDataFastPath) {
  delegate_.SetCanWriteAnything();
  creator_.SetTransmissionType(LOSS_RETRANSMISSION);
  const std::string data(10000, '?');
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  QuicConsumedData consumed = creator_.ConsumeDataFastPath(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data);
  EXPECT_EQ(10000u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
  EXPECT_FALSE(packets_.empty());
  SerializedPacket& packet = packets_.back();
  EXPECT_TRUE(!packet.retransmittable_frames.empty());
  EXPECT_EQ(LOSS_RETRANSMISSION, packet.transmission_type);
  EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
  const QuicStreamFrame& stream_frame =
      packet.retransmittable_frames.front().stream_frame;
  // The last packet's frame ends exactly at the end of the 10000-byte write.
  EXPECT_EQ(10000u, stream_frame.data_length + stream_frame.offset);
}
// A 10000-byte write is fully consumed across multiple packets; the last
// packet's stream frame ends at offset + length == 10000.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeDataLarge) {
  delegate_.SetCanWriteAnything();
  const std::string data(10000, '?');
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data, 0, FIN);
  EXPECT_EQ(10000u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
  EXPECT_FALSE(packets_.empty());
  SerializedPacket& packet = packets_.back();
  EXPECT_TRUE(!packet.retransmittable_frames.empty());
  EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
  const QuicStreamFrame& stream_frame =
      packet.retransmittable_frames.front().stream_frame;
  EXPECT_EQ(10000u, stream_frame.data_length + stream_frame.offset);
}
// A control frame rejected while unwritable is re-offered once writing is
// enabled; then a large write with additional control frames flushes fully.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeDataLargeSendAckFalse) {
  delegate_.SetCanNotWrite();
  QuicRstStreamFrame* rst_frame = CreateRstStreamFrame();
  // First attempt fails: delegate cannot write, frame stays with the test.
  const bool success =
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                                  true);
  EXPECT_FALSE(success);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  delegate_.SetCanWriteAnything();
  // Second attempt hands ownership of the same frame to the creator.
  creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                              false);
  const std::string data(10000, '?');
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  creator_.ConsumeRetransmittableControlFrame(QuicFrame(CreateRstStreamFrame()),
                                              true);
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data, 0, FIN);
  creator_.Flush();
  EXPECT_EQ(10000u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  EXPECT_FALSE(packets_.empty());
  SerializedPacket& packet = packets_.back();
  EXPECT_TRUE(!packet.retransmittable_frames.empty());
  EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
  const QuicStreamFrame& stream_frame =
      packet.retransmittable_frames.front().stream_frame;
  EXPECT_EQ(10000u, stream_frame.data_length + stream_frame.offset);
}
// After toggling unwritable -> writable, a large write is fully consumed
// and flushed across multiple packets.
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConsumeDataLargeSendAckTrue) {
  delegate_.SetCanNotWrite();
  delegate_.SetCanWriteAnything();
  const std::string data(10000, '?');
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data, 0, FIN);
  creator_.Flush();
  EXPECT_EQ(10000u, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  EXPECT_FALSE(packets_.empty());
  SerializedPacket& packet = packets_.back();
  EXPECT_TRUE(!packet.retransmittable_frames.empty());
  EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
  const QuicStreamFrame& stream_frame =
      packet.retransmittable_frames.front().stream_frame;
  EXPECT_EQ(10000u, stream_frame.data_length + stream_frame.offset);
}
// Frames rejected while unwritable are batched once writing resumes:
// RST_STREAM + stream data (+ GOAWAY on non-IETF versions) all land in a
// single flushed packet.
TEST_F(QuicPacketCreatorMultiplePacketsTest, NotWritableThenBatchOperations) {
  delegate_.SetCanNotWrite();
  QuicRstStreamFrame* rst_frame = CreateRstStreamFrame();
  const bool consumed =
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                                  true);
  EXPECT_FALSE(consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  EXPECT_FALSE(creator_.HasPendingStreamFramesOfStream(3));
  delegate_.SetCanWriteAnything();
  // Re-offer the same frame now that writing is allowed; creator takes it.
  EXPECT_TRUE(
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                                  false));
  creator_.ConsumeData(3, "quux", 0, NO_FIN);
  // GOAWAY exists only in pre-IETF frame sets.
  if (!VersionHasIetfQuicFrames(framer_.transport_version())) {
    creator_.ConsumeRetransmittableControlFrame(QuicFrame(CreateGoAwayFrame()),
                                                false);
  }
  EXPECT_TRUE(creator_.HasPendingStreamFramesOfStream(3));
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  creator_.Flush();
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  EXPECT_FALSE(creator_.HasPendingStreamFramesOfStream(3));
  PacketContents contents;
  contents.num_ack_frames = 0;
  if (!VersionHasIetfQuicFrames(framer_.transport_version())) {
    contents.num_goaway_frames = 1;
  } else {
    contents.num_goaway_frames = 0;
  }
  contents.num_rst_stream_frames = 1;
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
}
// As above, but the stream write exceeds one packet: the batch spills into
// a second packet containing the remaining stream data (+ GOAWAY on
// non-IETF versions).
TEST_F(QuicPacketCreatorMultiplePacketsTest, NotWritableThenBatchOperations2) {
  delegate_.SetCanNotWrite();
  QuicRstStreamFrame* rst_frame = CreateRstStreamFrame();
  const bool success =
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                                  true);
  EXPECT_FALSE(success);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  delegate_.SetCanWriteAnything();
  {
    InSequence dummy;
    // Two packets, in order.
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillOnce(
            Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
    EXPECT_CALL(delegate_, OnSerializedPacket(_))
        .WillOnce(
            Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  }
  EXPECT_TRUE(
      creator_.ConsumeRetransmittableControlFrame(QuicFrame(rst_frame),
                                                  false));
  // Deliberately larger than one packet so the write spans two.
  size_t data_len = kDefaultMaxPacketSize + 100;
  const std::string data(data_len, '?');
  QuicConsumedData consumed = creator_.ConsumeData(3, data, 0, FIN);
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  if (!VersionHasIetfQuicFrames(framer_.transport_version())) {
    creator_.ConsumeRetransmittableControlFrame(QuicFrame(CreateGoAwayFrame()),
                                                false);
  }
  creator_.Flush();
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  PacketContents contents;
  contents.num_ack_frames = 0;
  contents.num_rst_stream_frames = 1;
  contents.num_stream_frames = 1;
  CheckPacketContains(contents, 0);
  PacketContents contents2;
  if (!VersionHasIetfQuicFrames(framer_.transport_version())) {
    contents2.num_goaway_frames = 1;
  } else {
    contents2.num_goaway_frames = 0;
  }
  contents2.num_stream_frames = 1;
  CheckPacketContains(contents2, 1);
}
// A packet filled while the type was LOSS_RETRANSMISSION keeps that type
// even if the creator's type changes before the packet is flushed out by a
// subsequent write.
TEST_F(QuicPacketCreatorMultiplePacketsTest, PacketTransmissionType) {
  delegate_.SetCanWriteAnything();
  creator_.SetTransmissionType(LOSS_RETRANSMISSION);
  size_t data_len = 1220;
  const std::string data(data_len, '?');
  QuicStreamId stream1_id = QuicUtils::GetFirstBidirectionalStreamId(
      framer_.transport_version(), Perspective::IS_CLIENT);
  QuicConsumedData consumed = creator_.ConsumeData(stream1_id, data, 0, NO_FIN);
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  // The first write must exactly fill the packet for the test to be valid.
  ASSERT_EQ(0u, creator_.BytesFree())
      << "Test setup failed: Please increase data_len to "
      << data_len + creator_.BytesFree() << " bytes.";
  creator_.SetTransmissionType(NOT_RETRANSMISSION);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  // This write forces the full first packet out.
  QuicStreamId stream2_id = stream1_id + 4;
  consumed = creator_.ConsumeData(stream2_id, data, 0, NO_FIN);
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  ASSERT_EQ(1u, packets_.size());
  ASSERT_TRUE(packets_[0].encrypted_buffer);
  ASSERT_EQ(1u, packets_[0].retransmittable_frames.size());
  EXPECT_EQ(stream1_id,
            packets_[0].retransmittable_frames[0].stream_frame.stream_id);
  // Type recorded when the packet was filled, not when it was serialized.
  EXPECT_EQ(packets_[0].transmission_type, LOSS_RETRANSMISSION);
}
// From the server perspective, the reported destination connection id
// length stays 0 regardless of the configured server connection id length.
TEST_F(QuicPacketCreatorMultiplePacketsTest, TestConnectionIdLength) {
  QuicFramerPeer::SetPerspective(&framer_, Perspective::IS_SERVER);
  creator_.SetServerConnectionIdLength(0);
  EXPECT_EQ(0, creator_.GetDestinationConnectionIdLength());
  for (size_t length = 1; length < 10; length++) {
    creator_.SetServerConnectionIdLength(length);
    EXPECT_EQ(0, creator_.GetDestinationConnectionIdLength());
  }
}
// Setting a larger max packet length before any writes makes every full
// packet exactly that size.
TEST_F(QuicPacketCreatorMultiplePacketsTest, SetMaxPacketLength_Initial) {
  delegate_.SetCanWriteAnything();
  // Slightly more than three packets' worth of data.
  size_t data_len = 3 * kDefaultMaxPacketSize + 1;
  size_t packet_len = kDefaultMaxPacketSize + 100;
  ASSERT_LE(packet_len, kMaxOutgoingPacketSize);
  creator_.SetMaxPacketLength(packet_len);
  EXPECT_EQ(packet_len, creator_.max_packet_length());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .Times(3)
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  const std::string data(data_len, '?');
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data,
      0, FIN);
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(3u, packets_.size());
  EXPECT_EQ(packet_len, packets_[0].encrypted_length);
  EXPECT_EQ(packet_len, packets_[1].encrypted_length);
  CheckAllPacketsHaveSingleStreamFrame();
}
// Raising max packet length between bursts: the earlier packets use the old
// size, the later one the new size.
TEST_F(QuicPacketCreatorMultiplePacketsTest, SetMaxPacketLength_Middle) {
  delegate_.SetCanWriteAnything();
  size_t data_len = kDefaultMaxPacketSize;
  size_t packet_len = kDefaultMaxPacketSize + 100;
  ASSERT_LE(packet_len, kMaxOutgoingPacketSize);
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .Times(3)
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  const std::string data(data_len, '?');
  // First burst at the default packet size (spills into two packets).
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data,
      0, NO_FIN);
  creator_.Flush();
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  EXPECT_FALSE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(2u, packets_.size());
  // Grow the packet length for the second burst.
  creator_.SetMaxPacketLength(packet_len);
  EXPECT_EQ(packet_len, creator_.max_packet_length());
  creator_.AttachPacketFlusher();
  consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data, data_len, FIN);
  creator_.Flush();
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(3u, packets_.size());
  EXPECT_EQ(kDefaultMaxPacketSize, packets_[0].encrypted_length);
  EXPECT_LE(kDefaultMaxPacketSize, packets_[2].encrypted_length);
  CheckAllPacketsHaveSingleStreamFrame();
}
// Flushing a half-full packet, then raising the max packet length: the
// first packet is under the default size, the next full packet uses the new
// length.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       SetMaxPacketLength_MidpacketFlush) {
  delegate_.SetCanWriteAnything();
  size_t first_write_len = kDefaultMaxPacketSize / 2;
  size_t packet_len = kDefaultMaxPacketSize + 100;
  size_t second_write_len = packet_len + 1;
  ASSERT_LE(packet_len, kMaxOutgoingPacketSize);
  // Half-fill the current packet; nothing is serialized yet.
  const std::string first_write(first_write_len, '?');
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      first_write,
      0, NO_FIN);
  EXPECT_EQ(first_write_len, consumed.bytes_consumed);
  EXPECT_FALSE(consumed.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(0u, packets_.size());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  creator_.FlushCurrentPacket();
  creator_.SetMaxPacketLength(packet_len);
  EXPECT_EQ(packet_len, creator_.max_packet_length());
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  // One byte more than a (new-size) packet: fills one packet and leaves a
  // pending remainder.
  const std::string second_write(second_write_len, '?');
  consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      second_write,
      first_write_len, FIN);
  EXPECT_EQ(second_write_len, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_TRUE(creator_.HasPendingFrames());
  EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(2u, packets_.size());
  EXPECT_GT(kDefaultMaxPacketSize, packets_[0].encrypted_length);
  EXPECT_EQ(packet_len, packets_[1].encrypted_length);
  CheckAllPacketsHaveSingleStreamFrame();
}
// A connectivity probe contains a PATH_CHALLENGE (IETF) or PING (gQUIC)
// frame plus padding.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       GenerateConnectivityProbingPacket) {
  delegate_.SetCanWriteAnything();
  std::unique_ptr<SerializedPacket> probing_packet;
  if (VersionHasIetfQuicFrames(framer_.transport_version())) {
    QuicPathFrameBuffer payload = {
        {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xfe}};
    probing_packet =
        creator_.SerializePathChallengeConnectivityProbingPacket(payload);
  } else {
    probing_packet = creator_.SerializeConnectivityProbingPacket();
  }
  ASSERT_TRUE(simple_framer_.ProcessPacket(QuicEncryptedPacket(
      probing_packet->encrypted_buffer, probing_packet->encrypted_length)));
  EXPECT_EQ(2u, simple_framer_.num_frames());
  if (VersionHasIetfQuicFrames(framer_.transport_version())) {
    EXPECT_EQ(1u, simple_framer_.path_challenge_frames().size());
  } else {
    EXPECT_EQ(1u, simple_framer_.ping_frames().size());
  }
  EXPECT_EQ(1u, simple_framer_.padding_frames().size());
}
// An MTU discovery probe is serialized at exactly the target size and
// contains an MTU-discovery (ping) frame plus padding.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       GenerateMtuDiscoveryPacket_Simple) {
  delegate_.SetCanWriteAnything();
  const size_t target_mtu = kDefaultMaxPacketSize + 100;
  static_assert(target_mtu < kMaxOutgoingPacketSize,
                "The MTU probe used by the test exceeds maximum packet size");
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .WillOnce(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  creator_.GenerateMtuDiscoveryPacket(target_mtu);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(1u, packets_.size());
  EXPECT_EQ(target_mtu, packets_[0].encrypted_length);
  PacketContents contents;
  contents.num_mtu_discovery_frames = 1;
  contents.num_padding_frames = 1;
  CheckPacketContains(contents, 0);
}
// An MTU probe issued between two data bursts does not disturb them: the
// five resulting packets are stream/stream/probe/stream/stream, with only
// the probe at the target MTU.
TEST_F(QuicPacketCreatorMultiplePacketsTest,
       GenerateMtuDiscoveryPacket_SurroundedByData) {
  delegate_.SetCanWriteAnything();
  const size_t target_mtu = kDefaultMaxPacketSize + 100;
  static_assert(target_mtu < kMaxOutgoingPacketSize,
                "The MTU probe used by the test exceeds maximum packet size");
  // Each burst spans two packets (one byte over the target MTU).
  const size_t data_len = target_mtu + 1;
  EXPECT_CALL(delegate_, OnSerializedPacket(_))
      .Times(5)
      .WillRepeatedly(
          Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
  const std::string data(data_len, '?');
  QuicConsumedData consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data,
      0, NO_FIN);
  creator_.Flush();
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  EXPECT_FALSE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  creator_.GenerateMtuDiscoveryPacket(target_mtu);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  creator_.AttachPacketFlusher();
  consumed = creator_.ConsumeData(
      QuicUtils::GetFirstBidirectionalStreamId(framer_.transport_version(),
                                               Perspective::IS_CLIENT),
      data,
      data_len, FIN);
  creator_.Flush();
  EXPECT_EQ(data_len, consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_FALSE(creator_.HasPendingFrames());
  EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
  ASSERT_EQ(5u, packets_.size());
  EXPECT_EQ(kDefaultMaxPacketSize, packets_[0].encrypted_length);
  EXPECT_EQ(target_mtu, packets_[2].encrypted_length);
  EXPECT_EQ(kDefaultMaxPacketSize, packets_[3].encrypted_length);
  PacketContents probe_contents;
  probe_contents.num_mtu_discovery_frames = 1;
  probe_contents.num_padding_frames = 1;
  CheckPacketHasSingleStreamFrame(0);
  CheckPacketHasSingleStreamFrame(1);
  CheckPacketContains(probe_contents, 2);
  CheckPacketHasSingleStreamFrame(3);
  CheckPacketHasSingleStreamFrame(4);
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
ConnectionCloseFrameLargerThanPacketSize) {
delegate_.SetCanWriteAnything();
char buf[2000] = {};
absl::string_view error_details(buf, 2000);
const QuicErrorCode kQuicErrorCode = QUIC_PACKET_WRITE_ERROR;
QuicConnectionCloseFrame* frame = new QuicConnectionCloseFrame(
framer_.transport_version(), kQuicErrorCode, NO_IETF_QUIC_ERROR,
std::string(error_details),
0);
creator_.ConsumeRetransmittableControlFrame(QuicFrame(frame),
false);
EXPECT_TRUE(creator_.HasPendingFrames());
EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
RandomPaddingAfterFinSingleStreamSinglePacket) {
const QuicByteCount kStreamFramePayloadSize = 100u;
char buf[kStreamFramePayloadSize] = {};
const QuicStreamId kDataStreamId = 5;
size_t length =
TaggingEncrypter(0x00).GetCiphertextSize(0) +
GetPacketHeaderSize(
framer_.transport_version(),
creator_.GetDestinationConnectionIdLength(),
creator_.GetSourceConnectionIdLength(),
QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
!kIncludeDiversificationNonce,
QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_), 0,
QuicPacketCreatorPeer::GetLengthLength(&creator_)) +
QuicFramer::GetMinStreamFrameSize(
framer_.transport_version(), kDataStreamId, 0,
false,
kStreamFramePayloadSize + kMaxNumRandomPaddingBytes) +
kStreamFramePayloadSize + kMaxNumRandomPaddingBytes;
creator_.SetMaxPacketLength(length);
delegate_.SetCanWriteAnything();
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(
Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
QuicConsumedData consumed = creator_.ConsumeData(
kDataStreamId, absl::string_view(buf, kStreamFramePayloadSize), 0,
FIN_AND_PADDING);
creator_.Flush();
EXPECT_EQ(kStreamFramePayloadSize, consumed.bytes_consumed);
EXPECT_FALSE(creator_.HasPendingFrames());
EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
EXPECT_EQ(1u, packets_.size());
PacketContents contents;
contents.num_padding_frames = 1;
contents.num_stream_frames = 1;
CheckPacketContains(contents, 0);
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
RandomPaddingAfterFinSingleStreamMultiplePackets) {
const QuicByteCount kStreamFramePayloadSize = 100u;
char buf[kStreamFramePayloadSize] = {};
const QuicStreamId kDataStreamId = 5;
size_t length =
TaggingEncrypter(0x00).GetCiphertextSize(0) +
GetPacketHeaderSize(
framer_.transport_version(),
creator_.GetDestinationConnectionIdLength(),
creator_.GetSourceConnectionIdLength(),
QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
!kIncludeDiversificationNonce,
QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_), 0,
QuicPacketCreatorPeer::GetLengthLength(&creator_)) +
QuicFramer::GetMinStreamFrameSize(
framer_.transport_version(), kDataStreamId, 0,
false, kStreamFramePayloadSize + 1) +
kStreamFramePayloadSize + 1;
creator_.SetMaxPacketLength(length);
delegate_.SetCanWriteAnything();
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillRepeatedly(
Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
QuicConsumedData consumed = creator_.ConsumeData(
kDataStreamId, absl::string_view(buf, kStreamFramePayloadSize), 0,
FIN_AND_PADDING);
creator_.Flush();
EXPECT_EQ(kStreamFramePayloadSize, consumed.bytes_consumed);
EXPECT_FALSE(creator_.HasPendingFrames());
EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
EXPECT_LE(1u, packets_.size());
PacketContents contents;
contents.num_stream_frames = 1;
contents.num_padding_frames = 1;
CheckPacketContains(contents, 0);
for (size_t i = 1; i < packets_.size(); ++i) {
contents.num_stream_frames = 0;
contents.num_padding_frames = 1;
CheckPacketContains(contents, i);
}
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
RandomPaddingAfterFinMultipleStreamsMultiplePackets) {
const QuicByteCount kStreamFramePayloadSize = 100u;
char buf[kStreamFramePayloadSize] = {};
const QuicStreamId kDataStreamId1 = 5;
const QuicStreamId kDataStreamId2 = 6;
size_t length =
TaggingEncrypter(0x00).GetCiphertextSize(0) +
GetPacketHeaderSize(
framer_.transport_version(),
creator_.GetDestinationConnectionIdLength(),
creator_.GetSourceConnectionIdLength(),
QuicPacketCreatorPeer::SendVersionInPacket(&creator_),
!kIncludeDiversificationNonce,
QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
QuicPacketCreatorPeer::GetRetryTokenLengthLength(&creator_), 0,
QuicPacketCreatorPeer::GetLengthLength(&creator_)) +
QuicFramer::GetMinStreamFrameSize(
framer_.transport_version(), kDataStreamId1, 0,
false, kStreamFramePayloadSize) +
kStreamFramePayloadSize +
QuicFramer::GetMinStreamFrameSize(framer_.transport_version(),
kDataStreamId1, 0,
false, 1) +
1;
creator_.SetMaxPacketLength(length);
delegate_.SetCanWriteAnything();
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillRepeatedly(
Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
QuicConsumedData consumed = creator_.ConsumeData(
kDataStreamId1, absl::string_view(buf, kStreamFramePayloadSize), 0,
FIN_AND_PADDING);
EXPECT_EQ(kStreamFramePayloadSize, consumed.bytes_consumed);
consumed = creator_.ConsumeData(
kDataStreamId2, absl::string_view(buf, kStreamFramePayloadSize), 0,
FIN_AND_PADDING);
EXPECT_EQ(kStreamFramePayloadSize, consumed.bytes_consumed);
creator_.Flush();
EXPECT_FALSE(creator_.HasPendingFrames());
EXPECT_FALSE(creator_.HasPendingRetransmittableFrames());
EXPECT_LE(2u, packets_.size());
PacketContents contents;
contents.num_stream_frames = 2;
CheckPacketContains(contents, 0);
contents.num_stream_frames = 1;
contents.num_padding_frames = 1;
CheckPacketContains(contents, 1);
for (size_t i = 2; i < packets_.size(); ++i) {
contents.num_stream_frames = 0;
contents.num_padding_frames = 1;
CheckPacketContains(contents, i);
}
}
TEST_F(QuicPacketCreatorMultiplePacketsTest, AddMessageFrame) {
if (framer_.version().UsesTls()) {
creator_.SetMaxDatagramFrameSize(kMaxAcceptedDatagramFrameSize);
}
delegate_.SetCanWriteAnything();
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(
Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
creator_.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
framer_.transport_version(), Perspective::IS_CLIENT),
"foo", 0, FIN);
EXPECT_EQ(MESSAGE_STATUS_SUCCESS,
creator_.AddMessageFrame(1, MemSliceFromString("message")));
EXPECT_TRUE(creator_.HasPendingFrames());
EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
EXPECT_EQ(MESSAGE_STATUS_SUCCESS,
creator_.AddMessageFrame(
2, MemSliceFromString(std::string(
creator_.GetCurrentLargestMessagePayload(), 'a'))));
EXPECT_TRUE(creator_.HasPendingRetransmittableFrames());
EXPECT_EQ(MESSAGE_STATUS_TOO_LARGE,
creator_.AddMessageFrame(
3, MemSliceFromString(std::string(
creator_.GetCurrentLargestMessagePayload() + 10, 'a'))));
}
TEST_F(QuicPacketCreatorMultiplePacketsTest, ConnectionId) {
creator_.SetServerConnectionId(TestConnectionId(0x1337));
EXPECT_EQ(TestConnectionId(0x1337), creator_.GetDestinationConnectionId());
EXPECT_EQ(EmptyQuicConnectionId(), creator_.GetSourceConnectionId());
if (!framer_.version().SupportsClientConnectionIds()) {
return;
}
creator_.SetClientConnectionId(TestConnectionId(0x33));
EXPECT_EQ(TestConnectionId(0x1337), creator_.GetDestinationConnectionId());
EXPECT_EQ(TestConnectionId(0x33), creator_.GetSourceConnectionId());
}
TEST_F(QuicPacketCreatorMultiplePacketsTest, ExtraPaddingNeeded) {
if (!framer_.version().HasHeaderProtection()) {
return;
}
delegate_.SetCanWriteAnything();
EXPECT_EQ(QuicPacketCreatorPeer::GetPacketNumberLength(&creator_),
PACKET_1BYTE_PACKET_NUMBER);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(
Invoke(this, &QuicPacketCreatorMultiplePacketsTest::SavePacket));
creator_.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
framer_.transport_version(), Perspective::IS_CLIENT),
"", 0, FIN);
creator_.Flush();
ASSERT_FALSE(packets_[0].nonretransmittable_frames.empty());
QuicFrame padding = packets_[0].nonretransmittable_frames[0];
EXPECT_EQ(padding.padding_frame.num_padding_bytes, 1);
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
PeerAddressContextWithSameAddress) {
QuicConnectionId client_connection_id = TestConnectionId(1);
QuicConnectionId server_connection_id = TestConnectionId(2);
QuicSocketAddress peer_addr(QuicIpAddress::Any4(), 12345);
creator_.SetDefaultPeerAddress(peer_addr);
creator_.SetClientConnectionId(client_connection_id);
creator_.SetServerConnectionId(server_connection_id);
EXPECT_CALL(delegate_, ShouldGeneratePacket(_, _))
.WillRepeatedly(Return(true));
EXPECT_EQ(3u, creator_
.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
creator_.transport_version(),
Perspective::IS_CLIENT),
"foo", 0, NO_FIN)
.bytes_consumed);
EXPECT_TRUE(creator_.HasPendingFrames());
{
QuicPacketCreator::ScopedPeerAddressContext context(
&creator_, peer_addr, client_connection_id, server_connection_id);
ASSERT_EQ(client_connection_id, creator_.GetClientConnectionId());
ASSERT_EQ(server_connection_id, creator_.GetServerConnectionId());
EXPECT_TRUE(creator_.HasPendingFrames());
EXPECT_EQ(3u, creator_
.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
creator_.transport_version(),
Perspective::IS_CLIENT),
"foo", 0, FIN)
.bytes_consumed);
}
EXPECT_TRUE(creator_.HasPendingFrames());
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke([=](SerializedPacket packet) {
EXPECT_EQ(peer_addr, packet.peer_address);
ASSERT_EQ(2u, packet.retransmittable_frames.size());
EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.back().type);
}));
creator_.FlushCurrentPacket();
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
PeerAddressContextWithDifferentAddress) {
QuicSocketAddress peer_addr(QuicIpAddress::Any4(), 12345);
creator_.SetDefaultPeerAddress(peer_addr);
EXPECT_CALL(delegate_, ShouldGeneratePacket(_, _))
.WillRepeatedly(Return(true));
EXPECT_EQ(3u, creator_
.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
creator_.transport_version(),
Perspective::IS_CLIENT),
"foo", 0, NO_FIN)
.bytes_consumed);
QuicSocketAddress peer_addr1(QuicIpAddress::Any4(), 12346);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke([=](SerializedPacket packet) {
EXPECT_EQ(peer_addr, packet.peer_address);
ASSERT_EQ(1u, packet.retransmittable_frames.size());
EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
}))
.WillOnce(Invoke([=](SerializedPacket packet) {
EXPECT_EQ(peer_addr1, packet.peer_address);
ASSERT_EQ(1u, packet.retransmittable_frames.size());
EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
}));
EXPECT_TRUE(creator_.HasPendingFrames());
{
QuicConnectionId client_connection_id = TestConnectionId(1);
QuicConnectionId server_connection_id = TestConnectionId(2);
QuicPacketCreator::ScopedPeerAddressContext context(
&creator_, peer_addr1, client_connection_id, server_connection_id);
ASSERT_EQ(client_connection_id, creator_.GetClientConnectionId());
ASSERT_EQ(server_connection_id, creator_.GetServerConnectionId());
EXPECT_FALSE(creator_.HasPendingFrames());
EXPECT_EQ(3u, creator_
.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
creator_.transport_version(),
Perspective::IS_CLIENT),
"foo", 0, FIN)
.bytes_consumed);
EXPECT_TRUE(creator_.HasPendingFrames());
}
EXPECT_FALSE(creator_.HasPendingFrames());
}
TEST_F(QuicPacketCreatorMultiplePacketsTest,
NestedPeerAddressContextWithDifferentAddress) {
QuicConnectionId client_connection_id1 = creator_.GetClientConnectionId();
QuicConnectionId server_connection_id1 = creator_.GetServerConnectionId();
QuicSocketAddress peer_addr(QuicIpAddress::Any4(), 12345);
creator_.SetDefaultPeerAddress(peer_addr);
QuicPacketCreator::ScopedPeerAddressContext context(
&creator_, peer_addr, client_connection_id1, server_connection_id1);
ASSERT_EQ(client_connection_id1, creator_.GetClientConnectionId());
ASSERT_EQ(server_connection_id1, creator_.GetServerConnectionId());
EXPECT_CALL(delegate_, ShouldGeneratePacket(_, _))
.WillRepeatedly(Return(true));
EXPECT_EQ(3u, creator_
.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
creator_.transport_version(),
Perspective::IS_CLIENT),
"foo", 0, NO_FIN)
.bytes_consumed);
EXPECT_TRUE(creator_.HasPendingFrames());
QuicSocketAddress peer_addr1(QuicIpAddress::Any4(), 12346);
EXPECT_CALL(delegate_, OnSerializedPacket(_))
.WillOnce(Invoke([=, this](SerializedPacket packet) {
EXPECT_EQ(peer_addr, packet.peer_address);
ASSERT_EQ(1u, packet.retransmittable_frames.size());
EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
QuicConnectionId client_connection_id2 = TestConnectionId(3);
QuicConnectionId server_connection_id2 = TestConnectionId(4);
QuicPacketCreator::ScopedPeerAddressContext context(
&creator_, peer_addr1, client_connection_id2,
server_connection_id2);
ASSERT_EQ(client_connection_id2, creator_.GetClientConnectionId());
ASSERT_EQ(server_connection_id2, creator_.GetServerConnectionId());
EXPECT_CALL(delegate_, ShouldGeneratePacket(_, _))
.WillRepeatedly(Return(true));
EXPECT_EQ(3u, creator_
.ConsumeData(QuicUtils::GetFirstBidirectionalStreamId(
creator_.transport_version(),
Perspective::IS_CLIENT),
"foo", 0, NO_FIN)
.bytes_consumed);
EXPECT_TRUE(creator_.HasPendingFrames());
creator_.FlushCurrentPacket();
}))
.WillOnce(Invoke([=](SerializedPacket packet) {
EXPECT_EQ(peer_addr1, packet.peer_address);
ASSERT_EQ(1u, packet.retransmittable_frames.size());
EXPECT_EQ(STREAM_FRAME, packet.retransmittable_frames.front().type);
}));
creator_.FlushCurrentPacket();
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_packet_creator.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_packet_creator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
bdce681d-16ba-4605-99c8-21cd2cd7c581 | cpp | tensorflow/tensorflow | coordinator | tensorflow/cc/training/coordinator.cc | tensorflow/cc/training/coordinator_test.cc | #include "tensorflow/cc/training/coordinator.h"
#include "absl/status/status.h"
#include "tensorflow/core/framework/cost_graph.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
Coordinator::Coordinator() : Coordinator(std::vector<error::Code>()) {}
Coordinator::Coordinator(const std::vector<error::Code>& clean_stop_errors)
: should_stop_(false) {
if (clean_stop_errors.empty()) {
clean_stop_errors_.insert(error::OUT_OF_RANGE);
} else {
for (const auto& code : clean_stop_errors) {
clean_stop_errors_.insert(static_cast<int>(code));
}
}
}
Coordinator::~Coordinator() {
RequestStop().IgnoreError();
Join().IgnoreError();
}
Status Coordinator::RegisterRunner(std::unique_ptr<RunnerInterface> runner) {
{
mutex_lock l(mu_);
if (should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"The coordinator has been stopped.");
}
}
mutex_lock l(runners_lock_);
runners_.push_back(std::move(runner));
return absl::OkStatus();
}
bool Coordinator::AllRunnersStopped() {
mutex_lock l(runners_lock_);
for (const auto& runner : runners_) {
if (runner->IsRunning()) {
return false;
}
}
return true;
}
Status Coordinator::RequestStop() {
mutex_lock l(mu_);
if (should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"The Coordinator is not running.");
}
should_stop_ = true;
wait_for_stop_.notify_all();
return absl::OkStatus();
}
bool Coordinator::ShouldStop() {
mutex_lock l(mu_);
return should_stop_;
}
Status Coordinator::Join() {
{
mutex_lock l(mu_);
if (!should_stop_) {
return Status(absl::StatusCode::kFailedPrecondition,
"Joining coordinator without requesting to stop.");
}
}
{
mutex_lock l(runners_lock_);
for (const auto& t : runners_) {
ReportStatus(t->Join());
}
runners_.clear();
}
return GetStatus();
}
void Coordinator::ReportStatus(const Status& status) {
mutex_lock l(status_lock_);
if (status.ok() || !status_.ok() ||
clean_stop_errors_.count(static_cast<int>(status.code())) > 0) {
return;
}
status_ = status;
}
Status Coordinator::GetStatus() {
mutex_lock l(status_lock_);
return status_;
}
void Coordinator::WaitForStop() {
mutex_lock l(mu_);
while (!should_stop_) {
wait_for_stop_.wait(l);
}
}
Status Coordinator::ExportCostGraph(CostGraphDef* cost_graph) const {
mutex_lock l(runners_lock_);
for (auto& t : runners_) {
Status s = t->ExportCostGraph(cost_graph);
if (!s.ok()) {
return s;
}
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/training/coordinator.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using error::Code;
void WaitForStopThread(Coordinator* coord, Notification* about_to_wait,
Notification* done) {
about_to_wait->Notify();
coord->WaitForStop();
done->Notify();
}
TEST(CoordinatorTest, TestStopAndWaitOnStop) {
Coordinator coord;
EXPECT_EQ(coord.ShouldStop(), false);
Notification about_to_wait;
Notification done;
Env::Default()->SchedClosure(
std::bind(&WaitForStopThread, &coord, &about_to_wait, &done));
about_to_wait.WaitForNotification();
Env::Default()->SleepForMicroseconds(1000 * 1000);
EXPECT_FALSE(done.HasBeenNotified());
TF_EXPECT_OK(coord.RequestStop());
done.WaitForNotification();
EXPECT_TRUE(coord.ShouldStop());
}
class MockQueueRunner : public RunnerInterface {
public:
explicit MockQueueRunner(Coordinator* coord) {
coord_ = coord;
join_counter_ = nullptr;
thread_pool_.reset(new thread::ThreadPool(Env::Default(), "test-pool", 10));
stopped_ = false;
}
MockQueueRunner(Coordinator* coord, int* join_counter)
: MockQueueRunner(coord) {
join_counter_ = join_counter;
}
void StartCounting(std::atomic<int>* counter, int until,
Notification* start = nullptr) {
thread_pool_->Schedule(
std::bind(&MockQueueRunner::CountThread, this, counter, until, start));
}
void StartSettingStatus(const Status& status, BlockingCounter* counter,
Notification* start) {
thread_pool_->Schedule(std::bind(&MockQueueRunner::SetStatusThread, this,
status, counter, start));
}
Status Join() override {
if (join_counter_ != nullptr) {
(*join_counter_)++;
}
thread_pool_.reset();
return status_;
}
Status GetStatus() { return status_; }
void SetStatus(const Status& status) { status_ = status; }
bool IsRunning() const override { return !stopped_; };
void Stop() { stopped_ = true; }
private:
void CountThread(std::atomic<int>* counter, int until, Notification* start) {
if (start != nullptr) start->WaitForNotification();
while (!coord_->ShouldStop() && counter->load() < until) {
(*counter)++;
Env::Default()->SleepForMicroseconds(10 * 1000);
}
coord_->RequestStop().IgnoreError();
}
void SetStatusThread(const Status& status, BlockingCounter* counter,
Notification* start) {
start->WaitForNotification();
SetStatus(status);
counter->DecrementCount();
}
std::unique_ptr<thread::ThreadPool> thread_pool_;
Status status_;
Coordinator* coord_;
int* join_counter_;
bool stopped_;
};
TEST(CoordinatorTest, TestRealStop) {
std::atomic<int> counter(0);
Coordinator coord;
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartCounting(&counter, 100);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartCounting(&counter, 100);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
while (counter.load() == 0)
;
TF_EXPECT_OK(coord.RequestStop());
int temp_counter = counter.load();
Env::Default()->SleepForMicroseconds(1000 * 1000);
EXPECT_EQ(temp_counter, counter.load());
TF_EXPECT_OK(coord.Join());
}
TEST(CoordinatorTest, TestRequestStop) {
Coordinator coord;
std::atomic<int> counter(0);
Notification start;
std::unique_ptr<MockQueueRunner> qr;
for (int i = 0; i < 10; i++) {
qr.reset(new MockQueueRunner(&coord));
qr->StartCounting(&counter, 10, &start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
}
start.Notify();
coord.WaitForStop();
EXPECT_EQ(coord.ShouldStop(), true);
EXPECT_EQ(counter.load(), 10);
TF_EXPECT_OK(coord.Join());
}
TEST(CoordinatorTest, TestJoin) {
Coordinator coord;
int join_counter = 0;
std::unique_ptr<MockQueueRunner> qr1(
new MockQueueRunner(&coord, &join_counter));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(
new MockQueueRunner(&coord, &join_counter));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
TF_EXPECT_OK(coord.RequestStop());
TF_EXPECT_OK(coord.Join());
EXPECT_EQ(join_counter, 2);
}
TEST(CoordinatorTest, StatusReporting) {
Coordinator coord({Code::CANCELLED, Code::OUT_OF_RANGE});
Notification start;
BlockingCounter counter(3);
std::unique_ptr<MockQueueRunner> qr1(new MockQueueRunner(&coord));
qr1->StartSettingStatus(Status(absl::StatusCode::kCancelled, ""), &counter,
&start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr1)));
std::unique_ptr<MockQueueRunner> qr2(new MockQueueRunner(&coord));
qr2->StartSettingStatus(Status(absl::StatusCode::kInvalidArgument, ""),
&counter, &start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr2)));
std::unique_ptr<MockQueueRunner> qr3(new MockQueueRunner(&coord));
qr3->StartSettingStatus(Status(absl::StatusCode::kOutOfRange, ""), &counter,
&start);
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr3)));
start.Notify();
counter.Wait();
TF_EXPECT_OK(coord.RequestStop());
EXPECT_EQ(coord.Join().code(), absl::StatusCode::kInvalidArgument);
}
TEST(CoordinatorTest, JoinWithoutStop) {
Coordinator coord;
std::unique_ptr<MockQueueRunner> qr(new MockQueueRunner(&coord));
TF_ASSERT_OK(coord.RegisterRunner(std::move(qr)));
EXPECT_EQ(coord.Join().code(), Code::FAILED_PRECONDITION);
}
TEST(CoordinatorTest, AllRunnersStopped) {
Coordinator coord;
MockQueueRunner* qr = new MockQueueRunner(&coord);
TF_ASSERT_OK(coord.RegisterRunner(std::unique_ptr<RunnerInterface>(qr)));
EXPECT_FALSE(coord.AllRunnersStopped());
qr->Stop();
EXPECT_TRUE(coord.AllRunnersStopped());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/coordinator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/training/coordinator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a581d489-5070-4705-82dd-ec7c21b84a0a | cpp | tensorflow/tensorflow | exponential | tensorflow/lite/experimental/shlo/ops/exponential.cc | tensorflow/lite/experimental/shlo/ops/exponential_test.cc | #include "tensorflow/lite/experimental/shlo/ops/exponential.h"
#include <cmath>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Exponential {
template <class T>
T operator()(T v) const {
return std::exp(v);
}
};
template <>
F16 Exponential::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Exponential::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
ExponentialOp Create(ExponentialOp::Attributes) { return {}; }
absl::Status Prepare(ExponentialOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(CheckSupportedTypes(
CheckCtx("cosine"), input, IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("cosine"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(ExponentialOp& op, const Tensor& input, Tensor& output) {
Exponential exponential;
if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), exponential,
input, output)
} else if (IsFloatTensor(input)) {
DISPATCH_FLOAT(detail::EvaluateNoQuantization, input.tensor_element_type(),
exponential, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.tanh: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/exponential.h"
#include <cmath>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/bf16.h"
#include "tensorflow/lite/experimental/shlo/f16.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<ExponentialOp> {
static std::string Get() { return "Exponential"; }
};
namespace {
struct Exponential {
template <class T>
T operator()(T v) const {
return std::exp(v);
}
} exponential_ref;
template <>
F16 Exponential::operator()<F16>(F16 val) const {
return F16(operator()(static_cast<float>(val)));
}
template <>
BF16 Exponential::operator()<BF16>(BF16 val) const {
return BF16(operator()(static_cast<float>(val)));
}
INSTANTIATE_TYPED_TEST_SUITE_P(Exponential,
UnaryElementwiseOpShapePropagationTest,
ExponentialOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Exponential, UnaryElementwiseSameBaselineElementTypeConstraintTest,
UnaryElementwiseConstraint1Types<ExponentialOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<ExponentialOp, ConcatTypes<BoolTestType, IntTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Exponential, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct ExponentialTest : ::testing::Test {};
TYPED_TEST_SUITE(ExponentialTest, FloatTestTypes, TestParamNames);
TYPED_TEST(ExponentialTest, FloatTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), exponential_ref);
auto op = Create(ExponentialOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
template <class T>
struct QuantizedExponentialTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedExponentialTest, QuantizedTestTypes, TestParamNames);
TYPED_TEST(QuantizedExponentialTest, PerTensorWorks) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = exponential_ref(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(ExponentialOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/exponential_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
efe8eb53-2350-4abc-aafc-adeb5a15e0e5 | cpp | google/tensorstore | dimension_units | tensorstore/index_space/dimension_units.cc | tensorstore/index_space/dimension_units_test.cc | #include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
// Maps per-input-dimension units of `transform` onto its output dimensions.
//
// A unit propagates only through `single_input_dimension` output maps with a
// non-zero stride; the propagated unit is the input unit divided by |stride|.
// Returns an error if some input dimension carries a unit but no output
// dimension depends on it (the unit would otherwise be silently lost).
Result<DimensionUnitsVector> TransformInputDimensionUnits(
    IndexTransformView<> transform, DimensionUnitsVector input_units) {
  // A null transform is treated as the identity: units pass through.
  if (!transform.valid()) return input_units;
  const DimensionIndex input_rank = transform.input_rank(),
                       output_rank = transform.output_rank();
  assert(input_units.size() == input_rank);
  std::optional<Unit> output_units[kMaxRank];
  // Tracks which input dimensions successfully contributed their unit.
  DimensionSet seen_input_dims;
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) continue;
    const Index stride = map.stride();
    if (stride == 0) continue;
    const DimensionIndex input_dim = map.input_dimension();
    const auto& input_unit = input_units[input_dim];
    if (!input_unit) continue;
    seen_input_dims[input_dim] = true;
    auto& output_unit = output_units[output_dim];
    output_unit = input_unit;
    // Scale by 1/|stride|: one output step spans 1/stride input steps.
    *output_unit /= std::abs(static_cast<double>(stride));
  }
  // Any input unit that was not propagated anywhere is an error.
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    if (!input_units[input_dim] || seen_input_dims[input_dim]) continue;
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "No output dimension corresponds to input dimension ", input_dim,
        " with unit ", *input_units[input_dim]));
  }
  // Reuse `input_units` as the result buffer to avoid a second allocation.
  input_units.resize(output_rank);
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    input_units[output_dim] = std::move(output_units[output_dim]);
  }
  return input_units;
}
// Maps per-output-dimension units of `transform` onto its input dimensions.
//
// Inverse of `TransformInputDimensionUnits`: a unit propagates only through a
// `single_input_dimension` map with non-zero stride whose input dimension is
// one-to-one (referenced by exactly one output dimension), and is multiplied
// by |stride|. Unlike the forward direction, units that cannot be propagated
// are dropped rather than reported as an error.
DimensionUnitsVector TransformOutputDimensionUnits(
    IndexTransformView<> transform, DimensionUnitsVector output_units) {
  // A null transform is treated as the identity: units pass through.
  if (!transform.valid()) return output_units;
  const DimensionIndex input_rank = transform.input_rank(),
                       output_rank = transform.output_rank();
  assert(output_units.size() == output_rank);
  // Input dimensions referenced ambiguously (by multiple outputs) get no unit.
  DimensionSet one_to_one_input_dims =
      internal::GetOneToOneInputDimensions(transform).one_to_one;
  std::optional<Unit> input_units[kMaxRank];
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto& output_unit = output_units[output_dim];
    if (!output_unit) continue;
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension) continue;
    const Index stride = map.stride();
    if (stride == 0) continue;
    const DimensionIndex input_dim = map.input_dimension();
    if (!one_to_one_input_dims[input_dim]) continue;
    auto& input_unit = input_units[input_dim];
    input_unit = output_unit;
    // Scale by |stride|: one input step spans |stride| output steps.
    *input_unit *= std::abs(static_cast<double>(stride));
  }
  // Reuse `output_units` as the result buffer to avoid a second allocation.
  output_units.resize(input_rank);
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    output_units[input_dim] = std::move(input_units[input_dim]);
  }
  return output_units;
}
// Merges `new_units` into `existing_units` in place.  A unit fills any slot
// that was previously unspecified; two specified-but-different units for the
// same dimension produce an InvalidArgument error, in which case no slots
// are filled (validation happens before any assignment).
absl::Status MergeDimensionUnits(DimensionUnitsVector& existing_units,
                                 span<const std::optional<Unit>> new_units) {
  assert(existing_units.empty() || existing_units.size() == new_units.size());
  existing_units.resize(new_units.size());
  // Pass 1: detect conflicts before mutating any slot.
  for (size_t i = 0; i < new_units.size(); ++i) {
    const auto& candidate = new_units[i];
    if (!candidate) continue;
    const auto& current = existing_units[i];
    if (current && current != candidate) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Cannot merge dimension units ", DimensionUnitsToString(new_units),
          " and ", DimensionUnitsToString(existing_units)));
    }
  }
  // Pass 2: fill in every previously-unspecified slot.
  for (size_t i = 0; i < new_units.size(); ++i) {
    if (new_units[i] && !existing_units[i]) {
      existing_units[i] = new_units[i];
    }
  }
  return absl::OkStatus();
}
// Formats `u` as a JSON-like list, e.g. `[null, "4 nm"]`; unspecified units
// render as `null`, specified units are quoted.
std::string DimensionUnitsToString(span<const std::optional<Unit>> u) {
  std::string out = "[";
  bool first = true;
  for (const auto& unit : u) {
    if (!first) out += ", ";
    first = false;
    if (unit) {
      out += tensorstore::QuoteString(tensorstore::StrCat(*unit));
    } else {
      out += "null";
    }
  }
  out += "]";
  return out;
}
} | #include "tensorstore/index_space/dimension_units.h"
#include <stddef.h>
#include <iterator>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/unit.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionUnitsToString;
using ::tensorstore::DimensionUnitsVector;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeDimensionUnits;
using ::tensorstore::TransformInputDimensionUnits;
using ::tensorstore::TransformOutputDimensionUnits;
using ::tensorstore::Unit;
// Unset units render as "null"; set units are quoted with a space between
// multiplier and base unit.
TEST(DimensionUnitsToStringTest, Basic) {
  EXPECT_EQ("[null, \"4 nm\"]", DimensionUnitsToString(DimensionUnitsVector{
                                    std::nullopt, Unit("4nm")}));
}

// Merging two fully-unspecified vectors leaves every slot unspecified.
TEST(MergeDimensionUnitsTest, BothUnspecified) {
  DimensionUnitsVector existing_units{std::nullopt, std::nullopt};
  DimensionUnitsVector new_units{std::nullopt, std::nullopt};
  TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
  EXPECT_THAT(existing_units,
              ::testing::ElementsAre(std::nullopt, std::nullopt));
}

// A specified unit on either side fills the corresponding unspecified slot.
TEST(MergeDimensionUnitsTest, OneSpecifiedOneUnspecified) {
  DimensionUnitsVector existing_units{std::nullopt, Unit("4nm")};
  DimensionUnitsVector new_units{Unit("8nm"), std::nullopt};
  TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
  EXPECT_THAT(existing_units, ::testing::ElementsAre(Unit("8nm"), Unit("4nm")));
}

// Identical units merge without error and remain unchanged.
TEST(MergeDimensionUnitsTest, BothSpecifiedSame) {
  DimensionUnitsVector existing_units{Unit("8nm"), Unit("4nm")};
  DimensionUnitsVector new_units{Unit("8nm"), std::nullopt};
  TENSORSTORE_EXPECT_OK(MergeDimensionUnits(existing_units, new_units));
  EXPECT_THAT(existing_units, ::testing::ElementsAre(Unit("8nm"), Unit("4nm")));
}

// Conflicting units fail, and `existing_units` must be left unmodified.
TEST(MergeDimensionUnitsTest, BothSpecifiedDistinct) {
  DimensionUnitsVector existing_units{std::nullopt, Unit("4nm")};
  DimensionUnitsVector new_units{Unit("8nm"), Unit("5nm")};
  EXPECT_THAT(
      MergeDimensionUnits(existing_units, new_units),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Cannot merge dimension units \\[\"8 nm\", \"5 nm\"\\] "
                    "and \\[null, \"4 nm\"\\]"));
  EXPECT_THAT(existing_units,
              ::testing::ElementsAre(std::nullopt, Unit("4nm")));
}
// Returns a randomly generated unit, or `std::nullopt` with probability 0.2.
// The multiplier is an integer in [5, 20) and the base unit is drawn from a
// small fixed set ("" denotes a dimensionless unit).
std::optional<Unit> MakeRandomUnit(absl::BitGenRef gen) {
  constexpr std::string_view kBaseUnits[] = {
      "",
      "nm",
      "um",
  };
  // Draw order matters for seed reproducibility: Bernoulli, then multiplier,
  // then base-unit index.
  if (absl::Bernoulli(gen, 0.2)) return std::nullopt;
  const double scale = absl::Uniform<int>(gen, 5, 20);
  const std::string_view base =
      kBaseUnits[absl::Uniform<size_t>(gen, 0, std::size(kBaseUnits))];
  return Unit(scale, std::string(base));
}
// Returns a vector of `rank` independently generated random units.
DimensionUnitsVector MakeRandomDimensionUnits(DimensionIndex rank,
                                              absl::BitGenRef gen) {
  DimensionUnitsVector result(rank);
  for (DimensionIndex i = 0; i < rank; ++i) {
    result[i] = MakeRandomUnit(gen);
  }
  return result;
}
// For invertible strided transforms, pushing units output->input and back
// must be lossless, and applying the inverse transform in the output->input
// direction must agree with the forward input->output direction.
TEST(TransformOutputDimensionUnitsTest, InvertibleRoundTrip) {
  constexpr size_t kNumIterations = 100;
  for (size_t i = 0; i < kNumIterations; ++i) {
    std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
        "TENSORSTORE_INTERNAL_TRANSFORM_DIMENSION_UNITS_TEST_SEED")};
    auto box = tensorstore::internal::MakeRandomBox(gen);
    auto domain = tensorstore::IndexDomain(box);
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
            gen, domain);
    auto output_units = MakeRandomDimensionUnits(domain.rank(), gen);
    auto input_units = TransformOutputDimensionUnits(transform, output_units);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inv_transform,
                                     InverseTransform(transform));
    EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
                ::testing::Optional(::testing::ElementsAreArray(output_units)));
    EXPECT_THAT(TransformOutputDimensionUnits(inv_transform, input_units),
                ::testing::ElementsAreArray(output_units));
  }
}

// With non-unit strides (max_stride = 4) the transform need not be
// invertible, but the output->input->output round trip must still recover
// the original units.
TEST(TransformOutputDimensionUnitsTest, StridedNonInvertibleRoundTrip) {
  constexpr size_t kNumIterations = 100;
  for (size_t i = 0; i < kNumIterations; ++i) {
    std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
        "TENSORSTORE_INTERNAL_TRANSFORM_DIMENSION_UNITS_TEST_SEED")};
    auto box = tensorstore::internal::MakeRandomBox(gen);
    auto domain = tensorstore::IndexDomain(box);
    tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
    p.max_stride = 4;
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
            gen, domain, p);
    auto output_units = MakeRandomDimensionUnits(domain.rank(), gen);
    auto input_units = TransformOutputDimensionUnits(transform, output_units);
    EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
                ::testing::Optional(::testing::ElementsAreArray(output_units)));
  }
}

// A unit on an input dimension that no output dimension depends on is an
// error in the input->output direction.
TEST(TransformInputDimensionUnitsTest, NoCorrespondingOutputDimension) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      tensorstore::IndexTransformBuilder(1, 0).Finalize());
  DimensionUnitsVector input_units{"4nm"};
  EXPECT_THAT(TransformInputDimensionUnits(transform, input_units),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "No output dimension corresponds to "
                            "input dimension 0 with unit 4 nm"));
}

// An input dimension referenced by multiple output dimensions is ambiguous:
// its unit is dropped rather than assigned.
TEST(TransformOutputDimensionUnitsTest, NonUnique) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      tensorstore::IndexTransformBuilder(2, 3)
          .output_single_input_dimension(0, 0)
          .output_single_input_dimension(1, 0)
          .output_single_input_dimension(2, 1)
          .Finalize());
  DimensionUnitsVector output_units{"4nm", "5nm", "6nm"};
  EXPECT_THAT(TransformOutputDimensionUnits(transform, output_units),
              ::testing::ElementsAre(std::nullopt, Unit("6nm")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_units.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/dimension_units_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
83652685-43bf-41f1-a1cd-a1370e304951 | cpp | abseil/abseil-cpp | no_destructor | absl/base/no_destructor.h | absl/base/no_destructor_test.cc | #ifndef ABSL_BASE_NO_DESTRUCTOR_H_
#define ABSL_BASE_NO_DESTRUCTOR_H_
#include <new>
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/base/nullability.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Wrapper whose contained `T` is constructed normally but never destroyed.
// Intended for function-local statics and globals whose destruction order
// would otherwise be problematic. Copying and assignment are disabled; the
// wrapped object is reached via `get()`, `operator*`, or `operator->`.
template <typename T>
class NoDestructor {
 public:
  // Forwarding constructor. The enable_if guard rejects a single argument of
  // type `NoDestructor&`, so that `NoDestructor x(other_nodestructor)` does
  // not silently forward the wrapper itself into T's constructor.
  template <typename... Ts,
            typename std::enable_if<!std::is_same<void(std::decay_t<Ts>&...),
                                                  void(NoDestructor&)>::value,
                                    int>::type = 0>
  explicit constexpr NoDestructor(Ts&&... args)
      : impl_(std::forward<Ts>(args)...) {}

  // Copy/move the wrapped value in from an existing `T`.
  explicit constexpr NoDestructor(const T& x) : impl_(x) {}
  explicit constexpr NoDestructor(T&& x)
      : impl_(std::move(x)) {}

  // The wrapper itself is neither copyable nor assignable.
  NoDestructor(const NoDestructor&) = delete;
  NoDestructor& operator=(const NoDestructor&) = delete;

  // Accessors for the wrapped object; never return null.
  T& operator*() { return *get(); }
  absl::Nonnull<T*> operator->() { return get(); }
  absl::Nonnull<T*> get() { return impl_.get(); }
  const T& operator*() const { return *get(); }
  absl::Nonnull<const T*> operator->() const { return get(); }
  absl::Nonnull<const T*> get() const { return impl_.get(); }

 private:
  // Storage used when T is trivially destructible: hold T directly, since a
  // trivial destructor running is indistinguishable from it not running.
  class DirectImpl {
   public:
    template <typename... Args>
    explicit constexpr DirectImpl(Args&&... args)
        : value_(std::forward<Args>(args)...) {}
    absl::Nonnull<const T*> get() const { return &value_; }
    absl::Nonnull<T*> get() { return &value_; }

   private:
    T value_;
  };

  // Storage used otherwise: placement-new T into an aligned byte buffer so
  // that no destructor call is ever emitted for it.
  class PlacementImpl {
   public:
    template <typename... Args>
    explicit PlacementImpl(Args&&... args) {
      new (&space_) T(std::forward<Args>(args)...);
    }
    absl::Nonnull<const T*> get() const {
      return Launder(reinterpret_cast<const T*>(&space_));
    }
    absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); }

   private:
    // std::launder (or the builtin) tells the compiler the pointer refers to
    // the object placement-constructed in `space_`, avoiding aliasing UB.
    template <typename P>
    static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) {
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
      return std::launder(p);
#elif ABSL_HAVE_BUILTIN(__builtin_launder)
      return __builtin_launder(p);
#else
      // No launder available: return the pointer as-is, suppressing GCC's
      // strict-aliasing warning.
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif
      return p;
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
#endif
    }

    alignas(T) unsigned char space_[sizeof(T)];
  };

  // Select the storage strategy at compile time based on T's destructor.
  std::conditional_t<std::is_trivially_destructible<T>::value, DirectImpl,
                     PlacementImpl>
      impl_;
};
#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
template <typename T>
NoDestructor(T) -> NoDestructor<T>;
#endif
ABSL_NAMESPACE_END
}
#endif | #include "absl/base/no_destructor.h"
#include <array>
#include <initializer_list>
#include <string>
#include <type_traits>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
namespace {
// Test helper: a move-only type whose destructor CHECK-fails unless the
// instance was moved from, so any unexpected destruction of a live value is
// detected at test time.
struct Blob {
  Blob() : val(42) {}
  Blob(int x, int y) : val(x + y) {}
  Blob(std::initializer_list<int> xs) {
    val = 0;
    for (auto& x : xs) val += x;
  }

  Blob(const Blob& ) = delete;
  // Moving marks the source as moved-out, making its destruction legal.
  Blob(Blob&& b) noexcept : val(b.val) {
    b.moved_out = true;
  }

  // Aborts (via ABSL_INTERNAL_CHECK) if this instance was never moved from.
  ~Blob() { ABSL_INTERNAL_CHECK(moved_out, "~Blob"); }

  int val;
  bool moved_out = false;
};

// Test helper: a type that cannot be destroyed at all, usable only behind
// NoDestructor.
struct TypeWithDeletedDestructor {
  ~TypeWithDeletedDestructor() = delete;
};
// A NoDestructor of an undestroyable type must compile and be destroyable
// itself (the wrapped destructor is never invoked).
TEST(NoDestructorTest, DestructorNeverCalled) {
  absl::NoDestructor<TypeWithDeletedDestructor> a;
  (void)a;
}

// NoDestructor must be neither copy/move constructible nor assignable.
TEST(NoDestructorTest, Noncopyable) {
  using T = absl::NoDestructor<int>;

  EXPECT_FALSE((std::is_constructible<T, T>::value));
  EXPECT_FALSE((std::is_constructible<T, const T>::value));
  EXPECT_FALSE((std::is_constructible<T, T&>::value));
  EXPECT_FALSE((std::is_constructible<T, const T&>::value));

  EXPECT_FALSE((std::is_assignable<T&, T>::value));
  EXPECT_FALSE((std::is_assignable<T&, const T>::value));
  EXPECT_FALSE((std::is_assignable<T&, T&>::value));
  EXPECT_FALSE((std::is_assignable<T&, const T&>::value));
}

// Exercises all accessors (*, ->, get) for mutable, const-wrapped, and
// const-wrapper cases, with each of Blob's constructors.
TEST(NoDestructorTest, Interface) {
  EXPECT_TRUE(std::is_trivially_destructible<absl::NoDestructor<Blob>>::value);
  EXPECT_TRUE(
      std::is_trivially_destructible<absl::NoDestructor<const Blob>>::value);
  {
    absl::NoDestructor<Blob> b;
    EXPECT_EQ(42, (*b).val);
    (*b).val = 55;
    EXPECT_EQ(55, b->val);
    b->val = 66;
    EXPECT_EQ(66, b.get()->val);
    b.get()->val = 42;
    EXPECT_EQ(42, (*b).val);
  }
  {
    absl::NoDestructor<const Blob> b(70, 7);
    EXPECT_EQ(77, (*b).val);
    EXPECT_EQ(77, b->val);
    EXPECT_EQ(77, b.get()->val);
  }
  {
    const absl::NoDestructor<Blob> b{
        {20, 28, 40}};
    EXPECT_EQ(88, (*b).val);
    EXPECT_EQ(88, b->val);
    EXPECT_EQ(88, b.get()->val);
  }
}

// Regression test: the forwarding constructor's SFINAE guard must not reject
// a single argument of abstract type passed by reference.
TEST(NoDestructorTest, SfinaeRegressionAbstractArg) {
  struct Abstract {
    virtual ~Abstract() = default;
    virtual int foo() const = 0;
  };

  struct Concrete : Abstract {
    int foo() const override { return 17; }
  };

  struct UsesAbstractInConstructor {
    explicit UsesAbstractInConstructor(const Abstract& abstract)
        : i(abstract.foo()) {}
    int i;
  };

  Concrete input;
  absl::NoDestructor<UsesAbstractInConstructor> foo1(input);
  EXPECT_EQ(foo1->i, 17);
  absl::NoDestructor<UsesAbstractInConstructor> foo2(
      static_cast<const Abstract&>(input));
  EXPECT_EQ(foo2->i, 17);
}
// The helpers below model the intended usage pattern: a function-local
// static NoDestructor that is initialized once and never destroyed.

std::string* Str0() {
  static absl::NoDestructor<std::string> x;
  return x.get();
}

extern const std::string& Str2();

// Initialization of Str1 depends on Str2, exercising cross-static ordering.
const char* Str1() {
  static absl::NoDestructor<std::string> x(Str2() + "_Str1");
  return x->c_str();
}

const std::string& Str2() {
  static absl::NoDestructor<std::string> x("Str2");
  return *x;
}

// Constructs from an existing T (copy), not from forwarded arguments.
const std::string& Str2Copy() {
  static absl::NoDestructor<std::string> x(Str2());
  return *x;
}

typedef std::array<std::string, 3> MyArray;
const MyArray& Array() {
  static absl::NoDestructor<MyArray> x{{{"foo", "bar", "baz"}}};
  return *x;
}

typedef std::vector<int> MyVector;
const MyVector& Vector() {
  static absl::NoDestructor<MyVector> x{{1, 2, 3}};
  return *x;
}

const int& Int() {
  static absl::NoDestructor<int> x;
  return *x;
}
// Verifies the function-local-static helpers above: trivial destructibility
// of the wrappers, lazily-initialized values, and mutation through Str0.
TEST(NoDestructorTest, StaticPattern) {
  EXPECT_TRUE(
      std::is_trivially_destructible<absl::NoDestructor<std::string>>::value);
  EXPECT_TRUE(
      std::is_trivially_destructible<absl::NoDestructor<MyArray>>::value);
  EXPECT_TRUE(
      std::is_trivially_destructible<absl::NoDestructor<MyVector>>::value);
  EXPECT_TRUE(std::is_trivially_destructible<absl::NoDestructor<int>>::value);

  EXPECT_EQ(*Str0(), "");
  Str0()->append("foo");
  EXPECT_EQ(*Str0(), "foo");

  EXPECT_EQ(std::string(Str1()), "Str2_Str1");

  EXPECT_EQ(Str2(), "Str2");
  EXPECT_EQ(Str2Copy(), "Str2");

  EXPECT_THAT(Array(), testing::ElementsAre("foo", "bar", "baz"));

  EXPECT_THAT(Vector(), testing::ElementsAre(1, 2, 3));

  EXPECT_EQ(0, Int());
}

#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
// Class template argument deduction should deduce NoDestructor<int> from an
// int initializer.
TEST(NoDestructorTest, ClassTemplateArgumentDeduction) {
  absl::NoDestructor i(1);
  static_assert(std::is_same<decltype(i), absl::NoDestructor<int>>::value,
                "Expected deduced type to be int.");
}
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/no_destructor.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/no_destructor_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
9722a3da-f201-47f3-8264-7d91231aa0cb | cpp | abseil/abseil-cpp | parse | absl/flags/parse.cc | absl/flags/parse_test.cc | #include "absl/flags/parse.h"
#include <stdlib.h>
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#ifdef _WIN32
#include <windows.h>
#endif
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/commandlineflag.h"
#include "absl/flags/config.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/commandlineflag.h"
#include "absl/flags/internal/flag.h"
#include "absl/flags/internal/parse.h"
#include "absl/flags/internal/private_handle_accessor.h"
#include "absl/flags/internal/program_name.h"
#include "absl/flags/internal/usage.h"
#include "absl/flags/reflection.h"
#include "absl/flags/usage.h"
#include "absl/flags/usage_config.h"
#include "absl/strings/ascii.h"
#include "absl/strings/internal/damerau_levenshtein_distance.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
namespace {
// Guards the *_needs_processing booleans below, which are set by the
// generator flags' OnUpdate callbacks and consumed during parsing.
ABSL_CONST_INIT absl::Mutex processing_checks_guard(absl::kConstInit);

ABSL_CONST_INIT bool flagfile_needs_processing
    ABSL_GUARDED_BY(processing_checks_guard) = false;
ABSL_CONST_INIT bool fromenv_needs_processing
    ABSL_GUARDED_BY(processing_checks_guard) = false;
ABSL_CONST_INIT bool tryfromenv_needs_processing
    ABSL_GUARDED_BY(processing_checks_guard) = false;

// Guards `specified_flags`, the sorted list of flags that were explicitly
// set on the command line (populated by parsing, queried by
// WasPresentOnCommandLine).
ABSL_CONST_INIT absl::Mutex specified_flags_guard(absl::kConstInit);
ABSL_CONST_INIT std::vector<const CommandLineFlag*>* specified_flags
    ABSL_GUARDED_BY(specified_flags_guard) = nullptr;

// Limits for misspelling suggestions: at most kMaxHints candidates, each
// within edit distance kMaxDistance of the unknown flag name.
ABSL_CONST_INIT const size_t kMaxHints = 100;
ABSL_CONST_INIT const size_t kMaxDistance = 3;

// Orders CommandLineFlag pointers (and bare names, for heterogeneous
// binary_search) by flag name.
struct SpecifiedFlagsCompare {
  bool operator()(const CommandLineFlag* a, const CommandLineFlag* b) const {
    return a->Name() < b->Name();
  }
  bool operator()(const CommandLineFlag* a, absl::string_view b) const {
    return a->Name() < b;
  }
  bool operator()(absl::string_view a, const CommandLineFlag* b) const {
    return a < b->Name();
  }
};
}
}
ABSL_NAMESPACE_END
}
// Built-in "generator" flags. Each OnUpdate callback only records (under
// processing_checks_guard) that the flag needs handling; the actual
// expansion happens later in HandleGeneratorFlags. A warning is logged when
// the flag is set a second time before the first setting was handled.

ABSL_FLAG(std::vector<std::string>, flagfile, {},
          "comma-separated list of files to load flags from")
    .OnUpdate([]() {
      if (absl::GetFlag(FLAGS_flagfile).empty()) return;

      absl::MutexLock l(&absl::flags_internal::processing_checks_guard);

      if (absl::flags_internal::flagfile_needs_processing) {
        ABSL_INTERNAL_LOG(WARNING, "flagfile set twice before it is handled");
      }

      absl::flags_internal::flagfile_needs_processing = true;
    });

ABSL_FLAG(std::vector<std::string>, fromenv, {},
          "comma-separated list of flags to set from the environment"
          " [use 'export FLAGS_flag1=value']")
    .OnUpdate([]() {
      if (absl::GetFlag(FLAGS_fromenv).empty()) return;

      absl::MutexLock l(&absl::flags_internal::processing_checks_guard);

      if (absl::flags_internal::fromenv_needs_processing) {
        ABSL_INTERNAL_LOG(WARNING, "fromenv set twice before it is handled.");
      }

      absl::flags_internal::fromenv_needs_processing = true;
    });

ABSL_FLAG(std::vector<std::string>, tryfromenv, {},
          "comma-separated list of flags to try to set from the environment if "
          "present")
    .OnUpdate([]() {
      if (absl::GetFlag(FLAGS_tryfromenv).empty()) return;

      absl::MutexLock l(&absl::flags_internal::processing_checks_guard);

      if (absl::flags_internal::tryfromenv_needs_processing) {
        ABSL_INTERNAL_LOG(WARNING,
                          "tryfromenv set twice before it is handled.");
      }

      absl::flags_internal::tryfromenv_needs_processing = true;
    });

// Flag names listed here may appear on the command line without being
// registered; they are silently ignored instead of reported as unknown.
ABSL_FLAG(std::vector<std::string>, undefok, {},
          "comma-separated list of flag names that it is okay to specify "
          "on the command line even if the program does not define a flag "
          "with that name");
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
namespace {
// A consumable queue over a list of command-line arguments. The first
// element is the program name (or an empty placeholder for flagfile-derived
// lists); Front()/PopFront() walk the remaining arguments.
class ArgsList {
 public:
  ArgsList() : next_arg_(0) {}
  ArgsList(int argc, char* argv[]) : args_(argv, argv + argc), next_arg_(0) {}
  explicit ArgsList(const std::vector<std::string>& args)
      : args_(args), next_arg_(0) {}

  // Appends the flag arguments found in `flag_file_name` to this list.
  // Returns false (after reporting a usage error) on any problem.
  bool ReadFromFlagfile(const std::string& flag_file_name);

  // Number of arguments not yet consumed.
  size_t Size() const { return args_.size() - next_arg_; }
  // Index of the next unconsumed argument within the original list.
  size_t FrontIndex() const { return next_arg_; }
  absl::string_view Front() const { return args_[next_arg_]; }
  void PopFront() { next_arg_++; }

 private:
  std::vector<std::string> args_;
  size_t next_arg_;
};
// Loads flag arguments from a flagfile. Blank lines and '#' comment lines
// are skipped; every other line must start with '-' (a flag). Positional
// arguments and the "--" separator are rejected. Reports a usage error and
// returns false on failure; on error the arguments read so far remain in
// `args_`.
bool ArgsList::ReadFromFlagfile(const std::string& flag_file_name) {
  std::ifstream flag_file(flag_file_name);

  if (!flag_file) {
    flags_internal::ReportUsageError(
        absl::StrCat("Can't open flagfile ", flag_file_name), true);

    return false;
  }

  // Placeholder for the "program name" slot that parsing always pops first.
  args_.emplace_back("");

  std::string line;
  bool success = true;

  while (std::getline(flag_file, line)) {
    absl::string_view stripped = absl::StripLeadingAsciiWhitespace(line);

    if (stripped.empty() || stripped[0] == '#') {
      // Comment or empty line; skip.
      continue;
    }

    if (stripped[0] == '-') {
      if (stripped == "--") {
        flags_internal::ReportUsageError(
            "Flagfile can't contain position arguments or --", true);

        success = false;
        break;
      }

      args_.emplace_back(stripped);
      continue;
    }

    flags_internal::ReportUsageError(
        absl::StrCat("Unexpected line in the flagfile ", flag_file_name, ": ",
                     line),
        true);

    success = false;
  }

  return success;
}
// Reads environment variable `var_name` into `var_value`. Returns false if
// the variable is unset, or (on Windows) if its value does not fit in the
// local 1 KiB buffer.
bool GetEnvVar(const char* var_name, std::string& var_value) {
#ifdef _WIN32
  char buf[1024];
  const auto len = GetEnvironmentVariableA(var_name, buf, sizeof(buf));
  // len == 0 means unset; len >= sizeof(buf) means truncated.
  if (len == 0 || len >= sizeof(buf)) {
    return false;
  }
  var_value.assign(buf, len);
#else
  const char* env = ::getenv(var_name);
  if (env == nullptr) return false;
  var_value = env;
#endif
  return true;
}
// Splits `arg` (with one leading '-' already removed by the caller) into
// (flag_name, value, had_empty_assignment). A second leading '-' is stripped
// here, so "-flag" and "--flag" behave identically. The bool is true only
// for the explicit "flag=" form with an empty value.
std::tuple<absl::string_view, absl::string_view, bool> SplitNameAndValue(
    absl::string_view arg) {
  // Accept "--flag" as well as "-flag".
  absl::ConsumePrefix(&arg, "-");

  if (arg.empty()) {
    // The argument was just "-" or "--": no flag name at all.
    return std::make_tuple("", "", false);
  }

  const auto eq = arg.find('=');
  if (eq == absl::string_view::npos) {
    // No assignment; the value may come from the following argument.
    return std::make_tuple(arg, "", false);
  }

  absl::string_view value = arg.substr(eq + 1);
  return std::make_tuple(arg.substr(0, eq), value, value.empty());
}
// Finds the registered flag for `flag_name`, also accepting the boolean
// negation spelling "no<name>". The bool result is true iff the negated
// spelling was used (the flag pointer may still be null in that case).
std::tuple<CommandLineFlag*, bool> LocateFlag(absl::string_view flag_name) {
  if (CommandLineFlag* flag = absl::FindCommandLineFlag(flag_name)) {
    return std::make_tuple(flag, false);
  }
  // "--noverbose" refers to flag "verbose" in negated form.
  if (absl::ConsumePrefix(&flag_name, "no")) {
    return std::make_tuple(absl::FindCommandLineFlag(flag_name), true);
  }
  return std::make_tuple(nullptr, false);
}
// Debug-build sanity check: verifies each registered flag's default value
// survives an unparse/parse round trip. Retired flags and flags of the
// natively supported types (which are expanded and skipped via the macro
// below) are exempt. No-op in release builds.
void CheckDefaultValuesParsingRoundtrip() {
#ifndef NDEBUG
  flags_internal::ForEachFlag([&](CommandLineFlag& flag) {
    if (flag.IsRetired()) return;

// Expands to an early return for each natively supported flag type.
#define ABSL_FLAGS_INTERNAL_IGNORE_TYPE(T, _) \
  if (flag.IsOfType<T>()) return;

    ABSL_FLAGS_INTERNAL_SUPPORTED_TYPES(ABSL_FLAGS_INTERNAL_IGNORE_TYPE)
#undef ABSL_FLAGS_INTERNAL_IGNORE_TYPE

    flags_internal::PrivateHandleAccessor::CheckDefaultValueParsingRoundtrip(
        flag);
  });
#endif
}
bool ReadFlagfiles(const std::vector<std::string>& flagfiles,
std::vector<ArgsList>& input_args) {
bool success = true;
for (auto it = flagfiles.rbegin(); it != flagfiles.rend(); ++it) {
ArgsList al;
if (al.ReadFromFlagfile(*it)) {
input_args.push_back(al);
} else {
success = false;
}
}
return success;
}
// Builds a synthetic argument list "--name=value" for each flag in
// `flag_names` whose value is found in the environment as FLAGS_<name>, and
// appends it to `input_args`. If `fail_on_absent_in_env` is true a missing
// variable is a usage error (--fromenv semantics); otherwise it is skipped
// (--tryfromenv semantics). On any error nothing is appended to
// `input_args`. Returns whether everything succeeded.
bool ReadFlagsFromEnv(const std::vector<std::string>& flag_names,
                      std::vector<ArgsList>& input_args,
                      bool fail_on_absent_in_env) {
  bool success = true;
  std::vector<std::string> args;

  // Placeholder for the "program name" slot that parsing always pops first.
  args.emplace_back("");

  for (const auto& flag_name : flag_names) {
    // Listing fromenv/tryfromenv here would re-trigger this expansion forever.
    if (flag_name == "fromenv" || flag_name == "tryfromenv") {
      flags_internal::ReportUsageError(
          absl::StrCat("Infinite recursion on flag ", flag_name), true);

      success = false;
      continue;
    }

    const std::string envname = absl::StrCat("FLAGS_", flag_name);
    std::string envval;
    if (!GetEnvVar(envname.c_str(), envval)) {
      if (fail_on_absent_in_env) {
        flags_internal::ReportUsageError(
            absl::StrCat(envname, " not found in environment"), true);

        success = false;
      }

      continue;
    }

    args.push_back(absl::StrCat("--", flag_name, "=", envval));
  }

  if (success) {
    input_args.emplace_back(args);
  }

  return success;
}
// Expands any pending generator flags (--flagfile, --fromenv, --tryfromenv)
// by appending the argument lists they produce to `input_args`. The pending
// state is the *_needs_processing booleans set by the flags' OnUpdate
// callbacks; each is cleared once handled. Returns whether all expansions
// succeeded.
bool HandleGeneratorFlags(std::vector<ArgsList>& input_args,
                          std::vector<std::string>& flagfile_value) {
  bool success = true;

  absl::MutexLock l(&flags_internal::processing_checks_guard);

  if (flags_internal::flagfile_needs_processing) {
    auto flagfiles = absl::GetFlag(FLAGS_flagfile);

    // Only remember flagfile values set directly on the original command
    // line (input_args.size() == 1), so they can be restored after parsing.
    if (input_args.size() == 1) {
      flagfile_value.insert(flagfile_value.end(), flagfiles.begin(),
                            flagfiles.end());
    }

    success &= ReadFlagfiles(flagfiles, input_args);

    flags_internal::flagfile_needs_processing = false;
  }

  if (flags_internal::fromenv_needs_processing) {
    auto flags_list = absl::GetFlag(FLAGS_fromenv);

    // --fromenv: a missing environment variable is an error.
    success &= ReadFlagsFromEnv(flags_list, input_args, true);

    flags_internal::fromenv_needs_processing = false;
  }

  if (flags_internal::tryfromenv_needs_processing) {
    auto flags_list = absl::GetFlag(FLAGS_tryfromenv);

    // --tryfromenv: missing environment variables are silently skipped.
    success &= ReadFlagsFromEnv(flags_list, input_args, false);

    flags_internal::tryfromenv_needs_processing = false;
  }

  return success;
}
// Restores generator-flag state after parsing: --flagfile is reset to the
// values that appeared on the original command line, while --fromenv and
// --tryfromenv are cleared (their expansion is one-shot). The pending
// *_needs_processing markers (re-set by the SetFlag OnUpdate callbacks) are
// cleared so the restored values are not expanded again.
void ResetGeneratorFlags(const std::vector<std::string>& flagfile_value) {
  if (!flagfile_value.empty()) {
    absl::SetFlag(&FLAGS_flagfile, flagfile_value);
    // Scoped lock: SetFlag's OnUpdate marked flagfile as pending; undo that.
    absl::MutexLock l(&flags_internal::processing_checks_guard);
    flags_internal::flagfile_needs_processing = false;
  }

  if (!absl::GetFlag(FLAGS_fromenv).empty()) {
    absl::SetFlag(&FLAGS_fromenv, {});
  }
  if (!absl::GetFlag(FLAGS_tryfromenv).empty()) {
    absl::SetFlag(&FLAGS_tryfromenv, {});
  }

  absl::MutexLock l(&flags_internal::processing_checks_guard);
  flags_internal::fromenv_needs_processing = false;
  flags_internal::tryfromenv_needs_processing = false;
}
// Determines the textual value to assign to `flag` given how it appeared on
// the command line:
//   - boolean flags: bare "--flag" means "1", "--noflag" means "0";
//     "--flag=" (empty assignment) and "--noflag=value" are errors.
//   - non-boolean flags: the negated spelling is an error; a bare "--flag"
//     consumes the NEXT argument from `curr_list` as the value.
// Returns (success, value); on failure a usage error has been reported.
std::tuple<bool, absl::string_view> DeduceFlagValue(const CommandLineFlag& flag,
                                                    absl::string_view value,
                                                    bool is_negative,
                                                    bool is_empty_value,
                                                    ArgsList* curr_list) {
  if (flag.IsOfType<bool>()) {
    if (value.empty()) {
      if (is_empty_value) {
        // "--bool_flag=" — explicit empty assignment is rejected.
        flags_internal::ReportUsageError(
            absl::StrCat(
                "Missing the value after assignment for the boolean flag '",
                flag.Name(), "'"),
            true);
        return std::make_tuple(false, "");
      }
      // Bare "--flag" / "--noflag" imply true / false respectively.
      value = is_negative ? "0" : "1";
    } else if (is_negative) {
      // "--noflag=value" is ambiguous and rejected.
      flags_internal::ReportUsageError(
          absl::StrCat("Negative form with assignment is not valid for the "
                       "boolean flag '",
                       flag.Name(), "'"),
          true);
      return std::make_tuple(false, "");
    }
  } else if (is_negative) {
    // "no" prefix is only meaningful for boolean flags.
    flags_internal::ReportUsageError(
        absl::StrCat("Negative form is not valid for the flag '", flag.Name(),
                     "'"),
        true);
    return std::make_tuple(false, "");
  } else if (value.empty() && (!is_empty_value)) {
    // "--flag value" form: the value is the next command-line argument.
    if (curr_list->Size() == 1) {
      // "--flag" was the last argument; there is nothing to consume.
      flags_internal::ReportUsageError(
          absl::StrCat("Missing the value for the flag '", flag.Name(), "'"),
          true);
      return std::make_tuple(false, "");
    }

    curr_list->PopFront();
    value = curr_list->Front();

    // Heuristic warning: a string flag consuming something that looks like
    // another flag (or like "-" followed by an unknown name) is probably a
    // user mistake, e.g. `--path --verbose`.
    if (!value.empty() && value[0] == '-' && flag.IsOfType<std::string>()) {
      auto maybe_flag_name = std::get<0>(SplitNameAndValue(value.substr(1)));

      if (maybe_flag_name.empty() ||
          std::get<0>(LocateFlag(maybe_flag_name)) != nullptr) {
        ABSL_INTERNAL_LOG(
            WARNING,
            absl::StrCat("Did you really mean to set flag '", flag.Name(),
                         "' to the value '", value, "'?"));
      }
    }
  }

  return std::make_tuple(true, value);
}
// Returns true if `flag_name` — or its boolean-negation spelling with the
// "no" prefix stripped — appears in --undefok and may be silently ignored.
bool CanIgnoreUndefinedFlag(absl::string_view flag_name) {
  const auto undefok = absl::GetFlag(FLAGS_undefok);
  const auto listed = [&undefok](absl::string_view name) {
    return std::find(undefok.begin(), undefok.end(), name) != undefok.end();
  };

  if (listed(flag_name)) return true;
  // "--noflag" is ignorable when "flag" itself is listed in --undefok.
  return absl::ConsumePrefix(&flag_name, "no") && listed(flag_name);
}
// Emits one usage error per entry of `unrecognized_flags`. Flags that came
// straight from argv get "did you mean" spelling suggestions appended when
// close matches exist; flagfile-sourced flags never do.
// `report_as_fatal_error` selects the error severity.
void ReportUnrecognizedFlags(
    const std::vector<UnrecognizedFlag>& unrecognized_flags,
    bool report_as_fatal_error) {
  for (const auto& unrecognized : unrecognized_flags) {
    std::vector<std::string> hints;
    if (unrecognized.source == UnrecognizedFlag::kFromArgv) {
      hints = flags_internal::GetMisspellingHints(unrecognized.flag_name);
    }

    const std::string message =
        hints.empty()
            ? absl::StrCat("Unknown command line flag '",
                           unrecognized.flag_name, "'")
            : absl::StrCat("Unknown command line flag '",
                           unrecognized.flag_name, "'. Did you mean: ",
                           absl::StrJoin(hints, ", "), " ?");
    flags_internal::ReportUsageError(message, report_as_fatal_error);
  }
}
}
// Returns true if `flag_name` was explicitly specified on the command line
// in the most recent parse. It is a fatal error to call this before
// ParseCommandLine has run (i.e. before `specified_flags` is populated).
bool WasPresentOnCommandLine(absl::string_view flag_name) {
  absl::ReaderMutexLock l(&specified_flags_guard);
  ABSL_INTERNAL_CHECK(specified_flags != nullptr,
                      "ParseCommandLine is not invoked yet");

  // `specified_flags` is kept sorted by name, enabling heterogeneous
  // binary search via SpecifiedFlagsCompare.
  return std::binary_search(specified_flags->begin(), specified_flags->end(),
                            flag_name, SpecifiedFlagsCompare{});
}
// Accumulates flag-name suggestions at the smallest edit distance observed
// so far, capped at kMaxHints entries.
struct BestHints {
  explicit BestHints(uint8_t max_distance) : best_distance(max_distance + 1) {}

  // Records `hint` at `distance`: a strictly smaller distance discards all
  // prior hints; an equal distance appends. Returns false once the hint
  // list is full and no further hints will be accepted.
  bool AddHint(absl::string_view hint, uint8_t distance) {
    if (hints.size() >= kMaxHints) return false;
    if (distance < best_distance) {
      best_distance = distance;
      hints.assign(1, std::string(hint));
    } else if (distance == best_distance) {
      hints.emplace_back(hint);
    }
    return true;
  }

  uint8_t best_distance;
  std::vector<std::string> hints;
};
// Returns candidate flag names closest in edit distance to the unknown name
// `flag`. Candidates are all registered flags, the "no<name>" spelling of
// boolean flags, and names listed in --undefok (annotated "(undefok)").
// The distance cutoff shrinks with the length of `flag` (short names only
// match near-exact spellings), bounded by kMaxDistance.
std::vector<std::string> GetMisspellingHints(const absl::string_view flag) {
  const size_t maxCutoff = std::min(flag.size() / 2 + 1, kMaxDistance);
  auto undefok = absl::GetFlag(FLAGS_undefok);
  BestHints best_hints(static_cast<uint8_t>(maxCutoff));
  flags_internal::ForEachFlag([&](const CommandLineFlag& f) {
    if (best_hints.hints.size() >= kMaxHints) return;
    // Capped distance computation: gives up early past the current best.
    uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance(
        flag, f.Name(), best_hints.best_distance);
    best_hints.AddHint(f.Name(), distance);
    // Boolean flags are also reachable via their negated spelling.
    if (f.IsOfType<bool>()) {
      const std::string negated_flag = absl::StrCat("no", f.Name());
      distance = strings_internal::CappedDamerauLevenshteinDistance(
          flag, negated_flag, best_hints.best_distance);
      best_hints.AddHint(negated_flag, distance);
    }
  });
  // --undefok entries are valid spellings too; mark them as such.
  absl::c_for_each(undefok, [&](const absl::string_view f) {
    if (best_hints.hints.size() >= kMaxHints) return;
    uint8_t distance = strings_internal::CappedDamerauLevenshteinDistance(
        flag, f, best_hints.best_distance);
    best_hints.AddHint(absl::StrCat(f, " (undefok)"), distance);
  });
  return best_hints.hints;
}
// Full command-line parse: runs the Abseil-only pass, then handles
// unrecognized flags according to `undef_flag_action` — ignore them, report
// them, or report and terminate the process with exit code 1 (after first
// honoring any usage/help flags). Returns the positional arguments
// (argv[0] first).
std::vector<char*> ParseCommandLineImpl(int argc, char* argv[],
                                        UsageFlagsAction usage_flag_action,
                                        OnUndefinedFlag undef_flag_action,
                                        std::ostream& error_help_output) {
  std::vector<char*> positional_args;
  std::vector<UnrecognizedFlag> unrecognized_flags;

  auto help_mode = flags_internal::ParseAbseilFlagsOnlyImpl(
      argc, argv, positional_args, unrecognized_flags, usage_flag_action);

  if (undef_flag_action != OnUndefinedFlag::kIgnoreUndefined) {
    // Report all unknown flags; they are fatal only in abort mode.
    flags_internal::ReportUnrecognizedFlags(
        unrecognized_flags,
        (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined));

    if (undef_flag_action == OnUndefinedFlag::kAbortIfUndefined) {
      if (!unrecognized_flags.empty()) {
        // Let --help output (if requested) appear before terminating.
        flags_internal::HandleUsageFlags(error_help_output,
                                         ProgramUsageMessage());
        std::exit(1);
      }
    }
  }

  // Exit now if a usage flag (--help etc.) requested it.
  flags_internal::MaybeExit(help_mode);

  return positional_args;
}
// Core parsing loop shared by ParseCommandLine and ParseAbseilFlagsOnly.
// Fills `positional_args` (starting with argv[0]) and `unrecognized_flags`
// (names listed in --undefok are filtered out at the end). Returns the help
// mode requested on the command line; the caller decides whether to act on
// it. Does not itself report unrecognized flags or exit on parse failure
// (though a failed parse prints usage help and returns HelpMode::kFull).
HelpMode ParseAbseilFlagsOnlyImpl(
    int argc, char* argv[], std::vector<char*>& positional_args,
    std::vector<UnrecognizedFlag>& unrecognized_flags,
    UsageFlagsAction usage_flag_action) {
  ABSL_INTERNAL_CHECK(argc > 0, "Missing argv[0]");
  using flags_internal::ArgsList;
  using flags_internal::specified_flags;
  std::vector<std::string> flagfile_value;
  // Stack of argument lists: argv itself plus any lists injected by
  // generator flags (--flagfile, --fromenv, ...).
  std::vector<ArgsList> input_args;
  flags_internal::FinalizeRegistry();
  flags_internal::CheckDefaultValuesParsingRoundtrip();
  input_args.push_back(ArgsList(argc, argv));
  if (flags_internal::ProgramInvocationName() == "UNKNOWN") {
    flags_internal::SetProgramInvocationName(argv[0]);
  }
  positional_args.push_back(argv[0]);
  absl::MutexLock l(&flags_internal::specified_flags_guard);
  if (specified_flags == nullptr) {
    specified_flags = new std::vector<const CommandLineFlag*>;
  } else {
    specified_flags->clear();
  }
  bool success = true;
  while (!input_args.empty()) {
    // Expand generator flags first; this may push new ArgsLists onto
    // `input_args`.
    success &= flags_internal::HandleGeneratorFlags(input_args, flagfile_value);
    ArgsList& curr_list = input_args.back();
    // Consume the argument processed on the previous iteration.
    curr_list.PopFront();
    if (curr_list.Size() == 0) {
      input_args.pop_back();
      continue;
    }
    absl::string_view arg(curr_list.Front());
    // Only the outermost list is argv; deeper lists come from flagfiles.
    bool arg_from_argv = input_args.size() == 1;
    // Anything not starting with '-' (or a bare "-") is a positional arg.
    if (!absl::ConsumePrefix(&arg, "-") || arg.empty()) {
      ABSL_INTERNAL_CHECK(arg_from_argv,
                          "Flagfile cannot contain positional argument");
      positional_args.push_back(argv[curr_list.FrontIndex()]);
      continue;
    }
    absl::string_view flag_name;
    absl::string_view value;
    bool is_empty_value = false;
    std::tie(flag_name, value, is_empty_value) =
        flags_internal::SplitNameAndValue(arg);
    // An empty flag name means "--": stop flag parsing; the remaining args
    // are appended as positional after the loop.
    if (flag_name.empty()) {
      ABSL_INTERNAL_CHECK(arg_from_argv,
                          "Flagfile cannot contain positional argument");
      curr_list.PopFront();
      break;
    }
    CommandLineFlag* flag = nullptr;
    bool is_negative = false;
    std::tie(flag, is_negative) = flags_internal::LocateFlag(flag_name);
    if (flag == nullptr) {
      // Not a registered flag; it may still be a usage flag (--help, ...).
      if (flags_internal::DeduceUsageFlags(flag_name, value)) {
        continue;
      }
      unrecognized_flags.emplace_back(arg_from_argv
                                          ? UnrecognizedFlag::kFromArgv
                                          : UnrecognizedFlag::kFromFlagfile,
                                      flag_name);
      continue;
    }
    bool value_success = true;
    // May consume the next argument as the value ("--flag value" form).
    std::tie(value_success, value) = flags_internal::DeduceFlagValue(
        *flag, value, is_negative, is_empty_value, &curr_list);
    success &= value_success;
    std::string error;
    if (!flags_internal::PrivateHandleAccessor::ParseFrom(
            *flag, value, flags_internal::SET_FLAGS_VALUE,
            flags_internal::kCommandLine, error)) {
      // Retired flags are accepted but ignored, even with bad values.
      if (flag->IsRetired()) continue;
      flags_internal::ReportUsageError(error, true);
      success = false;
    } else {
      specified_flags->push_back(flag);
    }
  }
  flags_internal::ResetGeneratorFlags(flagfile_value);
  // If the loop was terminated by "--", append everything after it as
  // positional arguments.
  if (!input_args.empty()) {
    for (size_t arg_index = input_args.back().FrontIndex();
         arg_index < static_cast<size_t>(argc); ++arg_index) {
      positional_args.push_back(argv[arg_index]);
    }
  }
  specified_flags->shrink_to_fit();
  std::sort(specified_flags->begin(), specified_flags->end(),
            flags_internal::SpecifiedFlagsCompare{});
  // Drop unrecognized flags that --undefok told us to ignore.
  std::vector<UnrecognizedFlag> filtered;
  filtered.reserve(unrecognized_flags.size());
  for (const auto& unrecognized : unrecognized_flags) {
    if (flags_internal::CanIgnoreUndefinedFlag(unrecognized.flag_name))
      continue;
    filtered.push_back(unrecognized);
  }
  std::swap(unrecognized_flags, filtered);
  if (!success) {
#if ABSL_FLAGS_STRIP_NAMES
    flags_internal::ReportUsageError(
        "NOTE: command line flags are disabled in this build", true);
#else
    flags_internal::HandleUsageFlags(std::cerr, ProgramUsageMessage());
#endif
    return HelpMode::kFull;
  }
  return usage_flag_action == UsageFlagsAction::kHandleUsage
             ? flags_internal::HandleUsageFlags(std::cout,
                                                ProgramUsageMessage())
             : HelpMode::kNone;
}
}
// Parses only Abseil-registered flags from `argv`, returning positional and
// unrecognized arguments through the out-parameters. May terminate the
// process if a usage flag (e.g. --help) was given or the parse failed.
void ParseAbseilFlagsOnly(int argc, char* argv[],
                          std::vector<char*>& positional_args,
                          std::vector<UnrecognizedFlag>& unrecognized_flags) {
  flags_internal::MaybeExit(flags_internal::ParseAbseilFlagsOnlyImpl(
      argc, argv, positional_args, unrecognized_flags,
      flags_internal::UsageFlagsAction::kHandleUsage));
}
// Public wrapper: reports each unrecognized flag as an error (the `true`
// argument selects error-level reporting in the internal helper).
void ReportUnrecognizedFlags(
    const std::vector<UnrecognizedFlag>& unrecognized_flags) {
  flags_internal::ReportUnrecognizedFlags(unrecognized_flags, true);
}
// Public entry point: parses `argv`, handles usage flags (--help etc.), and
// aborts the process on any undefined flag. Returns the positional
// arguments, including argv[0]. Error output goes to the impl's default
// stream (the parameter has a default not visible here).
std::vector<char*> ParseCommandLine(int argc, char* argv[]) {
  return flags_internal::ParseCommandLineImpl(
      argc, argv, flags_internal::UsageFlagsAction::kHandleUsage,
      flags_internal::OnUndefinedFlag::kAbortIfUndefined);
}
ABSL_NAMESPACE_END
} | #include "absl/flags/parse.h"
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/internal/scoped_set_env.h"
#include "absl/flags/config.h"
#include "absl/flags/flag.h"
#include "absl/flags/internal/parse.h"
#include "absl/flags/internal/usage.h"
#include "absl/flags/reflection.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "absl/types/span.h"
#ifdef _WIN32
#include <windows.h>
#endif
// Macro ladder that mass-registers int flags for the misspelling-hint cap
// test: F defines one flag, F1 five, F2 twenty-five, F3 one hundred
// twenty-five. FLAG_MULT(TEST_FLAG_HEADER) below therefore registers 125
// flags named FLAG_HEADER_111 ... FLAG_HEADER_555.
#define FLAG_MULT(x) F3(x)
#define TEST_FLAG_HEADER FLAG_HEADER_
#define F(name) ABSL_FLAG(int, name, 0, "")
#define F1(name) \
  F(name##1);    \
  F(name##2);    \
  F(name##3);    \
  F(name##4);    \
  F(name##5)
#define F2(name) \
  F1(name##1);   \
  F1(name##2);   \
  F1(name##3);   \
  F1(name##4);   \
  F1(name##5)
#define F3(name) \
  F2(name##1);   \
  F2(name##2);   \
  F2(name##3);   \
  F2(name##4);   \
  F2(name##5)
FLAG_MULT(TEST_FLAG_HEADER);
namespace {
using absl::base_internal::ScopedSetEnv;
// User-defined flag type exercising the AbslParseFlag/AbslUnparseFlag
// extension points.
struct UDT {
  UDT() = default;
  UDT(const UDT&) = default;
  UDT& operator=(const UDT&) = default;
  UDT(int v) : value(v) {}
  int value;
};
// Parses a UDT from text: only "A" (-> 1) and "AAA" (-> 10) are accepted;
// any other input fails with a fixed error message (asserted by the death
// tests below).
bool AbslParseFlag(absl::string_view in, UDT* udt, std::string* err) {
  if (in == "A") {
    udt->value = 1;
    return true;
  }
  if (in == "AAA") {
    udt->value = 10;
    return true;
  }
  *err = "Use values A, AAA instead";
  return false;
}
// Inverse of AbslParseFlag: 1 maps back to "A", everything else to "AAA".
std::string AbslUnparseFlag(const UDT& udt) {
  return udt.value == 1 ? "A" : "AAA";
}
// Reads an environment variable, returning "" when it is unset (or, on
// Windows, when the value does not fit in a MAX_PATH buffer).
std::string GetTestTmpDirEnvVar(const char* const env_var_name) {
#ifdef _WIN32
  char buf[MAX_PATH];
  // Returns the number of chars copied, 0 on failure, or the required size
  // (>= sizeof(buf)) when the buffer is too small.
  auto get_res = GetEnvironmentVariableA(env_var_name, buf, sizeof(buf));
  if (get_res >= sizeof(buf) || get_res == 0) {
    return "";
  }
  return std::string(buf, get_res);
#else
  const char* val = ::getenv(env_var_name);
  if (val == nullptr) {
    return "";
  }
  return val;
#endif
}
// Returns a writable temp directory path with a trailing separator,
// resolved once per process (the immediately-invoked lambda runs only on
// first call). Preference order: $TEST_TMPDIR, $TMPDIR, then a freshly
// created platform-specific temp directory. Dies if none can be obtained.
const std::string& GetTestTempDir() {
  static std::string* temp_dir_name = []() -> std::string* {
    std::string* res = new std::string(GetTestTmpDirEnvVar("TEST_TMPDIR"));
    if (res->empty()) {
      *res = GetTestTmpDirEnvVar("TMPDIR");
    }
    if (res->empty()) {
#ifdef _WIN32
      char temp_path_buffer[MAX_PATH];
      auto len = GetTempPathA(MAX_PATH, temp_path_buffer);
      if (len < MAX_PATH && len != 0) {
        std::string temp_dir_name = temp_path_buffer;
        if (!absl::EndsWith(temp_dir_name, "\\")) {
          temp_dir_name.push_back('\\');
        }
        // Make the directory unique per test process.
        absl::StrAppend(&temp_dir_name, "parse_test.", GetCurrentProcessId());
        if (CreateDirectoryA(temp_dir_name.c_str(), nullptr)) {
          *res = temp_dir_name;
        }
      }
#else
      char temp_dir_template[] = "/tmp/parse_test.XXXXXX";
      if (auto* unique_name = ::mkdtemp(temp_dir_template)) {
        *res = unique_name;
      }
#endif
    }
    if (res->empty()) {
      LOG(FATAL) << "Failed to make temporary directory for data files";
    }
    // Normalize to always end with a path separator so callers can simply
    // concatenate file names.
#ifdef _WIN32
    *res += "\\";
#else
    *res += "/";
#endif
    return res;
  }();
  return *temp_dir_name;
}
// Describes a flagfile to be materialized in the temp dir: its file name
// plus the lines to write ("$0" in a line is substituted with the temp dir
// path by GetFlagfileFlag below).
struct FlagfileData {
  const absl::string_view file_name;
  const absl::Span<const char* const> file_lines;
};
// Flagfile #1: mixes comments, blank lines, and flag assignments with
// assorted leading/trailing whitespace (note string_flag's value keeps its
// trailing space).
constexpr const char* const ff1_data[] = {
    "# comment    ",
    "  # comment  ",
    "",
    "  ",
    "--int_flag=-1",
    "  --string_flag=q2w2  ",
    "  ##   ",
    "  --double_flag=0.1",
    "--bool_flag=Y  "
};
// Flagfile #2: exercises retired ("legacy") flags and the --no<flag>
// negative form.
constexpr const char* const ff2_data[] = {
    "# Setting legacy flag",
    "--legacy_int=1111",
    "--legacy_bool",
    "--nobool_flag",
    "--legacy_str=aqsw",
    "--int_flag=100",
    "   ## ============="
};
// Writes each described flagfile into the test temp dir and builds a
// comma-separated "--flagfile=<path1>,<path2>,..." argument in
// `flagfile_flag`. Returns a pointer into `flagfile_flag`, which the caller
// must keep alive while the returned char* is in use.
const char* GetFlagfileFlag(const std::vector<FlagfileData>& ffd,
                            std::string& flagfile_flag) {
  flagfile_flag = "--flagfile=";
  bool first_file = true;
  for (const auto& data : ffd) {
    const std::string file_path =
        absl::StrCat(GetTestTempDir(), data.file_name);
    {
      // Scope the stream so the file is flushed/closed before it is read.
      std::ofstream out(file_path);
      for (const char* line : data.file_lines) {
        // "$0" inside a line expands to the temp dir path.
        out << absl::Substitute(line, GetTestTempDir()) << "\n";
      }
    }
    if (!first_file) {
      absl::StrAppend(&flagfile_flag, ",");
    }
    absl::StrAppend(&flagfile_flag, file_path);
    first_file = false;
  }
  return flagfile_flag.c_str();
}
}
ABSL_FLAG(int, int_flag, 1, "");
ABSL_FLAG(double, double_flag, 1.1, "");
ABSL_FLAG(std::string, string_flag, "a", "");
ABSL_FLAG(bool, bool_flag, false, "");
ABSL_FLAG(UDT, udt_flag, -1, "");
ABSL_RETIRED_FLAG(int, legacy_int, 1, "");
ABSL_RETIRED_FLAG(bool, legacy_bool, false, "");
ABSL_RETIRED_FLAG(std::string, legacy_str, "l", "");
namespace {
namespace flags = absl::flags_internal;
using testing::AllOf;
using testing::ElementsAreArray;
using testing::HasSubstr;
// Shared fixture: saves/restores all flag values around each test via
// FlagSaver, resets help mode on teardown, and skips every test when flag
// names are compiled out (ABSL_FLAGS_STRIP_NAMES).
class ParseTest : public testing::Test {
 public:
  ~ParseTest() override { flags::SetFlagsHelpMode(flags::HelpMode::kNone); }
  void SetUp() override {
#if ABSL_FLAGS_STRIP_NAMES
    GTEST_SKIP() << "This test requires flag names to be present";
#endif
  }
 private:
  absl::FlagSaver flag_saver_;
};
// Calls the internal parse-only entry point on the whole input array and
// returns the HelpMode it deduced, discarding the positional/unrecognized
// outputs.
template <int N>
flags::HelpMode InvokeParseAbslOnlyImpl(const char* (&in_argv)[N]) {
  std::vector<char*> positional_args;
  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
  return flags::ParseAbseilFlagsOnlyImpl(N, const_cast<char**>(in_argv),
                                         positional_args, unrecognized_flags,
                                         flags::UsageFlagsAction::kHandleUsage);
}
// Invokes the public absl::ParseAbseilFlagsOnly on the full input array,
// discarding the outputs. Fix: the argc argument was hard-coded to 2, so for
// any array larger than two elements every argument past the first flag was
// silently ignored (e.g. the 3-element arrays in TestCheckArgsHandling);
// pass the true array size N instead.
template <int N>
void InvokeParseAbslOnly(const char* (&in_argv)[N]) {
  std::vector<char*> positional_args;
  std::vector<absl::UnrecognizedFlag> unrecognized_flags;
  absl::ParseAbseilFlagsOnly(N, const_cast<char**>(in_argv), positional_args,
                             unrecognized_flags);
}
// Runs the internal full parse with abort-on-undefined semantics, directing
// error/usage output to std::cerr. Returns the positional arguments.
template <int N>
std::vector<char*> InvokeParseCommandLineImpl(const char* (&in_argv)[N]) {
  return flags::ParseCommandLineImpl(
      N, const_cast<char**>(in_argv), flags::UsageFlagsAction::kHandleUsage,
      flags::OnUndefinedFlag::kAbortIfUndefined, std::cerr);
}
// Convenience wrapper around the public absl::ParseCommandLine, sized from
// the array type.
template <int N>
std::vector<char*> InvokeParse(const char* (&in_argv)[N]) {
  return absl::ParseCommandLine(N, const_cast<char**>(in_argv));
}
// Parses `in_argv` and verifies both the number of positional arguments
// (argv[0] plus `exp_position_args`) and the resulting values of the four
// basic test flags.
template <int N>
void TestParse(const char* (&in_argv)[N], int int_flag_value,
               double double_flag_val, absl::string_view string_flag_val,
               bool bool_flag_val, int exp_position_args = 0) {
  auto out_args = InvokeParse(in_argv);
  EXPECT_EQ(out_args.size(), 1 + exp_position_args);
  EXPECT_STREQ(out_args[0], "testbin");
  EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), int_flag_value);
  EXPECT_NEAR(absl::GetFlag(FLAGS_double_flag), double_flag_val, 0.0001);
  EXPECT_EQ(absl::GetFlag(FLAGS_string_flag), string_flag_val);
  EXPECT_EQ(absl::GetFlag(FLAGS_bool_flag), bool_flag_val);
}
// With no flags on the command line, only argv[0] survives as a positional
// argument.
TEST_F(ParseTest, TestEmptyArgv) {
  const char* in_argv[] = {"testbin"};
  auto out_args = InvokeParse(in_argv);
  EXPECT_EQ(out_args.size(), 1);
  EXPECT_STREQ(out_args[0], "testbin");
}
TEST_F(ParseTest, TestValidIntArg) {
const char* in_args1[] = {
"testbin",
"--int_flag=10",
};
TestParse(in_args1, 10, 1.1, "a", false);
const char* in_args2[] = {
"testbin",
"-int_flag=020",
};
TestParse(in_args2, 20, 1.1, "a", false);
const char* in_args3[] = {
"testbin",
"--int_flag",
"-30",
};
TestParse(in_args3, -30, 1.1, "a", false);
const char* in_args4[] = {
"testbin",
"-int_flag",
"0x21",
};
TestParse(in_args4, 33, 1.1, "a", false);
}
TEST_F(ParseTest, TestValidDoubleArg) {
const char* in_args1[] = {
"testbin",
"--double_flag=2.3",
};
TestParse(in_args1, 1, 2.3, "a", false);
const char* in_args2[] = {
"testbin",
"--double_flag=0x1.2",
};
TestParse(in_args2, 1, 1.125, "a", false);
const char* in_args3[] = {
"testbin",
"--double_flag",
"99.7",
};
TestParse(in_args3, 1, 99.7, "a", false);
const char* in_args4[] = {
"testbin",
"--double_flag",
"0x20.1",
};
TestParse(in_args4, 1, 32.0625, "a", false);
}
TEST_F(ParseTest, TestValidStringArg) {
const char* in_args1[] = {
"testbin",
"--string_flag=aqswde",
};
TestParse(in_args1, 1, 1.1, "aqswde", false);
const char* in_args2[] = {
"testbin",
"-string_flag=a=b=c",
};
TestParse(in_args2, 1, 1.1, "a=b=c", false);
const char* in_args3[] = {
"testbin",
"--string_flag",
"zaxscd",
};
TestParse(in_args3, 1, 1.1, "zaxscd", false);
const char* in_args4[] = {
"testbin",
"-string_flag",
"--int_flag",
};
TestParse(in_args4, 1, 1.1, "--int_flag", false);
const char* in_args5[] = {
"testbin",
"--string_flag",
"--no_a_flag=11",
};
TestParse(in_args5, 1, 1.1, "--no_a_flag=11", false);
}
TEST_F(ParseTest, TestValidBoolArg) {
const char* in_args1[] = {
"testbin",
"--bool_flag",
};
TestParse(in_args1, 1, 1.1, "a", true);
const char* in_args2[] = {
"testbin",
"--nobool_flag",
};
TestParse(in_args2, 1, 1.1, "a", false);
const char* in_args3[] = {
"testbin",
"--bool_flag=true",
};
TestParse(in_args3, 1, 1.1, "a", true);
const char* in_args4[] = {
"testbin",
"-bool_flag=false",
};
TestParse(in_args4, 1, 1.1, "a", false);
}
TEST_F(ParseTest, TestValidUDTArg) {
const char* in_args1[] = {
"testbin",
"--udt_flag=A",
};
InvokeParse(in_args1);
EXPECT_EQ(absl::GetFlag(FLAGS_udt_flag).value, 1);
const char* in_args2[] = {"testbin", "--udt_flag", "AAA"};
InvokeParse(in_args2);
EXPECT_EQ(absl::GetFlag(FLAGS_udt_flag).value, 10);
}
TEST_F(ParseTest, TestValidMultipleArg) {
const char* in_args1[] = {
"testbin", "--bool_flag", "--int_flag=2",
"--double_flag=0.1", "--string_flag=asd",
};
TestParse(in_args1, 2, 0.1, "asd", true);
const char* in_args2[] = {
"testbin", "--string_flag=", "--nobool_flag", "--int_flag",
"-011", "--double_flag", "-1e-2",
};
TestParse(in_args2, -11, -0.01, "", false);
const char* in_args3[] = {
"testbin", "--int_flag", "-0", "--string_flag", "\"\"",
"--bool_flag=true", "--double_flag=1e18",
};
TestParse(in_args3, 0, 1e18, "\"\"", true);
}
TEST_F(ParseTest, TestPositionalArgs) {
const char* in_args1[] = {
"testbin",
"p1",
"p2",
};
TestParse(in_args1, 1, 1.1, "a", false, 2);
auto out_args1 = InvokeParse(in_args1);
EXPECT_STREQ(out_args1[1], "p1");
EXPECT_STREQ(out_args1[2], "p2");
const char* in_args2[] = {
"testbin",
"--int_flag=2",
"p1",
};
TestParse(in_args2, 2, 1.1, "a", false, 1);
auto out_args2 = InvokeParse(in_args2);
EXPECT_STREQ(out_args2[1], "p1");
const char* in_args3[] = {"testbin", "p1", "--int_flag=3",
"p2", "--bool_flag", "true"};
TestParse(in_args3, 3, 1.1, "a", true, 3);
auto out_args3 = InvokeParse(in_args3);
EXPECT_STREQ(out_args3[1], "p1");
EXPECT_STREQ(out_args3[2], "p2");
EXPECT_STREQ(out_args3[3], "true");
const char* in_args4[] = {
"testbin",
"--",
"p1",
"p2",
};
TestParse(in_args4, 3, 1.1, "a", true, 2);
auto out_args4 = InvokeParse(in_args4);
EXPECT_STREQ(out_args4[1], "p1");
EXPECT_STREQ(out_args4[2], "p2");
const char* in_args5[] = {
"testbin", "p1", "--int_flag=4", "--", "--bool_flag", "false", "p2",
};
TestParse(in_args5, 4, 1.1, "a", true, 4);
auto out_args5 = InvokeParse(in_args5);
EXPECT_STREQ(out_args5[1], "p1");
EXPECT_STREQ(out_args5[2], "--bool_flag");
EXPECT_STREQ(out_args5[3], "false");
EXPECT_STREQ(out_args5[4], "p2");
}
using ParseDeathTest = ParseTest;
TEST_F(ParseDeathTest, TestUndefinedArg) {
const char* in_args1[] = {
"testbin",
"--undefined_flag",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1),
"Unknown command line flag 'undefined_flag'");
const char* in_args2[] = {
"testbin",
"--noprefixed_flag",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2),
"Unknown command line flag 'noprefixed_flag'");
const char* in_args3[] = {
"testbin",
"--Int_flag=1",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3),
"Unknown command line flag 'Int_flag'");
}
TEST_F(ParseDeathTest, TestInvalidBoolFlagFormat) {
const char* in_args1[] = {
"testbin",
"--bool_flag=",
};
EXPECT_DEATH_IF_SUPPORTED(
InvokeParse(in_args1),
"Missing the value after assignment for the boolean flag 'bool_flag'");
const char* in_args2[] = {
"testbin",
"--nobool_flag=true",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2),
"Negative form with assignment is not valid for the boolean "
"flag 'bool_flag'");
}
TEST_F(ParseDeathTest, TestInvalidNonBoolFlagFormat) {
const char* in_args1[] = {
"testbin",
"--nostring_flag",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1),
"Negative form is not valid for the flag 'string_flag'");
const char* in_args2[] = {
"testbin",
"--int_flag",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2),
"Missing the value for the flag 'int_flag'");
}
TEST_F(ParseDeathTest, TestInvalidUDTFlagFormat) {
const char* in_args1[] = {
"testbin",
"--udt_flag=1",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1),
"Illegal value '1' specified for flag 'udt_flag'; Use values A, "
"AAA instead");
const char* in_args2[] = {
"testbin",
"--udt_flag",
"AA",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2),
"Illegal value 'AA' specified for flag 'udt_flag'; Use values "
"A, AAA instead");
}
TEST_F(ParseDeathTest, TestFlagSuggestions) {
const char* in_args1[] = {
"testbin",
"--legacy_boo",
};
EXPECT_DEATH_IF_SUPPORTED(
InvokeParse(in_args1),
"Unknown command line flag 'legacy_boo'. Did you mean: legacy_bool ?");
const char* in_args2[] = {"testbin", "--foo", "--undefok=foo1"};
EXPECT_DEATH_IF_SUPPORTED(
InvokeParse(in_args2),
"Unknown command line flag 'foo'. Did you mean: foo1 \\(undefok\\)?");
const char* in_args3[] = {
"testbin",
"--nolegacy_ino",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3),
"Unknown command line flag 'nolegacy_ino'. Did "
"you mean: nolegacy_bool, legacy_int ?");
}
TEST_F(ParseTest, GetHints) {
EXPECT_THAT(absl::flags_internal::GetMisspellingHints("legacy_boo"),
testing::ContainerEq(std::vector<std::string>{"legacy_bool"}));
EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_itn"),
testing::ContainerEq(std::vector<std::string>{"legacy_int"}));
EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_int1"),
testing::ContainerEq(std::vector<std::string>{"legacy_int"}));
EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_int"),
testing::ContainerEq(std::vector<std::string>{"legacy_int"}));
EXPECT_THAT(absl::flags_internal::GetMisspellingHints("nolegacy_ino"),
testing::ContainerEq(
std::vector<std::string>{"nolegacy_bool", "legacy_int"}));
EXPECT_THAT(
absl::flags_internal::GetMisspellingHints("FLAG_HEADER_000").size(), 100);
}
TEST_F(ParseTest, TestLegacyFlags) {
const char* in_args1[] = {
"testbin",
"--legacy_int=11",
};
TestParse(in_args1, 1, 1.1, "a", false);
const char* in_args2[] = {
"testbin",
"--legacy_bool",
};
TestParse(in_args2, 1, 1.1, "a", false);
const char* in_args3[] = {
"testbin", "--legacy_int", "22", "--int_flag=2",
"--legacy_bool", "true", "--legacy_str", "--string_flag=qwe",
};
TestParse(in_args3, 2, 1.1, "a", false, 1);
}
TEST_F(ParseTest, TestSimpleValidFlagfile) {
std::string flagfile_flag;
const char* in_args1[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff1", absl::MakeConstSpan(ff1_data)}},
flagfile_flag),
};
TestParse(in_args1, -1, 0.1, "q2w2 ", true);
const char* in_args2[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff2", absl::MakeConstSpan(ff2_data)}},
flagfile_flag),
};
TestParse(in_args2, 100, 0.1, "q2w2 ", false);
}
TEST_F(ParseTest, TestValidMultiFlagfile) {
std::string flagfile_flag;
const char* in_args1[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff2", absl::MakeConstSpan(ff2_data)},
{"parse_test.ff1", absl::MakeConstSpan(ff1_data)}},
flagfile_flag),
};
TestParse(in_args1, -1, 0.1, "q2w2 ", true);
}
TEST_F(ParseTest, TestFlagfileMixedWithRegularFlags) {
std::string flagfile_flag;
const char* in_args1[] = {
"testbin", "--int_flag=3",
GetFlagfileFlag({{"parse_test.ff1", absl::MakeConstSpan(ff1_data)}},
flagfile_flag),
"-double_flag=0.2"};
TestParse(in_args1, -1, 0.2, "q2w2 ", true);
}
TEST_F(ParseTest, TestFlagfileInFlagfile) {
std::string flagfile_flag;
constexpr const char* const ff3_data[] = {
"--flagfile=$0/parse_test.ff1",
"--flagfile=$0/parse_test.ff2",
};
GetFlagfileFlag({{"parse_test.ff2", absl::MakeConstSpan(ff2_data)},
{"parse_test.ff1", absl::MakeConstSpan(ff1_data)}},
flagfile_flag);
const char* in_args1[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff3", absl::MakeConstSpan(ff3_data)}},
flagfile_flag),
};
TestParse(in_args1, 100, 0.1, "q2w2 ", false);
}
TEST_F(ParseDeathTest, TestInvalidFlagfiles) {
std::string flagfile_flag;
constexpr const char* const ff4_data[] = {
"--unknown_flag=10"
};
const char* in_args1[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff4",
absl::MakeConstSpan(ff4_data)}}, flagfile_flag),
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1),
"Unknown command line flag 'unknown_flag'");
constexpr const char* const ff5_data[] = {
"--int_flag 10",
};
const char* in_args2[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff5",
absl::MakeConstSpan(ff5_data)}}, flagfile_flag),
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args2),
"Unknown command line flag 'int_flag 10'");
constexpr const char* const ff6_data[] = {
"--int_flag=10", "--", "arg1", "arg2", "arg3",
};
const char* in_args3[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff6", absl::MakeConstSpan(ff6_data)}},
flagfile_flag),
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args3),
"Flagfile can't contain position arguments or --");
const char* in_args4[] = {
"testbin",
"--flagfile=invalid_flag_file",
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args4),
"Can't open flagfile invalid_flag_file");
constexpr const char* const ff7_data[] = {
"--int_flag=10",
"*bin*",
"--str_flag=aqsw",
};
const char* in_args5[] = {
"testbin",
GetFlagfileFlag({{"parse_test.ff7", absl::MakeConstSpan(ff7_data)}},
flagfile_flag),
};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args5),
"Unexpected line in the flagfile .*: \\*bin\\*");
}
TEST_F(ParseTest, TestReadingRequiredFlagsFromEnv) {
const char* in_args1[] = {"testbin",
"--fromenv=int_flag,bool_flag,string_flag"};
ScopedSetEnv set_int_flag("FLAGS_int_flag", "33");
ScopedSetEnv set_bool_flag("FLAGS_bool_flag", "True");
ScopedSetEnv set_string_flag("FLAGS_string_flag", "AQ12");
TestParse(in_args1, 33, 1.1, "AQ12", true);
}
TEST_F(ParseDeathTest, TestReadingUnsetRequiredFlagsFromEnv) {
const char* in_args1[] = {"testbin", "--fromenv=int_flag"};
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1),
"FLAGS_int_flag not found in environment");
}
TEST_F(ParseDeathTest, TestRecursiveFlagsFromEnv) {
const char* in_args1[] = {"testbin", "--fromenv=tryfromenv"};
ScopedSetEnv set_tryfromenv("FLAGS_tryfromenv", "int_flag");
EXPECT_DEATH_IF_SUPPORTED(InvokeParse(in_args1),
"Infinite recursion on flag tryfromenv");
}
TEST_F(ParseTest, TestReadingOptionalFlagsFromEnv) {
const char* in_args1[] = {
"testbin", "--tryfromenv=int_flag,bool_flag,string_flag,other_flag"};
ScopedSetEnv set_int_flag("FLAGS_int_flag", "17");
ScopedSetEnv set_bool_flag("FLAGS_bool_flag", "Y");
TestParse(in_args1, 17, 1.1, "a", true);
}
TEST_F(ParseTest, TestReadingFlagsFromEnvMoxedWithRegularFlags) {
const char* in_args1[] = {
"testbin",
"--bool_flag=T",
"--tryfromenv=int_flag,bool_flag",
"--int_flag=-21",
};
ScopedSetEnv set_int_flag("FLAGS_int_flag", "-15");
ScopedSetEnv set_bool_flag("FLAGS_bool_flag", "F");
TestParse(in_args1, -21, 1.1, "a", false);
}
TEST_F(ParseDeathTest, TestSimpleHelpFlagHandling) {
const char* in_args1[] = {
"testbin",
"--help",
};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kImportant);
EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(1), "");
const char* in_args2[] = {
"testbin",
"--help",
"--int_flag=3",
};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kImportant);
EXPECT_EQ(absl::GetFlag(FLAGS_int_flag), 3);
const char* in_args3[] = {"testbin", "--help", "some_positional_arg"};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args3), flags::HelpMode::kImportant);
}
TEST_F(ParseTest, TestSubstringHelpFlagHandling) {
const char* in_args1[] = {
"testbin",
"--help=abcd",
};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kMatch);
EXPECT_EQ(flags::GetFlagsHelpMatchSubstr(), "abcd");
}
TEST_F(ParseDeathTest, TestVersionHandling) {
const char* in_args1[] = {
"testbin",
"--version",
};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kVersion);
}
TEST_F(ParseTest, TestCheckArgsHandling) {
const char* in_args1[] = {"testbin", "--only_check_args", "--int_flag=211"};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args1), flags::HelpMode::kOnlyCheckArgs);
EXPECT_EXIT(InvokeParseAbslOnly(in_args1), testing::ExitedWithCode(0), "");
EXPECT_EXIT(InvokeParse(in_args1), testing::ExitedWithCode(0), "");
const char* in_args2[] = {"testbin", "--only_check_args", "--unknown_flag=a"};
EXPECT_EQ(InvokeParseAbslOnlyImpl(in_args2), flags::HelpMode::kOnlyCheckArgs);
EXPECT_EXIT(InvokeParseAbslOnly(in_args2), testing::ExitedWithCode(0), "");
EXPECT_EXIT(InvokeParse(in_args2), testing::ExitedWithCode(1), "");
}
TEST_F(ParseTest, WasPresentOnCommandLine) {
const char* in_args1[] = {
"testbin", "arg1", "--bool_flag",
"--int_flag=211", "arg2", "--double_flag=1.1",
"--string_flag", "asd", "--",
"--some_flag", "arg4",
};
InvokeParse(in_args1);
EXPECT_TRUE(flags::WasPresentOnCommandLine("bool_flag"));
EXPECT_TRUE(flags::WasPresentOnCommandLine("int_flag"));
EXPECT_TRUE(flags::WasPresentOnCommandLine("double_flag"));
EXPECT_TRUE(flags::WasPresentOnCommandLine("string_flag"));
EXPECT_FALSE(flags::WasPresentOnCommandLine("some_flag"));
EXPECT_FALSE(flags::WasPresentOnCommandLine("another_flag"));
}
TEST_F(ParseTest, ParseAbseilFlagsOnlySuccess) {
const char* in_args[] = {
"testbin",
"arg1",
"--bool_flag",
"--int_flag=211",
"arg2",
"--double_flag=1.1",
"--undef_flag1",
"--undef_flag2=123",
"--string_flag",
"asd",
"--",
"--some_flag",
"arg4",
};
std::vector<char*> positional_args;
std::vector<absl::UnrecognizedFlag> unrecognized_flags;
absl::ParseAbseilFlagsOnly(13, const_cast<char**>(in_args), positional_args,
unrecognized_flags);
EXPECT_THAT(positional_args,
ElementsAreArray(
{absl::string_view("testbin"), absl::string_view("arg1"),
absl::string_view("arg2"), absl::string_view("--some_flag"),
absl::string_view("arg4")}));
EXPECT_THAT(unrecognized_flags,
ElementsAreArray(
{absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
"undef_flag1"),
absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
"undef_flag2")}));
}
TEST_F(ParseDeathTest, ParseAbseilFlagsOnlyFailure) {
const char* in_args[] = {
"testbin",
"--int_flag=21.1",
};
EXPECT_DEATH_IF_SUPPORTED(
InvokeParseAbslOnly(in_args),
"Illegal value '21.1' specified for flag 'int_flag'");
}
TEST_F(ParseTest, UndefOkFlagsAreIgnored) {
const char* in_args[] = {
"testbin", "--undef_flag1",
"--undef_flag2=123", "--undefok=undef_flag2",
"--undef_flag3", "value",
};
std::vector<char*> positional_args;
std::vector<absl::UnrecognizedFlag> unrecognized_flags;
absl::ParseAbseilFlagsOnly(6, const_cast<char**>(in_args), positional_args,
unrecognized_flags);
EXPECT_THAT(positional_args, ElementsAreArray({absl::string_view("testbin"),
absl::string_view("value")}));
EXPECT_THAT(unrecognized_flags,
ElementsAreArray(
{absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
"undef_flag1"),
absl::UnrecognizedFlag(absl::UnrecognizedFlag::kFromArgv,
"undef_flag3")}));
}
TEST_F(ParseTest, AllUndefOkFlagsAreIgnored) {
const char* in_args[] = {
"testbin",
"--undef_flag1",
"--undef_flag2=123",
"--undefok=undef_flag2,undef_flag1,undef_flag3",
"--undef_flag3",
"value",
"--",
"--undef_flag4",
};
std::vector<char*> positional_args;
std::vector<absl::UnrecognizedFlag> unrecognized_flags;
absl::ParseAbseilFlagsOnly(8, const_cast<char**>(in_args), positional_args,
unrecognized_flags);
EXPECT_THAT(positional_args,
ElementsAreArray({absl::string_view("testbin"),
absl::string_view("value"),
absl::string_view("--undef_flag4")}));
EXPECT_THAT(unrecognized_flags, testing::IsEmpty());
}
TEST_F(ParseDeathTest, ExitOnUnrecognizedFlagPrintsHelp) {
const char* in_args[] = {
"testbin",
"--undef_flag1",
"--help=int_flag",
};
EXPECT_EXIT(InvokeParseCommandLineImpl(in_args), testing::ExitedWithCode(1),
AllOf(HasSubstr("Unknown command line flag 'undef_flag1'"),
HasSubstr("Try --helpfull to get a list of all flags")));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/parse.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/parse_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ba3ccac4-9de1-481a-a94c-43c91831dbcf | cpp | tensorflow/tensorflow | tfdataz_metrics | tensorflow/core/data/tfdataz_metrics.cc | tensorflow/core/data/tfdataz_metrics_test.cc | #include "tensorflow/core/data/tfdataz_metrics.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
// Constructs an estimator whose cumulative counters and per-minute ring
// buffer all start at zero, so queries before any sample report zero
// latency.
ApproximateLatencyEstimator::ApproximateLatencyEstimator(const Env& env)
    : env_(env),
      last_updated_time_mins_(0),
      latency_value_counter_(0),
      latency_count_counter_(0),
      next_slot_(0) {
  for (int slot = 0; slot < kSlots; ++slot) {
    latency_value_[slot] = 0;
    latency_count_[slot] = 0;
  }
}
// Records one latency sample (microseconds) into the cumulative counters.
// The ring buffer is rolled forward first — UpdateRingBuffer acquires mu_
// itself, which is why the lock here is taken only afterwards — so the
// sample is attributed to the current minute.
void ApproximateLatencyEstimator::AddLatency(const int64_t latency_usec)
    TF_LOCKS_EXCLUDED(mu_) {
  UpdateRingBuffer();
  mutex_lock l(mu_);
  latency_value_counter_ += latency_usec;
  latency_count_counter_ += 1;
}
// Advances the ring buffer to the current wall-clock minute: for each whole
// minute elapsed since the last update (capped at kSlots, since older
// history would be overwritten anyway), snapshots the cumulative counters
// into the next slot.
void ApproximateLatencyEstimator::UpdateRingBuffer() TF_LOCKS_EXCLUDED(mu_) {
  int64_t now_minutes =
      absl::ToInt64Minutes(absl::Microseconds(env_.NowMicros()));
  mutex_lock l(mu_);
  int64_t elapsed_minutes = now_minutes - last_updated_time_mins_;
  int64_t minutes_to_update = std::min(elapsed_minutes, kSlots);
  // Each iteration fills one slot with the current cumulative totals; slots
  // skipped while idle thus all carry the same snapshot.
  for (int i = 0; i < minutes_to_update; ++i) {
    latency_value_[next_slot_] = latency_value_counter_;
    latency_count_[next_slot_] = latency_count_counter_;
    IncrementNextSlot();
  }
  last_updated_time_mins_ = now_minutes;
}
// Advances the write cursor one slot, wrapping around the ring buffer.
void ApproximateLatencyEstimator::IncrementNextSlot()
    TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  next_slot_ = (next_slot_ + 1) % kSlots;
}
// Returns the slot index `steps` positions behind the write cursor, i.e.
// the snapshot taken `steps` minutes ago (assumes steps <= kSlots).
int ApproximateLatencyEstimator::PrevSlot(int steps)
    TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
  return (next_slot_ - steps + kSlots) % kSlots;
}
// Returns the average latency over the trailing window of `duration`
// minutes, computed from the difference between the current cumulative
// counters and the snapshot taken `duration` slots ago. Returns zero when
// no samples fall inside the window. The Duration enum's numeric value is
// the window length in minutes (it is cast directly to a slot offset).
absl::Duration ApproximateLatencyEstimator::GetAverageLatency(Duration duration)
    TF_LOCKS_EXCLUDED(mu_) {
  UpdateRingBuffer();
  mutex_lock l(mu_);
  double interval_latency =
      static_cast<double>(latency_value_counter_ -
                          latency_value_[PrevSlot(static_cast<int>(duration))]);
  double interval_count =
      static_cast<double>(latency_count_counter_ -
                          latency_count_[PrevSlot(static_cast<int>(duration))]);
  if (interval_count == 0) {
    return absl::ZeroDuration();
  }
  return absl::Duration(absl::Microseconds(interval_latency)) / interval_count;
}
// Collects tfdataz metrics for one iterator. `iterator` is borrowed (not
// owned) and must outlive the collector; `model` may be null.
TfDatazMetricsCollector::TfDatazMetricsCollector(
    const Env& env, DatasetBaseIterator* iterator,
    std::shared_ptr<model::Model> model)
    : iterator_(iterator), model_(std::move(model)), latency_estimator_(env) {}
// Records one GetNext() latency sample. Non-positive measurements carry no
// timing information and are dropped.
void TfDatazMetricsCollector::RecordGetNextLatency(
    int64_t get_next_latency_usec) {
  if (get_next_latency_usec <= 0) {
    return;
  }
  latency_estimator_.AddLatency(get_next_latency_usec);
}
// Average GetNext() latency over the trailing 1-minute window.
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastOneMinute() {
  return latency_estimator_.GetAverageLatency(
      ApproximateLatencyEstimator::Duration::kMinute);
}
// Average GetNext() latency over the trailing 5-minute window.
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastFiveMinutes() {
  return latency_estimator_.GetAverageLatency(
      ApproximateLatencyEstimator::Duration::kFiveMinutes);
}
// Average GetNext() latency over the trailing 60-minute window.
absl::Duration TfDatazMetricsCollector::GetAverageLatencyForLastSixtyMinutes() {
  return latency_estimator_.GetAverageLatency(
      ApproximateLatencyEstimator::Duration::kSixtyMinutes);
}
// Returns the user-assigned dataset name from the dataset's options, or
// nullopt when no name was set.
std::optional<std::string> TfDatazMetricsCollector::DatasetName() {
  const auto options = iterator_->dataset()->options();
  if (!options.has_dataset_name()) {
    return std::nullopt;
  }
  return std::make_optional(options.dataset_name());
}
// Total bytes currently buffered across the iterator tree.
int64_t TfDatazMetricsCollector::GetIteratorTotalMemoryUsage() {
  return iterator_->TotalBufferedBytes();
}
// Returns the autotuning model shared with this collector (may be null).
std::shared_ptr<model::Model> TfDatazMetricsCollector::GetModel() {
  return model_;
}
namespace {
// Guards the process-wide collector set below. LINKER_INITIALIZED makes the
// mutex usable before dynamic initialization runs.
static mutex* get_tfdataz_metrics_registry_lock() {
  static mutex tfdataz_metrics_registry_lock(LINKER_INITIALIZED);
  return &tfdataz_metrics_registry_lock;
}
using TfDatazMetricsCollectors =
    absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>;
// Process-wide registry of live collectors. Intentionally leaked (never
// destroyed) so it remains valid during program shutdown.
TfDatazMetricsCollectors& tfdataz_metric_collectors() {
  static auto& collectors = *new TfDatazMetricsCollectors();
  return collectors;
}
}
// Adds a collector to the global registry (shared ownership; duplicate
// inserts are no-ops for the same shared_ptr).
void TfDatazMetricsRegistry::Register(
    std::shared_ptr<TfDatazMetricsCollector> collector) {
  mutex_lock l(*get_tfdataz_metrics_registry_lock());
  tfdataz_metric_collectors().insert(collector);
}
// Removes a collector from the global registry (no-op if absent).
void TfDatazMetricsRegistry::Deregister(
    std::shared_ptr<TfDatazMetricsCollector> collector) {
  mutex_lock l(*get_tfdataz_metrics_registry_lock());
  tfdataz_metric_collectors().erase(collector);
}
// Returns a snapshot (copy) of the registry, so callers can iterate without
// holding the registry lock.
absl::flat_hash_set<std::shared_ptr<TfDatazMetricsCollector>>
TfDatazMetricsRegistry::GetIteratorMetricCollectors() {
  mutex_lock l(*get_tfdataz_metrics_registry_lock());
  return tfdataz_metric_collectors();
}
}
} | #include "tensorflow/core/data/tfdataz_metrics.h"
#include <memory>
#include <utility>
#include "absl/time/time.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/fake_clock_env.h"
namespace tensorflow {
namespace data {
namespace {
static int64_t k1MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(1));
static int64_t k2MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(2));
static int64_t k5MinutesInMicros = absl::ToInt64Microseconds(absl::Minutes(5));
static int64_t k59MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(59));
static int64_t k60MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(60));
static int64_t k61MinutesInMicros =
absl::ToInt64Microseconds(absl::Minutes(61));
// Fixture that wires a TfDatazMetricsCollector to a fake clock so that the
// latency windows can be advanced deterministically.
class TfDatazMetricsTest : public ::testing::Test {
 protected:
  void SetUp() override {
    env_ = std::make_unique<FakeClockEnv>(Env::Default());
    // iterator_ is never initialized, so the collector gets a null iterator;
    // these tests only exercise the latency-tracking paths.
    tfdataz_metrics_ = std::make_unique<TfDatazMetricsCollector>(
        *env_, iterator_.get(), nullptr);
  }
  void TearDown() override {
    // NOTE(review): env_ is destroyed before tfdataz_metrics_, which was
    // constructed with a reference to *env_ — safe only if the collector's
    // destructor does not touch the Env. Confirm against the collector impl.
    env_.reset();
    tfdataz_metrics_.reset();
  }
  std::unique_ptr<DatasetBaseIterator> iterator_;
  std::unique_ptr<FakeClockEnv> env_;
  std::unique_ptr<TfDatazMetricsCollector> tfdataz_metrics_;
};
// All three samples land at the same fake-clock instant, so the one-minute
// average is their mean: (1 + 2 + 3) / 3 = 2.
TEST_F(TfDatazMetricsTest, RecordGetNextLatency) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  tfdataz_metrics_->RecordGetNextLatency(2);
  tfdataz_metrics_->RecordGetNextLatency(3);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
                  2.0);
}
// The first sample is recorded two minutes before the last two, so it ages
// out of the one-minute window: average = (2 + 3) / 2 = 2.5.
TEST_F(TfDatazMetricsTest, GetAverageLatencyForLastOneMinute) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  env_->AdvanceByMicroseconds(k2MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(2);
  tfdataz_metrics_->RecordGetNextLatency(3);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
                  2.5);
}
// Advancing exactly five minutes drops the first sample from the five-minute
// window: average = (4 + 5 + 6) / 3 = 5.
TEST_F(TfDatazMetricsTest, GetAverageLatencyForLastFiveMinutes) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  env_->AdvanceByMicroseconds(k5MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(4);
  tfdataz_metrics_->RecordGetNextLatency(5);
  tfdataz_metrics_->RecordGetNextLatency(6);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
                  5.0);
}
// Advancing exactly sixty minutes drops the first sample from the
// sixty-minute window: average = (4 + 5 + 6) / 3 = 5.
TEST_F(TfDatazMetricsTest,
       GetAverageLatencyForLastSixtyMinutesWithAdvanceBySixtyMinutes) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  env_->AdvanceByMicroseconds(k60MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(4);
  tfdataz_metrics_->RecordGetNextLatency(5);
  tfdataz_metrics_->RecordGetNextLatency(6);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  5.0);
}
// At fifty-nine minutes the first sample is still inside the window:
// average = (1 + 4 + 5 + 6) / 4 = 4.
TEST_F(TfDatazMetricsTest,
       GetAverageLatencyForLastSixtyMinutesWithAdvanceByFiftyNineMinutes) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  env_->AdvanceByMicroseconds(k59MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(4);
  tfdataz_metrics_->RecordGetNextLatency(5);
  tfdataz_metrics_->RecordGetNextLatency(6);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  4.0);
}
// At sixty-one minutes the first sample has aged out:
// average = (2 + 3 + 4) / 3 = 3.
TEST_F(TfDatazMetricsTest,
       GetAverageLatencyForLastSixtyMinutesWithAdvanceBySixtyOneMinutes) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  env_->AdvanceByMicroseconds(k61MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(2);
  tfdataz_metrics_->RecordGetNextLatency(3);
  tfdataz_metrics_->RecordGetNextLatency(4);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  3.0);
}
// Interleaves recordings with clock advances and checks all three windows at
// each step: samples age out of the shorter windows first.
TEST_F(TfDatazMetricsTest, GetMultipleAverageLatencies) {
  tfdataz_metrics_->RecordGetNextLatency(1);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
                  1.0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
                  1.0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  1.0);
  // One minute later: sample 1 has left the one-minute window but remains in
  // the five- and sixty-minute windows.
  env_->AdvanceByMicroseconds(k1MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(2);
  tfdataz_metrics_->RecordGetNextLatency(3);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
                  2.5);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
                  2.0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  2.0);
  // Sixty minutes later: only the freshly recorded samples remain in any
  // window.
  env_->AdvanceByMicroseconds(k60MinutesInMicros);
  tfdataz_metrics_->RecordGetNextLatency(4);
  tfdataz_metrics_->RecordGetNextLatency(5);
  tfdataz_metrics_->RecordGetNextLatency(6);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
                  5.0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
                  5.0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  5.0);
}
// With no samples recorded, every window reports a zero average rather than
// dividing by zero.
TEST_F(TfDatazMetricsTest, GetAverageLatencyWithZeroGetNextCalls) {
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastOneMinute()),
                  0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastFiveMinutes()),
                  0);
  EXPECT_FLOAT_EQ(absl::ToDoubleMicroseconds(
                      tfdataz_metrics_->GetAverageLatencyForLastSixtyMinutes()),
                  0);
}
// RAII helper that registers a collector with TfDatazMetricsRegistry for the
// lifetime of a scope. Deregister() may be called early; deregistering twice
// is harmless because the registry stores a set.
class ScopedTfDataMetricsRegistration {
 public:
  explicit ScopedTfDataMetricsRegistration(
      std::shared_ptr<TfDatazMetricsCollector> collector)
      : collector_(std::move(collector)) {
    TfDatazMetricsRegistry::Register(collector_);
  }
  ~ScopedTfDataMetricsRegistration() {
    TfDatazMetricsRegistry::Deregister(collector_);
  }
  void Deregister() { TfDatazMetricsRegistry::Deregister(collector_); }
 private:
  std::shared_ptr<TfDatazMetricsCollector> collector_;
};
// Registering two distinct collectors yields a registry of size two; both are
// removed automatically when the scoped registrations go out of scope.
TEST(TfDatazMetricsRegistryTest, Register) {
  std::unique_ptr<DatasetBaseIterator> iterator;
  auto collector_one = std::make_shared<TfDatazMetricsCollector>(
      *Env::Default(), iterator.get(), nullptr);
  auto collector_two = std::make_shared<TfDatazMetricsCollector>(
      *Env::Default(), iterator.get(), nullptr);
  ScopedTfDataMetricsRegistration scoped_registration_one(collector_one);
  ScopedTfDataMetricsRegistration scoped_registration_two(collector_two);
  EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 2);
}
// Explicit Deregister() calls shrink the registry immediately; the later
// destructor-driven deregistrations of already-removed collectors are no-ops.
TEST(TfDatazMetricsRegistryTest, Deregister) {
  std::unique_ptr<DatasetBaseIterator> iterator;
  auto collector_one = std::make_shared<TfDatazMetricsCollector>(
      *Env::Default(), iterator.get(), nullptr);
  auto collector_two = std::make_shared<TfDatazMetricsCollector>(
      *Env::Default(), iterator.get(), nullptr);
  auto collector_three = std::make_shared<TfDatazMetricsCollector>(
      *Env::Default(), iterator.get(), nullptr);
  ScopedTfDataMetricsRegistration scoped_registration_one(collector_one);
  ScopedTfDataMetricsRegistration scoped_registration_two(collector_two);
  ScopedTfDataMetricsRegistration scoped_registration_three(collector_three);
  EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 3);
  scoped_registration_one.Deregister();
  EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 2);
  scoped_registration_two.Deregister();
  scoped_registration_three.Deregister();
  EXPECT_EQ(TfDatazMetricsRegistry::GetIteratorMetricCollectors().size(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/tfdataz_metrics.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/tfdataz_metrics_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3797474d-2ba5-43bc-bc6a-07e9099bc36a | cpp | google/quiche | quiche_file_utils | quiche/common/platform/api/quiche_file_utils.cc | quiche/common/platform/api/quiche_file_utils_test.cc | #include "quiche/common/platform/api/quiche_file_utils.h"
#include <optional>
#include <string>
#include <vector>
#include "quiche_platform_impl/quiche_file_utils_impl.h"
namespace quiche {
// Joins two path components using the platform-specific implementation
// (separator choice is delegated to JoinPathImpl).
std::string JoinPath(absl::string_view a, absl::string_view b) {
  return JoinPathImpl(a, b);
}
// Reads the entire contents of `file`; returns std::nullopt on failure.
std::optional<std::string> ReadFileContents(absl::string_view file) {
  return ReadFileContentsImpl(file);
}
// Lists the immediate children of `path`, appending subdirectory names to
// `directories` and file names to `files`. Returns false when `path` cannot
// be enumerated (e.g. missing or not a directory).
bool EnumerateDirectory(absl::string_view path,
                        std::vector<std::string>& directories,
                        std::vector<std::string>& files) {
  return EnumerateDirectoryImpl(path, directories, files);
}
// Recursive helper for EnumerateDirectoryRecursively(). Descends into every
// subdirectory of `path` first, then appends the full paths of the files
// found directly under `path` to `files`. Returns false when a directory
// cannot be read or `recursion_limit` has been exhausted.
bool EnumerateDirectoryRecursivelyInner(absl::string_view path,
                                        int recursion_limit,
                                        std::vector<std::string>& files) {
  if (recursion_limit < 0) return false;
  std::vector<std::string> child_files;
  std::vector<std::string> child_dirs;
  if (!EnumerateDirectory(path, child_dirs, child_files)) return false;
  for (const std::string& dir : child_dirs) {
    const bool ok = EnumerateDirectoryRecursivelyInner(
        JoinPath(path, dir), recursion_limit - 1, files);
    if (!ok) return false;
  }
  for (const std::string& name : child_files) {
    files.push_back(JoinPath(path, name));
  }
  return true;
}
// Recursively enumerates all files under `path`, appending their full paths
// to `files`. The traversal depth is capped at kRecursionLimit to bound
// recursion on pathological directory trees.
bool EnumerateDirectoryRecursively(absl::string_view path,
                                   std::vector<std::string>& files) {
  constexpr int kRecursionLimit = 20;
  return EnumerateDirectoryRecursivelyInner(path, kRecursionLimit, files);
}
} | #include "quiche/common/platform/api/quiche_file_utils.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/strip.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
namespace {
using testing::UnorderedElementsAre;
using testing::UnorderedElementsAreArray;
// Reads a known fixture file and checks its exact contents.
TEST(QuicheFileUtilsTest, ReadFileContents) {
  std::string path = absl::StrCat(QuicheGetCommonSourcePath(),
                                  "/platform/api/testdir/testfile");
  std::optional<std::string> contents = ReadFileContents(path);
  ASSERT_TRUE(contents.has_value());
  EXPECT_EQ(*contents, "This is a test file.");
}
// A missing file yields std::nullopt rather than an error or empty string.
TEST(QuicheFileUtilsTest, ReadFileContentsFileNotFound) {
  std::string path =
      absl::StrCat(QuicheGetCommonSourcePath(),
                   "/platform/api/testdir/file-that-does-not-exist");
  std::optional<std::string> contents = ReadFileContents(path);
  EXPECT_FALSE(contents.has_value());
}
// Enumerating the fixture directory splits entries into files and dirs.
TEST(QuicheFileUtilsTest, EnumerateDirectory) {
  std::string path =
      absl::StrCat(QuicheGetCommonSourcePath(), "/platform/api/testdir");
  std::vector<std::string> dirs;
  std::vector<std::string> files;
  bool success = EnumerateDirectory(path, dirs, files);
  EXPECT_TRUE(success);
  EXPECT_THAT(files, UnorderedElementsAre("testfile", "README.md"));
  EXPECT_THAT(dirs, UnorderedElementsAre("a"));
}
// Enumerating a nonexistent directory fails.
TEST(QuicheFileUtilsTest, EnumerateDirectoryNoSuchDirectory) {
  std::string path = absl::StrCat(QuicheGetCommonSourcePath(),
                                  "/platform/api/testdir/no-such-directory");
  std::vector<std::string> dirs;
  std::vector<std::string> files;
  bool success = EnumerateDirectory(path, dirs, files);
  EXPECT_FALSE(success);
}
// Enumerating a regular file (not a directory) also fails.
TEST(QuicheFileUtilsTest, EnumerateDirectoryNotADirectory) {
  std::string path = absl::StrCat(QuicheGetCommonSourcePath(),
                                  "/platform/api/testdir/testfile");
  std::vector<std::string> dirs;
  std::vector<std::string> files;
  bool success = EnumerateDirectory(path, dirs, files);
  EXPECT_FALSE(success);
}
// Recursive enumeration returns full paths. Expected paths are rewritten
// with backslashes when JoinPath indicates a Windows-style separator.
TEST(QuicheFileUtilsTest, EnumerateDirectoryRecursively) {
  std::vector<std::string> expected_paths = {"a/b/c/d/e", "a/subdir/testfile",
                                             "a/z", "testfile", "README.md"};
  std::string root_path =
      absl::StrCat(QuicheGetCommonSourcePath(), "/platform/api/testdir");
  for (std::string& path : expected_paths) {
    if (JoinPath("a", "b") == "a\\b") {
      absl::c_replace(path, '/', '\\');
    }
    path = JoinPath(root_path, path);
  }
  std::vector<std::string> files;
  bool success = EnumerateDirectoryRecursively(root_path, files);
  EXPECT_TRUE(success);
  EXPECT_THAT(files, UnorderedElementsAreArray(expected_paths));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_file_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_file_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
69e4171f-276f-4c20-a4b6-d6c7373352d8 | cpp | tensorflow/tensorflow | gloo_collectives | third_party/xla/xla/pjrt/cpu/gloo_collectives.cc | third_party/xla/xla/pjrt/cpu/gloo_collectives_test.cc | #include "xla/pjrt/cpu/gloo_collectives.h"
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <exception>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "gloo/algorithm.h"
#include "gloo/allgather.h"
#include "gloo/allreduce.h"
#include "gloo/context.h"
#include "gloo/math.h"
#include "gloo/reduce_scatter.h"
#include "gloo/rendezvous/context.h"
#include "gloo/rendezvous/prefix_store.h"
#include "gloo/rendezvous/store.h"
#include "gloo/transport/device.h"
#include "gloo/transport/unbound_buffer.h"
#include "gloo/types.h"
#include "xla/primitive_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/global_device_id.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla::cpu {
// Wraps an already-established gloo context; the context carries this
// process's rank and the clique size used by all collectives below.
GlooCollectivesCommunicator::GlooCollectivesCommunicator(
    std::shared_ptr<gloo::Context> context)
    : context_(std::move(context)) {}
GlooCollectivesCommunicator::~GlooCollectivesCommunicator() = default;
// Fills `options` with typed views of the input/output buffers and the gloo
// reduction function matching `reduction_kind`. MIN/MAX are rejected for
// complex element types, which have no ordering.
template <typename T>
static absl::Status SetAllReduceOptions(ReductionKind reduction_kind,
                                        const void* input_buffer,
                                        void* output_buffer,
                                        size_t num_elements,
                                        gloo::AllreduceOptions& options) {
  options.setInput(reinterpret_cast<T*>(const_cast<void*>(input_buffer)),
                   num_elements);
  options.setOutput(reinterpret_cast<T*>(const_cast<void*>(output_buffer)),
                    num_elements);
  using ReductionFn = void (*)(void*, const void*, const void*, size_t);
  switch (reduction_kind) {
    case ReductionKind::SUM:
      options.setReduceFunction(static_cast<ReductionFn>(&gloo::sum<T>));
      break;
    case ReductionKind::PRODUCT:
      options.setReduceFunction(static_cast<ReductionFn>(&gloo::product<T>));
      break;
    case ReductionKind::MIN:
      // gloo::min<T> does not exist for complex T, so the complex branch must
      // be excluded at compile time with if constexpr.
      if constexpr (!is_complex_v<T>) {
        options.setReduceFunction(static_cast<ReductionFn>(&gloo::min<T>));
      } else {
        return absl::InvalidArgumentError(
            "MIN reduction not supported for complex types");
      }
      break;
    case ReductionKind::MAX:
      if constexpr (!is_complex_v<T>) {
        options.setReduceFunction(static_cast<ReductionFn>(&gloo::max<T>));
      } else {
        return absl::InvalidArgumentError(
            "MAX reduction not supported for complex types");
      }
      break;
  }
  return absl::OkStatus();
}
// Runs a ring all-reduce over `num_elements` elements of `element_type`,
// dispatching to the typed SetAllReduceOptions helper. Gloo reports failures
// via exceptions, which are converted to absl::UnknownError.
absl::Status GlooCollectivesCommunicator::AllReduce(
    const RendezvousKey& key, ReductionKind reduction_kind,
    PrimitiveType element_type, size_t num_elements, const void* input_buffer,
    void* output_buffer, absl::Duration timeout) {
  gloo::AllreduceOptions options(context_);
  switch (element_type) {
    case S8:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<int8_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    // PRED (bool) shares the uint8_t code path.
    case PRED:
    case U8:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<uint8_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case S16:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<int16_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case U16:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<uint16_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case S32:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<int32_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case U32:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<uint32_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case S64:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<int64_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case U64:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<uint64_t>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case F16:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<gloo::float16>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case BF16:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<bfloat16>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case F32:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<float>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case F64:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<double>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case C64:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<std::complex<float>>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    case C128:
      TF_RETURN_IF_ERROR(SetAllReduceOptions<std::complex<double>>(
          reduction_kind, input_buffer, output_buffer, num_elements, options));
      break;
    default:
      return absl::InvalidArgumentError("Unknown datatype in allreduce");
  }
  options.setAlgorithm(gloo::AllreduceOptions::Algorithm::RING);
  options.setTimeout(absl::ToChronoMilliseconds(timeout));
  // Gloo surfaces errors as C++ exceptions; translate into a Status.
  try {
    gloo::allreduce(options);
  } catch (std::exception& e) {
    return absl::UnknownError(
        absl::StrCat("Gloo all-reduce failed: ", e.what()));
  }
  return absl::OkStatus();
}
// Slot prefix used to tag collective-permute point-to-point messages so they
// are namespaced apart from gloo's built-in collectives.
static constexpr uint8_t kCollectivePermuteSlotPrefix = 0x40;
// Sends `input_buffer` to each rank in `target_ranks` and receives into
// `output_buffer` from `source_rank`, if one is given. A self-send is
// satisfied with a local memcpy; with no source, the output is zero-filled.
absl::Status GlooCollectivesCommunicator::CollectivePermute(
    const RendezvousKey& key, size_t num_bytes, std::optional<int> source_rank,
    absl::Span<int const> target_ranks, const void* input_buffer,
    void* output_buffer, absl::Duration timeout) {
  uint32_t tag = 0;
  const auto slot = gloo::Slot::build(kCollectivePermuteSlotPrefix, tag);
  try {
    std::unique_ptr<gloo::transport::UnboundBuffer> in;
    std::unique_ptr<gloo::transport::UnboundBuffer> out;
    for (int target : target_ranks) {
      if (target != context_->rank) {
        VLOG(1) << "send from " << context_->rank << " to " << target;
        // The send buffer is created lazily and shared by all targets.
        if (!in) {
          in = context_->createUnboundBuffer(const_cast<void*>(input_buffer),
                                             num_bytes);
        }
        in->send(target, slot);
      }
    }
    if (source_rank) {
      if (*source_rank == context_->rank) {
        std::memcpy(output_buffer, input_buffer, num_bytes);
      } else {
        VLOG(1) << "recv at " << context_->rank << " from " << *source_rank;
        out = context_->createUnboundBuffer(output_buffer, num_bytes);
        out->recv(*source_rank, slot);
      }
    } else {
      std::memset(output_buffer, 0, num_bytes);
    }
    VLOG(1) << "wait for send at " << context_->rank;
    auto deadline = absl::ToChronoTime(absl::Now() + timeout);
    if (in) {
      in->waitSend(deadline);
    }
    VLOG(1) << "wait for recv at " << context_->rank;
    if (out) {
      out->waitRecv(deadline);
    }
    VLOG(1) << "done waiting at " << context_->rank;
  } catch (std::exception& e) {
    return absl::UnknownError(
        absl::StrCat("Gloo collective permute failed: ", e.what()));
  }
  return absl::OkStatus();
}
// All-to-all exchange: rank r sends input_buffers[i] to rank i and receives
// rank i's chunk into output_buffers[i]. Both buffer spans must have exactly
// one `chunk_bytes`-sized entry per rank.
absl::Status GlooCollectivesCommunicator::AllToAll(
    const RendezvousKey& key, size_t chunk_bytes,
    absl::Span<const void* const> input_buffers,
    absl::Span<void* const> output_buffers, absl::Duration timeout) {
  uint32_t tag = 0;
  int my_rank = context_->rank;
  int world_size = context_->size;
  TF_RET_CHECK(world_size == input_buffers.size());
  TF_RET_CHECK(world_size == output_buffers.size());
  try {
    const auto slot = gloo::Slot::build(gloo::kAlltoallSlotPrefix, tag);
    // Unbound buffers are created for every peer; the self slot stays null
    // because the local chunk is handled by memcpy below.
    std::vector<std::unique_ptr<gloo::transport::UnboundBuffer>> ins(
        context_->size);
    std::vector<std::unique_ptr<gloo::transport::UnboundBuffer>> outs(
        context_->size);
    for (size_t i = 0; i < world_size; ++i) {
      if (i != my_rank) {
        ins[i] = context_->createUnboundBuffer(
            const_cast<void*>(input_buffers[i]), chunk_bytes);
        outs[i] = context_->createUnboundBuffer(output_buffers[i], chunk_bytes);
      }
    }
    // Rotated schedule: in round i, send to (rank + i) and receive from
    // (rank - i), modulo world size, to spread traffic across peers.
    for (int i = 1; i < world_size; i++) {
      int send_rank = (my_rank + i) % world_size;
      int recv_rank = (my_rank + world_size - i) % world_size;
      ins[send_rank]->send(send_rank, slot);
      outs[recv_rank]->recv(recv_rank, slot);
    }
    std::memcpy(output_buffers[my_rank], input_buffers[my_rank], chunk_bytes);
    auto deadline = absl::ToChronoTime(absl::Now() + timeout);
    for (int i = 0; i < world_size; i++) {
      if (i != my_rank) {
        ins[i]->waitSend(deadline);
        outs[i]->waitRecv(deadline);
      }
    }
  } catch (std::exception& e) {
    return absl::UnknownError(
        absl::StrCat("Gloo all-to-all failed: ", e.what()));
  }
  return absl::OkStatus();
}
// Gathers `chunk_bytes` bytes from every rank into `output_buffer`,
// concatenated in rank order; `output_buffer` must therefore hold
// chunk_bytes * context_->size bytes.
absl::Status GlooCollectivesCommunicator::AllGather(const RendezvousKey& key,
                                                    size_t chunk_bytes,
                                                    const void* input_buffer,
                                                    void* output_buffer,
                                                    absl::Duration timeout) {
  uint32_t tag = 0;
  gloo::AllgatherOptions options(context_);
  options.setTag(tag);
  options.setTimeout(absl::ToChronoMilliseconds(timeout));
  options.setInput(reinterpret_cast<char*>(const_cast<void*>(input_buffer)),
                   chunk_bytes);
  options.setOutput(reinterpret_cast<char*>(output_buffer),
                    chunk_bytes * context_->size);
  // Gloo surfaces errors as C++ exceptions; translate into a Status.
  try {
    gloo::allgather(options);
  } catch (std::exception& e) {
    return absl::UnknownError(
        absl::StrCat("Gloo AllGather failed: ", e.what()));
  }
  return absl::OkStatus();
}
// Maps `reduction_kind` to gloo's ReductionFunction<T> and runs the
// halving/doubling reduce-scatter in place on `buffer`, which holds
// chunk_elems * context->size elements of T. MAX/MIN exist only for
// non-complex T; unsupported kinds yield InvalidArgumentError.
template <typename T>
absl::Status ReduceScatterHelper(std::shared_ptr<gloo::Context> context,
                                 ReductionKind reduction_kind, void* buffer,
                                 size_t chunk_elems) {
  const gloo::ReductionFunction<T>* reduction_function = nullptr;
  // The two switches are split with if constexpr because
  // ReductionFunction<T>::max/min do not exist for complex T.
  if constexpr (is_complex_v<T>) {
    switch (reduction_kind) {
      case ReductionKind::SUM:
        reduction_function = gloo::ReductionFunction<T>::sum;
        break;
      case ReductionKind::PRODUCT:
        reduction_function = gloo::ReductionFunction<T>::product;
        break;
      default:
        return absl::InvalidArgumentError(absl::StrCat(
            "Unsupported reduction kind: ", static_cast<int>(reduction_kind)));
    }
  } else {
    switch (reduction_kind) {
      case ReductionKind::SUM:
        reduction_function = gloo::ReductionFunction<T>::sum;
        break;
      case ReductionKind::PRODUCT:
        reduction_function = gloo::ReductionFunction<T>::product;
        break;
      case ReductionKind::MAX:
        reduction_function = gloo::ReductionFunction<T>::max;
        break;
      case ReductionKind::MIN:
        reduction_function = gloo::ReductionFunction<T>::min;
        break;
      default:
        return absl::InvalidArgumentError(absl::StrCat(
            "Unsupported reduction kind: ", static_cast<int>(reduction_kind)));
    }
  }
  try {
    // Every rank receives chunk_elems elements of the reduced result.
    std::vector<int> recv_elems(context->size, chunk_elems);
    gloo::ReduceScatterHalvingDoubling<T> algorithm(
        context, std::vector<T*>{reinterpret_cast<T*>(buffer)},
        chunk_elems * context->size, recv_elems, reduction_function);
    algorithm.run();
  } catch (std::exception& e) {
    return absl::UnknownError(
        absl::StrCat("Gloo ReduceScatter failed: ", e.what()));
  }
  return absl::OkStatus();
}
// Reduce-scatter entry point: copies the full input into a scratch buffer
// (the gloo algorithm reduces in place), dispatches on element type, and
// copies this rank's chunk of the reduced result to `output_buffer`.
absl::Status GlooCollectivesCommunicator::ReduceScatter(
    const RendezvousKey& key, ReductionKind reduction_kind,
    PrimitiveType element_type, size_t chunk_elems, const void* input_buffer,
    void* output_buffer, absl::Duration timeout) {
  size_t chunk_bytes = chunk_elems * primitive_util::ByteWidth(element_type);
  // Scratch buffer covering all ranks' chunks; freed automatically.
  std::unique_ptr<char[]> temp(new char[chunk_bytes * context_->size]);
  std::memcpy(temp.get(), input_buffer, chunk_bytes * context_->size);
  switch (element_type) {
    case S8:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<int8_t>(context_, reduction_kind,
                                                     temp.get(), chunk_elems));
      break;
    // PRED (bool) shares the uint8_t code path.
    case PRED:
    case U8:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<uint8_t>(context_, reduction_kind,
                                                      temp.get(), chunk_elems));
      break;
    case S16:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<int16_t>(context_, reduction_kind,
                                                      temp.get(), chunk_elems));
      break;
    case U16:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<uint16_t>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    case S32:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<int32_t>(context_, reduction_kind,
                                                      temp.get(), chunk_elems));
      break;
    case U32:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<uint32_t>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    case S64:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<int64_t>(context_, reduction_kind,
                                                      temp.get(), chunk_elems));
      break;
    case U64:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<uint64_t>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    case BF16:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<bfloat16>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    case F16:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<gloo::float16>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    case F32:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<float>(context_, reduction_kind,
                                                    temp.get(), chunk_elems));
      break;
    case F64:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<double>(context_, reduction_kind,
                                                     temp.get(), chunk_elems));
      break;
    case C64:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<std::complex<float>>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    case C128:
      TF_RETURN_IF_ERROR(ReduceScatterHelper<std::complex<double>>(
          context_, reduction_kind, temp.get(), chunk_elems));
      break;
    default:
      return absl::InvalidArgumentError("Unknown datatype in reducescatter");
  }
  // Only this rank's chunk of the in-place result is copied out.
  std::memcpy(output_buffer, temp.get(), chunk_bytes);
  return absl::OkStatus();
}
// `store` provides the rendezvous key-value service used to bootstrap peer
// connections; `device` is the gloo transport to connect over.
GlooCollectives::GlooCollectives(
    std::unique_ptr<gloo::rendezvous::Store> store,
    std::shared_ptr<gloo::transport::Device> device)
    : store_(std::move(store)), device_(std::move(device)) {}
GlooCollectives::~GlooCollectives() = default;
// Returns the communicator for (global_devices, rank), creating and caching
// it on first use.
absl::StatusOr<std::shared_ptr<CollectivesCommunicator>>
GlooCollectives::GetCommunicator(
    absl::Span<GlobalDeviceId const> global_devices, int rank) {
  Context* context;
  {
    // mu_ only guards the cache map; the (potentially slow) full-mesh
    // connection below runs under the per-context lock instead.
    absl::MutexLock lock(&mu_);
    auto& context_ref = contexts_[std::make_tuple(
        std::vector<GlobalDeviceId>(global_devices.begin(),
                                    global_devices.end()),
        rank)];
    if (!context_ref) {
      context_ref = std::make_unique<Context>();
    }
    context = context_ref.get();
  }
  absl::MutexLock context_lock(&context->mu);
  if (context->communicator) {
    return context->communicator;
  }
  auto gloo_context =
      std::make_shared<gloo::rendezvous::Context>(rank, global_devices.size());
  // The rendezvous store is prefixed with the device list so that distinct
  // cliques sharing one store cannot observe each other's keys.
  auto prefix_store = gloo::rendezvous::PrefixStore(
      absl::StrCat("gloo/",
                   absl::StrJoin(global_devices, ",",
                                 [](std::string* out, GlobalDeviceId id) {
                                   absl::StrAppend(out, id.value());
                                 })),
      *store_);
  try {
    gloo_context->connectFullMesh(prefix_store, device_);
  } catch (std::exception& e) {
    return absl::UnknownError(
        absl::StrCat("Gloo context initialization failed: ", e.what()));
  }
  context->communicator =
      std::make_shared<GlooCollectivesCommunicator>(std::move(gloo_context));
  return context->communicator;
}
} | #include "xla/pjrt/cpu/gloo_collectives.h"
#include <unistd.h>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#if defined(__linux__)
#include "gloo/transport/tcp/attr.h"
#include "gloo/transport/tcp/device.h"
#elif defined(__APPLE__)
#include "gloo/transport/uv/device.h"
#endif
#include "xla/executable_run_options.h"
#include "xla/pjrt/cpu/gloo_kv_store.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/global_device_id.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla::cpu {
namespace {
using ::testing::Each;
using ::testing::Eq;
constexpr int kNumParticipants = 2;
constexpr size_t kBufferSize = 256;
constexpr absl::Duration kTimeout = absl::Seconds(5);
// Builds a GlooCollectives backed by the shared in-memory KV store and the
// platform transport (TCP on Linux, libuv on macOS), then resolves the
// communicator for `rank` within `global_devices`.
absl::StatusOr<std::shared_ptr<CollectivesCommunicator>> GetCommunicator(
    size_t kNumParticipants, absl::Span<GlobalDeviceId const> global_devices,
    const std::shared_ptr<xla::KeyValueStoreInterface>& kv_store, int rank) {
  auto collectives = std::make_shared<cpu::GlooCollectives>(
      std::make_unique<cpu::GlooKeyValueStore>(kv_store),
#if defined(__linux__)
      gloo::transport::tcp::CreateDevice(gloo::transport::tcp::attr()));
#elif defined(__APPLE__)
      gloo::transport::uv::CreateDevice(gloo::transport::uv::attr()));
#endif
  return collectives->GetCommunicator(global_devices, rank);
}
// Rendezvous key shared by all participants of one test collective op.
RendezvousKey MakeRendezvousKey(std::vector<GlobalDeviceId> global_devices) {
  return RendezvousKey(RunId(0), global_devices, kNumParticipants,
                       RendezvousKey::CollectiveOpKind::kCrossModule,
                       0);
}
// Runs a U8 SUM all-reduce of `input_buffer` as `rank` and returns the
// reduced output buffer.
absl::StatusOr<std::vector<uint8_t>> AllReduce(
    const std::shared_ptr<xla::KeyValueStoreInterface>& kv_store,
    const std::vector<uint8_t>& input_buffer,
    std::vector<GlobalDeviceId> global_devices, int rank) {
  std::vector<uint8_t> output_buffer(kBufferSize);
  RendezvousKey rendezvous_key = MakeRendezvousKey(global_devices);
  TF_ASSIGN_OR_RETURN(
      auto communicator,
      GetCommunicator(kNumParticipants, global_devices, kv_store, rank));
  TF_RETURN_IF_ERROR(communicator->AllReduce(
      rendezvous_key, xla::ReductionKind::SUM, xla::PrimitiveType::U8,
      kBufferSize, input_buffer.data(), output_buffer.data(), kTimeout));
  return output_buffer;
}
// Spawns one thread per participant; rank r reduces a buffer filled with
// (r + 1), so every rank must observe the sum 1 + 2 + ... + N = N(N+1)/2.
TEST(GlooCollectives, AllReduce) {
  std::vector<GlobalDeviceId> global_devices;
  global_devices.reserve(kNumParticipants);
  for (int rank = 0; rank < kNumParticipants; ++rank) {
    global_devices.push_back(GlobalDeviceId(rank));
  }
  auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
  std::vector<absl::StatusOr<std::vector<uint8_t>>> output_buffers(
      kNumParticipants);
  {
    // The pool's destructor joins, so all participants have finished before
    // the results are inspected below.
    tsl::thread::ThreadPool thread_pool(
        tsl::Env::Default(), "AllReduceParticipants", kNumParticipants);
    for (int rank = 0; rank < kNumParticipants; ++rank) {
      thread_pool.Schedule(
          [rank, &output_buffers, &kv_store, &global_devices]() {
            std::vector<uint8_t> input_buffer(kBufferSize, rank + 1);
            output_buffers[rank] =
                AllReduce(kv_store, input_buffer, global_devices, rank);
          });
    }
  }
  for (int rank = 0; rank < kNumParticipants; ++rank) {
    TF_ASSERT_OK(output_buffers[rank].status());
  }
  for (int rank = 0; rank < kNumParticipants; ++rank) {
    EXPECT_THAT(output_buffers[rank].value(),
                Each(Eq(kNumParticipants * (kNumParticipants + 1) / 2)));
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/gloo_collectives.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/gloo_collectives_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fde120b0-1aa9-42e2-bdb5-409300d0c5d4 | cpp | google/cel-cpp | shadowable_value_step | eval/eval/shadowable_value_step.cc | eval/eval/shadowable_value_step_test.cc | #include "eval/eval/shadowable_value_step.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "internal/status_macros.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::Value;
// Stack-machine step that pushes a default ("shadowable") value for an
// identifier unless the activation supplies its own binding for that
// identifier, in which case the runtime binding shadows the default.
class ShadowableValueStep : public ExpressionStepBase {
 public:
  ShadowableValueStep(std::string identifier, cel::Value value, int64_t expr_id)
      : ExpressionStepBase(expr_id),
        identifier_(std::move(identifier)),
        value_(std::move(value)) {}
  absl::Status Evaluate(ExecutionFrame* frame) const override;
 private:
  std::string identifier_;
  Value value_;
};
// Pushes the activation's binding for `identifier_` when one exists,
// otherwise the fallback `value_`.
absl::Status ShadowableValueStep::Evaluate(ExecutionFrame* frame) const {
  cel::Value result;
  CEL_ASSIGN_OR_RETURN(auto found,
                       frame->modern_activation().FindVariable(
                           frame->value_factory(), identifier_, result));
  if (found) {
    frame->value_stack().Push(std::move(result));
  } else {
    frame->value_stack().Push(value_);
  }
  return absl::OkStatus();
}
// Direct-dispatch variant of ShadowableValueStep: writes the result into the
// caller-provided output slot instead of a value stack.
class DirectShadowableValueStep : public DirectExpressionStep {
 public:
  // `expr_id` links the step back to the originating expression node.
  DirectShadowableValueStep(std::string identifier, cel::Value value,
                            int64_t expr_id)
      : DirectExpressionStep(expr_id),
        identifier_(std::move(identifier)),
        value_(std::move(value)) {}
  absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
                        AttributeTrail& attribute) const override;

 private:
  std::string identifier_;  // name that a user binding may shadow
  Value value_;             // default used when no shadowing binding exists
};
// Looks up `identifier_` in the activation; if no binding exists, stores the
// default `value_` in `result`. The attribute trail is left untouched.
absl::Status DirectShadowableValueStep::Evaluate(
    ExecutionFrameBase& frame, Value& result, AttributeTrail& attribute) const {
  // FindVariable writes the bound value into `result` when it returns true.
  CEL_ASSIGN_OR_RETURN(auto found,
                       frame.activation().FindVariable(frame.value_manager(),
                                                       identifier_, result));
  if (!found) {
    result = value_;
  }
  return absl::OkStatus();
}
}
// Factory for the stack-machine shadowable-value step: produces `value` for
// `identifier` unless the activation shadows it. Never fails; the StatusOr
// return matches the factory signature convention used by the planner.
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateShadowableValueStep(
    std::string identifier, cel::Value value, int64_t expr_id) {
  // Use std::make_unique for consistency with CreateDirectShadowableValueStep
  // below (was absl::make_unique); <memory> is already included.
  return std::make_unique<ShadowableValueStep>(std::move(identifier),
                                               std::move(value), expr_id);
}
// Factory for the direct-dispatch shadowable-value step: produces `value` for
// `identifier` unless the activation shadows it.
std::unique_ptr<DirectExpressionStep> CreateDirectShadowableValueStep(
    std::string identifier, cel::Value value, int64_t expr_id) {
  auto step = std::make_unique<DirectShadowableValueStep>(
      std::move(identifier), std::move(value), expr_id);
  return step;
}
} | #include "eval/eval/shadowable_value_step.h"
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "base/type_provider.h"
#include "common/value.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/evaluator_core.h"
#include "eval/internal/interop.h"
#include "eval/public/activation.h"
#include "eval/public/cel_value.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::TypeProvider;
using ::cel::interop_internal::CreateTypeValueFromView;
using ::google::protobuf::Arena;
using ::testing::Eq;
// Test helper: builds a one-step expression containing only a shadowable
// value step for `identifier`/`value` and evaluates it against `activation`.
// Returns the evaluation result (or the plan/eval error).
absl::StatusOr<CelValue> RunShadowableExpression(std::string identifier,
                                                 cel::Value value,
                                                 const Activation& activation,
                                                 Arena* arena) {
  CEL_ASSIGN_OR_RETURN(
      auto step,
      CreateShadowableValueStep(std::move(identifier), std::move(value), 1));
  ExecutionPath path;
  path.push_back(std::move(step));
  // Minimal flat expression: no comprehension slots, builtin types only.
  CelExpressionFlatImpl impl(
      FlatExpression(std::move(path), 0,
                     TypeProvider::Builtin(), cel::RuntimeOptions{}));
  return impl.Evaluate(activation, arena);
}
// With no activation binding for the identifier, the step's default value
// (here a type value for TestMessage) is produced.
TEST(ShadowableValueStepTest, TestEvaluateNoShadowing) {
  std::string type_name = "google.api.expr.runtime.TestMessage";

  Activation activation;
  Arena arena;

  auto type_value = CreateTypeValueFromView(&arena, type_name);
  auto status =
      RunShadowableExpression(type_name, type_value, activation, &arena);
  ASSERT_OK(status);

  auto value = status.value();
  ASSERT_TRUE(value.IsCelType());
  EXPECT_THAT(value.CelTypeOrDie().value(), Eq(type_name));
}

// When the activation binds the same identifier, the bound value shadows the
// default type value.
TEST(ShadowableValueStepTest, TestEvaluateShadowedIdentifier) {
  std::string type_name = "int";
  auto shadow_value = CelValue::CreateInt64(1024L);

  Activation activation;
  activation.InsertValue(type_name, shadow_value);
  Arena arena;

  auto type_value = CreateTypeValueFromView(&arena, type_name);
  auto status =
      RunShadowableExpression(type_name, type_value, activation, &arena);
  ASSERT_OK(status);

  auto value = status.value();
  ASSERT_TRUE(value.IsInt64());
  EXPECT_THAT(value.Int64OrDie(), Eq(1024L));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/shadowable_value_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/shadowable_value_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
9daa5f58-6a74-4c1f-a089-e61cfed04987 | cpp | tensorflow/tensorflow | eigen_attention | tensorflow/core/kernels/eigen_attention.h | tensorflow/core/kernels/eigen_attention_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_ATTENTION_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_ATTENTION_H_
#include "unsupported/Eigen/CXX11/Tensor"
namespace Eigen {
enum ExtractGlimpsesNoiseMode {
UNIFORM = 0,
GAUSSIAN = 1,
ZERO = 2,
};
namespace {
// Custom Eigen tensor op that extracts a (width_ x height_) glimpse per batch
// element from a 4D input laid out as (channels, width, height, batch).
// Offsets are given per batch element; interpretation depends on the
// normalized_/centered_ options and on `version_` (v1 vs v2 coordinate
// conventions). Regions of a glimpse that fall outside the input are filled
// with noise according to `noise_`.
template <typename Index>
struct GlimpseExtractionOp {
  GlimpseExtractionOp(const Index width, const Index height,
                      const std::vector<IndexPair<float> >& offsets,
                      const bool normalized, const bool centered,
                      const ExtractGlimpsesNoiseMode noise, const int version)
      : width_(width),
        height_(height),
        offsets_(offsets),
        normalized_(normalized),
        centered_(centered),
        noise_(noise),
        version_(version) {}

  // Output shape: same channels/batch as the input, spatial dims replaced by
  // the glimpse size.
  template <typename Input>
  DSizes<Index, 4> dimensions(const Input& input) const {
    typedef typename internal::traits<Input>::Index IndexType;
    typedef TensorRef<Tensor<typename internal::traits<Input>::Scalar, 4,
                             internal::traits<Input>::Layout, IndexType> >
        Ref;
    Ref in(input);

    DSizes<Index, 4> dims = in.dimensions();

    dims[0] = in.dimension(0);
    dims[1] = width_;
    dims[2] = height_;
    dims[3] = in.dimension(3);
    return dims;
  }

  template <typename Input, typename Output, typename Device>
  EIGEN_DEVICE_FUNC void eval(const Input& input, Output& output,
                              const Device& device) const {
    typedef typename internal::traits<Input>::Index IndexType;
    typedef TensorRef<Tensor<typename internal::traits<Input>::Scalar, 4,
                             internal::traits<Input>::Layout, IndexType> >
        Ref;
    Ref in(input);
    const Index num_channels = in.dimension(0);
    const Index input_width = in.dimension(1);
    const Index input_height = in.dimension(2);
    const Index batch_size = in.dimension(3);
    eigen_assert(input_width > 0);
    eigen_assert(input_height > 0);
    internal::NormalRandomGenerator<float> gen;
    internal::UniformRandomGenerator<float> unigen;

    for (Index i = 0; i < batch_size; ++i) {
      float x = offsets_[i].first, y = offsets_[i].second;

      // Map the per-batch offset to the top-left corner of the glimpse in
      // input coordinates. v1 always recenters on the glimpse; v2 only does
      // the full normalize+center transform when both options are set.
      if (version_ == 1) {
        if (normalized_) {
          x *= input_width;
          y *= input_height;
        }
        if (centered_) {
          x /= 2.0f;
          y /= 2.0f;
          x += input_width / 2.0f;
          y += input_height / 2.0f;
        }
        // Shift from glimpse center to glimpse top-left corner.
        x -= width_ / 2.0f;
        y -= height_ / 2.0f;
      } else {
        if (normalized_) {
          x *= input_width;
          y *= input_height;
          if (centered_) {
            x /= 2.0f;
            y /= 2.0f;
            x += input_width / 2.0f;
            y += input_height / 2.0f;
            x -= width_ / 2.0f;
            y -= height_ / 2.0f;
          }
        } else {
          if (centered_) {
            x += input_width / 2.0f;
            y += input_height / 2.0f;
          }
        }
      }

      const Index offset_x = (Index)x;
      const Index offset_y = (Index)y;
      Index glimpse_width = width_;
      Index glimpse_height = height_;
      bool partial_overlap = false;
      // Slice of the input to copy, and the offset within the output glimpse
      // where that slice lands (non-zero when the glimpse sticks out past the
      // input's low edge).
      DSizes<Index, 3> slice_offset(0, offset_x, offset_y);
      DSizes<Index, 3> slice_extent(num_channels, width_, height_);
      DSizes<Index, 3> base_offset(0, 0, 0);

      // Clip the glimpse against each input boundary.
      if (offset_x < 0) {
        slice_offset[1] = 0;
        glimpse_width = (std::max<Index>)(0, width_ + offset_x);
        slice_extent[1] = glimpse_width;
        base_offset[1] = width_ - glimpse_width;
        partial_overlap = true;
      } else if (offset_x + width_ >= input_width) {
        glimpse_width = (std::max<Index>)(0, input_width - offset_x);
        slice_extent[1] = glimpse_width;
        partial_overlap = true;
      }
      if (offset_y < 0) {
        slice_offset[2] = 0;
        glimpse_height = (std::max<Index>)(0, height_ + offset_y);
        slice_extent[2] = glimpse_height;
        base_offset[2] = height_ - glimpse_height;
        partial_overlap = true;
      } else if (offset_y + height_ >= input_height) {
        glimpse_height = (std::max<Index>)(0, input_height - offset_y);
        slice_extent[2] = glimpse_height;
        partial_overlap = true;
      }
      slice_extent[1] = std::min<Index>(input_width, slice_extent[1]);
      slice_extent[2] = std::min<Index>(input_height, slice_extent[2]);

      if (partial_overlap) {
        // Fill the whole glimpse with noise first; the valid sub-slice is
        // then overwritten with real input data below.
        switch (noise_) {
          case ZERO: {
            output.template chip<3>(i).device(device) =
                output.template chip<3>(i).constant(0);
          } break;
          case UNIFORM: {
            // Uniform noise over [min, max] of this batch element.
            typedef std::remove_const_t<
                typename internal::traits<Input>::Scalar>
                Scalar;
            TensorFixedSize<Scalar, Sizes<> > mini;
            mini.device(device) = input.template chip<3>(i).minimum();
            TensorFixedSize<float, Sizes<> > range;
            range.device(device) = (input.template chip<3>(i).maximum() - mini)
                                       .template cast<float>();

            DSizes<Index, 3> glimpse_size(num_channels, width_, height_);
            TensorMap<Tensor<float, 3> > tmp(nullptr, glimpse_size);
            output.template chip<3>(i).device(device) =
                mini.reshape(Sizes<1, 1, 1>()).broadcast(glimpse_size) +
                (tmp.random(unigen) *
                 range.reshape(Sizes<1, 1, 1>()).broadcast(glimpse_size))
                    .template cast<Scalar>();
          } break;
          case GAUSSIAN: {
            // Per-channel Gaussian noise matching the channel's mean and
            // stddev, clamped to the channel's [min, max].
            DSizes<Index, 2> glimpse_size(width_, height_);
            DSizes<Index, 2> input_size(input_width, input_height);
            typedef std::remove_const_t<
                typename internal::traits<Input>::Scalar>
                Scalar;
            for (int j = 0; j < num_channels; ++j) {
              TensorFixedSize<Scalar, Sizes<> > mean;
              mean.device(device) = input.template chip<3>(i)
                                        .template chip<0>(j)
                                        .template cast<float>()
                                        .mean();
              TensorFixedSize<float, Sizes<> > sigma;
              sigma.device(device) =
                  (input.template chip<3>(i)
                       .template chip<0>(j)
                       .template cast<float>() -
                   mean.reshape(Sizes<1, 1>()).broadcast(input_size))
                      .square()
                      .mean()
                      .sqrt();
              TensorFixedSize<Scalar, Sizes<> > mini;
              mini.device(device) =
                  input.template chip<3>(i).template chip<0>(j).minimum();
              TensorFixedSize<float, Sizes<> > maxi;
              maxi.device(device) =
                  input.template chip<3>(i).template chip<0>(j).maximum();

              TensorMap<Tensor<float, 2> > tmp(nullptr, glimpse_size);
              output.template chip<3>(i).template chip<0>(j).device(device) =
                  (mean.reshape(Sizes<1, 1>()).broadcast(glimpse_size) +
                   (tmp.random(gen) *
                    sigma.reshape(Sizes<1, 1>()).broadcast(glimpse_size))
                       .template cast<Scalar>())
                      .cwiseMin(
                          maxi.reshape(Sizes<1, 1>()).broadcast(glimpse_size))
                      .cwiseMax(
                          mini.reshape(Sizes<1, 1>()).broadcast(glimpse_size));
            }
          } break;
        }

        // No overlap at all: the glimpse is pure noise, nothing to copy.
        if (glimpse_width == 0 || glimpse_height == 0) {
          continue;
        }
        // Copy the overlapping region of the input into its place within the
        // glimpse.
        output.template chip<3>(i)
            .slice(base_offset, slice_extent)
            .device(device) =
            input.template chip<3>(i).slice(slice_offset, slice_extent);
      } else {
        // Fully inside the input: a single slice copy fills the glimpse.
        output.template chip<3>(i).device(device) =
            input.template chip<3>(i).slice(slice_offset, slice_extent);
      }
    }
  }

 private:
  const Index width_;
  const Index height_;
  const std::vector<IndexPair<float> > offsets_;
  const bool normalized_;
  const bool centered_;
  const ExtractGlimpsesNoiseMode noise_;
  const int version_;
};
}
// Extracts per-batch glimpses of size (width x height) from a 4D col-major
// input shaped (channels, width, height, batch). `offsets` gives one (x, y)
// glimpse position per batch element; `normalized`/`centered` select how the
// offsets are interpreted, `noise` controls out-of-bounds fill, and `version`
// picks the v1 or v2 coordinate convention (see GlimpseExtractionOp::eval).
// Returns a lazily-evaluated custom tensor op.
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorCustomUnaryOp<
    const GlimpseExtractionOp<typename internal::traits<Input>::Index>,
    const Input>
ExtractGlimpses(
    const Input& input, const typename internal::traits<Input>::Index width,
    const typename internal::traits<Input>::Index height,
    const std::vector<IndexPair<float> >& offsets, const bool normalized = true,
    const bool centered = true,
    const ExtractGlimpsesNoiseMode noise = ExtractGlimpsesNoiseMode::UNIFORM,
    const int version = 2) {
  // Only col-major, rank-4 inputs are supported.
  EIGEN_STATIC_ASSERT(internal::traits<Input>::Layout == ColMajor,
                      YOU_MADE_A_PROGRAMMING_MISTAKE);
  EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 4,
                      YOU_MADE_A_PROGRAMMING_MISTAKE);

  typedef typename internal::traits<Input>::Index Index;
  const GlimpseExtractionOp<Index> op(width, height, offsets, normalized,
                                      centered, noise, version);
  return input.customOp(op);
}
}
#endif | #include "tensorflow/core/kernels/eigen_attention.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
// Asserts that `a` and `b` agree to within a 1e-3 relative tolerance,
// scaled by the smaller magnitude of the two operands.
void EigenApprox(float a, float b) {
  const float difference = std::abs(a - b);
  const double tolerance = std::min(std::abs(a), std::abs(b)) * 1e-3;
  ASSERT_TRUE(difference <= tolerance);
}
}
// Glimpses fully inside the input: every output pixel must equal the
// corresponding input pixel at the computed source coordinates.
TEST(EigenAttentionTest, Simple) {
  const ptrdiff_t depth = 3;
  const ptrdiff_t batch = 10;
  const ptrdiff_t rows = 32;
  const ptrdiff_t cols = 48;
  const ptrdiff_t glimpse_rows = 8;
  const ptrdiff_t glimpse_cols = 6;

  Tensor<float, 4> input(depth, rows, cols, batch);
  input.setRandom();

  // Small normalized offsets in [-0.5, 0.5) x (-0.5, 0.5] keep glimpses
  // in-bounds.
  std::vector<IndexPair<float>> offsets;
  offsets.resize(batch);
  for (int i = 0; i < batch; ++i) {
    offsets[i].first = (-5 + i) / 10.0f;
    offsets[i].second = (5 - i) / 10.0f;
  }

  Tensor<float, 4> result(depth, glimpse_rows, glimpse_cols, batch);
  result = ExtractGlimpses(input, glimpse_rows, glimpse_cols, offsets);

  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < glimpse_cols; ++c) {
      ptrdiff_t source_c =
          c + ((1.0f + offsets[b].second) * cols - glimpse_cols) / 2;
      for (int r = 0; r < glimpse_rows; ++r) {
        ptrdiff_t source_r =
            r + ((1.0f + offsets[b].first) * rows - glimpse_rows) / 2;

        for (int d = 0; d < depth; ++d) {
          EigenApprox(result(d, r, c, b), input(d, source_r, source_c, b));
        }
      }
    }
  }
}
// Glimpses that may fall (partly) outside the input: only in-bounds source
// coordinates are checked; out-of-bounds pixels are noise-filled and skipped.
TEST(EigenAttentionTest, OutOfBoundsGlimpse) {
  const ptrdiff_t depth = 3;
  const ptrdiff_t batch = 10;
  const ptrdiff_t rows = 32;
  const ptrdiff_t cols = 48;
  const ptrdiff_t glimpse_rows = 8;
  const ptrdiff_t glimpse_cols = 6;

  Tensor<float, 4> input(depth, rows, cols, batch);
  input.setRandom();

  // Large normalized offsets (up to +/-2.5) push some glimpses off the edge.
  std::vector<IndexPair<float>> offsets;
  offsets.resize(batch);
  for (int i = 0; i < batch; ++i) {
    offsets[i].first = (-5 + i) / 2.0f;
    offsets[i].second = (5 - i) / 2.0f;
  }

  Tensor<float, 4> result(depth, glimpse_rows, glimpse_cols, batch);
  result = ExtractGlimpses(input, glimpse_rows, glimpse_cols, offsets);

  for (int b = 0; b < batch; ++b) {
    for (int c = 0; c < glimpse_cols; ++c) {
      ptrdiff_t source_c =
          c + ((1.0f + offsets[b].second) * cols - glimpse_cols) / 2;
      // Skip columns whose source lies outside the checkable interior.
      if (source_c < glimpse_cols / 2 || source_c >= cols - glimpse_cols / 2) {
        continue;
      }
      for (int r = 0; r < glimpse_rows; ++r) {
        ptrdiff_t source_r =
            r + ((1.0f + offsets[b].first) * rows - glimpse_rows) / 2;
        // Skip rows whose source lies outside the checkable interior.
        if (source_r < glimpse_rows / 2 ||
            source_r >= rows - glimpse_rows / 2) {
          continue;
        }

        for (int d = 0; d < depth; ++d) {
          EigenApprox(result(d, r, c, b), input(d, source_r, source_c, b));
        }
      }
    }
  }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_attention.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_attention_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2dbc3bb9-9f0d-4727-be03-8810e34b02b3 | cpp | abseil/abseil-cpp | reflection | absl/flags/reflection.cc | absl/flags/reflection_test.cc | #include "absl/flags/reflection.h"
#include <assert.h>
#include <atomic>
#include <string>
#include "absl/base/config.h"
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/flags/commandlineflag.h"
#include "absl/flags/internal/private_handle_accessor.h"
#include "absl/flags/internal/registry.h"
#include "absl/flags/usage_config.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
// Process-wide registry of all command-line flags. Mutations and lookups go
// through `lock_` until FinalizeRegistry() moves the flags into a sorted
// vector (`flat_flags_`) that supports lock-free lookup.
class FlagRegistry {
 public:
  FlagRegistry() = default;
  ~FlagRegistry() = default;

  // Registers `flag`; reports a usage error and exits on duplicates or
  // registration inconsistencies (see the definition for the exact cases).
  void RegisterFlag(CommandLineFlag& flag, const char* filename);

  void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION(lock_) { lock_.Lock(); }
  void Unlock() ABSL_UNLOCK_FUNCTION(lock_) { lock_.Unlock(); }

  // Returns the flag named `name`, or nullptr if not registered.
  CommandLineFlag* FindFlag(absl::string_view name);

  // Accessor for the process-wide singleton.
  static FlagRegistry& GlobalRegistry();

 private:
  friend class flags_internal::FlagSaverImpl;
  friend void ForEachFlag(std::function<void(CommandLineFlag&)> visitor);
  friend void FinalizeRegistry();

  using FlagMap = absl::flat_hash_map<absl::string_view, CommandLineFlag*>;
  using FlagIterator = FlagMap::iterator;
  using FlagConstIterator = FlagMap::const_iterator;
  FlagMap flags_;                           // pre-finalization storage
  std::vector<CommandLineFlag*> flat_flags_;  // sorted, post-finalization
  std::atomic<bool> finalized_flags_{false};

  absl::Mutex lock_;

  // Non-copyable (declared, not defined).
  FlagRegistry(const FlagRegistry&);
  FlagRegistry& operator=(const FlagRegistry&);
};
namespace {
class FlagRegistryLock {
public:
explicit FlagRegistryLock(FlagRegistry& fr) : fr_(fr) { fr_.Lock(); }
~FlagRegistryLock() { fr_.Unlock(); }
private:
FlagRegistry& fr_;
};
}
// Looks up a flag by name. After finalization this is a lock-free binary
// search over the sorted `flat_flags_`; before finalization (or on a miss in
// the flat vector) it falls back to the locked hash-map lookup.
CommandLineFlag* FlagRegistry::FindFlag(absl::string_view name) {
  if (finalized_flags_.load(std::memory_order_acquire)) {
    // partition_point finds the first flag whose name is >= `name`.
    auto it = std::partition_point(
        flat_flags_.begin(), flat_flags_.end(),
        [=](CommandLineFlag* f) { return f->Name() < name; });
    if (it != flat_flags_.end() && (*it)->Name() == name) return *it;
  }

  FlagRegistryLock frl(*this);
  auto it = flags_.find(name);
  return it != flags_.end() ? it->second : nullptr;
}
// Registers `flag` under its name. On any inconsistency (filename mismatch,
// duplicate definition, retired/non-retired clash, or type clash) a usage
// error is reported and the process exits. Re-registering an already-retired
// flag is silently ignored.
void FlagRegistry::RegisterFlag(CommandLineFlag& flag, const char* filename) {
  if (filename != nullptr &&
      flag.Filename() != GetUsageConfig().normalize_filename(filename)) {
    // The flag object's recorded filename disagrees with the registration
    // site: duplicate flags or an ODR violation.
    flags_internal::ReportUsageError(
        absl::StrCat(
            "Inconsistency between flag object and registration for flag '",
            flag.Name(),
            "', likely due to duplicate flags or an ODR violation. Relevant "
            "files: ",
            flag.Filename(), " and ", filename),
        true);
    std::exit(1);
  }

  FlagRegistryLock registry_lock(*this);

  std::pair<FlagIterator, bool> ins =
      flags_.insert(FlagMap::value_type(flag.Name(), &flag));
  if (ins.second == false) {
    // A flag with this name is already registered; diagnose the collision.
    CommandLineFlag& old_flag = *ins.first->second;
    if (flag.IsRetired() != old_flag.IsRetired()) {
      flags_internal::ReportUsageError(
          absl::StrCat(
              "Retired flag '", flag.Name(), "' was defined normally in file '",
              (flag.IsRetired() ? old_flag.Filename() : flag.Filename()), "'."),
          true);
    } else if (flags_internal::PrivateHandleAccessor::TypeId(flag) !=
               flags_internal::PrivateHandleAccessor::TypeId(old_flag)) {
      flags_internal::ReportUsageError(
          absl::StrCat("Flag '", flag.Name(),
                       "' was defined more than once but with "
                       "differing types. Defined in files '",
                       old_flag.Filename(), "' and '", flag.Filename(), "'."),
          true);
    } else if (old_flag.IsRetired()) {
      // Duplicate registration of a retired flag is benign.
      return;
    } else if (old_flag.Filename() != flag.Filename()) {
      flags_internal::ReportUsageError(
          absl::StrCat("Flag '", flag.Name(),
                       "' was defined more than once (in files '",
                       old_flag.Filename(), "' and '", flag.Filename(), "')."),
          true);
    } else {
      // Same name, same file: most likely the translation unit is linked in
      // twice (both statically and dynamically).
      flags_internal::ReportUsageError(
          absl::StrCat(
              "Something is wrong with flag '", flag.Name(), "' in file '",
              flag.Filename(), "'. One possibility: file '", flag.Filename(),
              "' is being linked both statically and dynamically into this "
              "executable. e.g. some files listed as srcs to a test and also "
              "listed as srcs of some shared lib deps of the same test."),
          true);
    }
    std::exit(1);
  }
}
// Returns the process-wide flag registry singleton. NoDestructor leaks the
// instance intentionally so flags remain usable during program shutdown.
FlagRegistry& FlagRegistry::GlobalRegistry() {
  static absl::NoDestructor<FlagRegistry> registry;
  return *registry;
}
// Invokes `visitor` on every registered flag. After finalization the flags
// live in `flat_flags_` (first loop) and `flags_` is empty; before
// finalization `flat_flags_` is empty and the locked second loop does the
// work — so exactly one of the two loops visits anything.
void ForEachFlag(std::function<void(CommandLineFlag&)> visitor) {
  FlagRegistry& registry = FlagRegistry::GlobalRegistry();

  if (registry.finalized_flags_.load(std::memory_order_acquire)) {
    for (const auto& i : registry.flat_flags_) visitor(*i);
  }

  FlagRegistryLock frl(registry);
  for (const auto& i : registry.flags_) visitor(*i.second);
}
// Registers `flag` with the global registry. Always returns true so the call
// can be used to initialize a static registration variable.
bool RegisterCommandLineFlag(CommandLineFlag& flag, const char* filename) {
  auto& registry = FlagRegistry::GlobalRegistry();
  registry.RegisterFlag(flag, filename);
  return true;
}
// One-time transition of the registry from the locked hash map to a sorted
// flat vector, enabling lock-free lookup/iteration afterwards. Idempotent:
// repeated calls are no-ops.
void FinalizeRegistry() {
  auto& registry = FlagRegistry::GlobalRegistry();
  FlagRegistryLock frl(registry);
  if (registry.finalized_flags_.load(std::memory_order_relaxed)) {
    return;
  }

  registry.flat_flags_.reserve(registry.flags_.size());
  for (const auto& f : registry.flags_) {
    registry.flat_flags_.push_back(f.second);
  }
  // Sort by name so FindFlag can binary-search.
  std::sort(std::begin(registry.flat_flags_), std::end(registry.flat_flags_),
            [](const CommandLineFlag* lhs, const CommandLineFlag* rhs) {
              return lhs->Name() < rhs->Name();
            });
  registry.flags_.clear();
  // Release ordering publishes flat_flags_ to lock-free readers.
  registry.finalized_flags_.store(true, std::memory_order_release);
}
namespace {
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#endif
// Placeholder CommandLineFlag for a retired flag: accepts no values, reports
// a (non-fatal) usage warning on every access, and parses nothing. Retired
// flags keep old command lines working while the flag is phased out.
class RetiredFlagObj final : public CommandLineFlag {
 public:
  constexpr RetiredFlagObj(const char* name, FlagFastTypeId type_id)
      : name_(name), type_id_(type_id) {}

 private:
  absl::string_view Name() const override { return name_; }
  std::string Filename() const override {
    OnAccess();
    return "RETIRED";
  }
  FlagFastTypeId TypeId() const override { return type_id_; }
  std::string Help() const override {
    OnAccess();
    return "";
  }
  bool IsRetired() const override { return true; }
  bool IsSpecifiedOnCommandLine() const override {
    OnAccess();
    return false;
  }
  std::string DefaultValue() const override {
    OnAccess();
    return "";
  }
  std::string CurrentValue() const override {
    OnAccess();
    return "";
  }
  // Any value "parses" successfully so old command lines don't break.
  bool ValidateInputValue(absl::string_view) const override {
    OnAccess();
    return true;
  }
  std::unique_ptr<flags_internal::FlagStateInterface> SaveState() override {
    return nullptr;
  }
  // Reports that the value was ignored; never actually stores anything.
  bool ParseFrom(absl::string_view, flags_internal::FlagSettingMode,
                 flags_internal::ValueSource, std::string&) override {
    OnAccess();
    return false;
  }
  void CheckDefaultValueParsingRoundtrip() const override { OnAccess(); }
  void Read(void*) const override { OnAccess(); }

  // Emits a non-fatal warning that a retired flag is being touched.
  void OnAccess() const {
    flags_internal::ReportUsageError(
        absl::StrCat("Accessing retired flag '", name_, "'"), false);
  }

  const char* const name_;
  const FlagFastTypeId type_id_;
};
#if defined(__GNUC__) && !defined(__clang__)
#pragma GCC diagnostic pop
#endif
}
// Constructs a RetiredFlagObj via placement new into caller-provided storage
// `buf` (sized/aligned per the static_asserts below, matching the constants
// declared in the header) and registers it. The object is intentionally
// never destroyed.
void Retire(const char* name, FlagFastTypeId type_id, char* buf) {
  static_assert(sizeof(RetiredFlagObj) == kRetiredFlagObjSize, "");
  static_assert(alignof(RetiredFlagObj) == kRetiredFlagObjAlignment, "");
  auto* flag = ::new (static_cast<void*>(buf))
      flags_internal::RetiredFlagObj(name, type_id);
  // filename == nullptr skips the registration-site consistency check.
  FlagRegistry::GlobalRegistry().RegisterFlag(*flag, nullptr);
}
// Captures the state of every registered flag and restores it later;
// implementation behind the public absl::FlagSaver.
class FlagSaverImpl {
 public:
  FlagSaverImpl() = default;
  FlagSaverImpl(const FlagSaverImpl&) = delete;
  void operator=(const FlagSaverImpl&) = delete;

  // Snapshots all flags' current state. Must be called at most once per
  // instance (asserted below).
  void SaveFromRegistry() {
    assert(backup_registry_.empty());
    flags_internal::ForEachFlag([&](CommandLineFlag& flag) {
      // SaveState may return null (e.g. retired flags); those are skipped.
      if (auto flag_state =
              flags_internal::PrivateHandleAccessor::SaveState(flag)) {
        backup_registry_.emplace_back(std::move(flag_state));
      }
    });
  }

  // Restores every saved flag state captured by SaveFromRegistry().
  void RestoreToRegistry() {
    for (const auto& flag_state : backup_registry_) {
      flag_state->Restore();
    }
  }

 private:
  std::vector<std::unique_ptr<flags_internal::FlagStateInterface>>
      backup_registry_;
};
}
// Snapshots all flag values on construction.
FlagSaver::FlagSaver() : impl_(new flags_internal::FlagSaverImpl) {
  impl_->SaveFromRegistry();
}

// Restores the snapshot on destruction. `impl_` can be null after a move.
FlagSaver::~FlagSaver() {
  if (!impl_) return;

  impl_->RestoreToRegistry();
  delete impl_;
}
// Returns the registered flag named `name`, or nullptr if `name` is empty or
// no such flag exists.
CommandLineFlag* FindCommandLineFlag(absl::string_view name) {
  if (!name.empty()) {
    auto& registry = flags_internal::FlagRegistry::GlobalRegistry();
    return registry.FindFlag(name);
  }
  return nullptr;
}
// Returns a name -> flag map of all registered, non-retired flags.
absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> GetAllFlags() {
  absl::flat_hash_map<absl::string_view, absl::CommandLineFlag*> res;
  flags_internal::ForEachFlag([&](CommandLineFlag& flag) {
    // Retired flags are placeholders and are deliberately excluded.
    if (!flag.IsRetired()) res.insert({flag.Name(), &flag});
  });
  return res;
}
ABSL_NAMESPACE_END
} | #include "absl/flags/reflection.h"
#include <memory>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/flags/config.h"
#include "absl/flags/flag.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
ABSL_FLAG(int, int_flag, 1, "int_flag help");
ABSL_FLAG(std::string, string_flag, "dflt", "string_flag help");
ABSL_RETIRED_FLAG(bool, bool_retired_flag, false, "bool_retired_flag help");
namespace {
// Fixture that saves/restores all flag values around each test so tests do
// not leak flag modifications into each other. Skipped when flag names are
// compiled out (ABSL_FLAGS_STRIP_NAMES).
class ReflectionTest : public testing::Test {
 protected:
  void SetUp() override {
#if ABSL_FLAGS_STRIP_NAMES
    GTEST_SKIP() << "This test requires flag names to be present";
#endif
    flag_saver_ = absl::make_unique<absl::FlagSaver>();
  }
  void TearDown() override { flag_saver_.reset(); }

 private:
  std::unique_ptr<absl::FlagSaver> flag_saver_;
};
// FindCommandLineFlag returns null for unknown names and non-null for
// defined flags — including retired ones.
TEST_F(ReflectionTest, TestFindCommandLineFlag) {
  auto* handle = absl::FindCommandLineFlag("some_flag");
  EXPECT_EQ(handle, nullptr);

  handle = absl::FindCommandLineFlag("int_flag");
  EXPECT_NE(handle, nullptr);

  handle = absl::FindCommandLineFlag("string_flag");
  EXPECT_NE(handle, nullptr);

  handle = absl::FindCommandLineFlag("bool_retired_flag");
  EXPECT_NE(handle, nullptr);
}
// GetAllFlags contains defined flags, excludes retired and undefined ones,
// and yields the same flag set across repeated calls.
TEST_F(ReflectionTest, TestGetAllFlags) {
  auto all_flags = absl::GetAllFlags();
  EXPECT_NE(all_flags.find("int_flag"), all_flags.end());
  EXPECT_EQ(all_flags.find("bool_retired_flag"), all_flags.end());
  EXPECT_EQ(all_flags.find("some_undefined_flag"), all_flags.end());

  std::vector<absl::string_view> flag_names_first_attempt;
  auto all_flags_1 = absl::GetAllFlags();
  for (auto f : all_flags_1) {
    flag_names_first_attempt.push_back(f.first);
  }

  std::vector<absl::string_view> flag_names_second_attempt;
  auto all_flags_2 = absl::GetAllFlags();
  for (auto f : all_flags_2) {
    flag_names_second_attempt.push_back(f.first);
  }

  // Order is unspecified (hash map), so compare as unordered sets.
  EXPECT_THAT(flag_names_first_attempt,
              ::testing::UnorderedElementsAreArray(flag_names_second_attempt));
}
// Simple two-field user-defined type used to exercise custom flag parsing.
// Default-constructed instances hold (1, 1).
struct CustomUDT {
  CustomUDT() = default;
  CustomUDT(int x, int y) : a(x), b(y) {}

  friend bool operator==(const CustomUDT& lhs, const CustomUDT& rhs) {
    return lhs.a == rhs.a && lhs.b == rhs.b;
  }

  int a = 1;
  int b = 1;
};
// Parses "A:B" (two integers separated by ':') into a CustomUDT. Found via
// ADL so ABSL_FLAG(CustomUDT, ...) can parse command-line text. Returns
// false on any malformed input; the error-message out-param is unused.
bool AbslParseFlag(absl::string_view in, CustomUDT* f, std::string*) {
  std::vector<absl::string_view> parts =
      absl::StrSplit(in, ':', absl::SkipWhitespace());
  if (parts.size() != 2) return false;
  if (!absl::SimpleAtoi(parts[0], &f->a)) return false;
  if (!absl::SimpleAtoi(parts[1], &f->b)) return false;
  return true;
}
// Inverse of AbslParseFlag: formats a CustomUDT back to "A:B".
std::string AbslUnparseFlag(const CustomUDT& f) {
  return absl::StrCat(f.a, ":", f.b);
}
}
ABSL_FLAG(bool, test_flag_01, true, "");
ABSL_FLAG(int, test_flag_02, 1234, "");
ABSL_FLAG(int16_t, test_flag_03, -34, "");
ABSL_FLAG(uint16_t, test_flag_04, 189, "");
ABSL_FLAG(int32_t, test_flag_05, 10765, "");
ABSL_FLAG(uint32_t, test_flag_06, 40000, "");
ABSL_FLAG(int64_t, test_flag_07, -1234567, "");
ABSL_FLAG(uint64_t, test_flag_08, 9876543, "");
ABSL_FLAG(double, test_flag_09, -9.876e-50, "");
ABSL_FLAG(float, test_flag_10, 1.234e12f, "");
ABSL_FLAG(std::string, test_flag_11, "", "");
ABSL_FLAG(absl::Duration, test_flag_12, absl::Minutes(10), "");
static int counter = 0;
ABSL_FLAG(int, test_flag_13, 200, "").OnUpdate([]() { counter++; });
ABSL_FLAG(CustomUDT, test_flag_14, {}, "");
namespace {
// A FlagSaver restores all flag values (set via absl::SetFlag) when its
// scope ends; the OnUpdate counter fires once for the set and once for the
// restore of test_flag_13.
TEST_F(ReflectionTest, TestFlagSaverInScope) {
  {
    absl::FlagSaver s;
    counter = 0;
    absl::SetFlag(&FLAGS_test_flag_01, false);
    absl::SetFlag(&FLAGS_test_flag_02, -1021);
    absl::SetFlag(&FLAGS_test_flag_03, 6009);
    absl::SetFlag(&FLAGS_test_flag_04, 44);
    absl::SetFlag(&FLAGS_test_flag_05, +800);
    absl::SetFlag(&FLAGS_test_flag_06, -40978756);
    absl::SetFlag(&FLAGS_test_flag_07, 23405);
    absl::SetFlag(&FLAGS_test_flag_08, 975310);
    absl::SetFlag(&FLAGS_test_flag_09, 1.00001);
    absl::SetFlag(&FLAGS_test_flag_10, -3.54f);
    absl::SetFlag(&FLAGS_test_flag_11, "asdf");
    absl::SetFlag(&FLAGS_test_flag_12, absl::Hours(20));
    absl::SetFlag(&FLAGS_test_flag_13, 4);
    absl::SetFlag(&FLAGS_test_flag_14, CustomUDT{-1, -2});
  }

  // All flags are back at their declared defaults.
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), true);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_02), 1234);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_03), -34);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_04), 189);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_05), 10765);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_06), 40000);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_07), -1234567);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 9876543);
  EXPECT_NEAR(absl::GetFlag(FLAGS_test_flag_09), -9.876e-50, 1e-55);
  EXPECT_NEAR(absl::GetFlag(FLAGS_test_flag_10), 1.234e12f, 1e5f);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_11), "");
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_12), absl::Minutes(10));
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_13), 200);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_14), CustomUDT{});
  // One update on set + one on restore.
  EXPECT_EQ(counter, 2);
}
// Same restore guarantee, but the modifications go through the reflection
// API (CommandLineFlag::ParseFrom) instead of absl::SetFlag.
TEST_F(ReflectionTest, TestFlagSaverVsUpdateViaReflection) {
  {
    absl::FlagSaver s;
    counter = 0;
    std::string error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_01")->ParseFrom("false", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_02")->ParseFrom("-4536", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_03")->ParseFrom("111", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_04")->ParseFrom("909", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_05")->ParseFrom("-2004", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_06")->ParseFrom("1000023", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_07")->ParseFrom("69305", &error))
        << error;
    EXPECT_TRUE(absl::FindCommandLineFlag("test_flag_08")
                    ->ParseFrom("1000000001", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_09")->ParseFrom("2.09021", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_10")->ParseFrom("-33.1", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_11")->ParseFrom("ADD_FOO", &error))
        << error;
    EXPECT_TRUE(absl::FindCommandLineFlag("test_flag_12")
                    ->ParseFrom("3h11m16s", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_13")->ParseFrom("0", &error))
        << error;
    EXPECT_TRUE(
        absl::FindCommandLineFlag("test_flag_14")->ParseFrom("10:1", &error))
        << error;
  }

  // All flags are back at their declared defaults after the saver's scope.
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_01), true);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_02), 1234);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_03), -34);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_04), 189);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_05), 10765);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_06), 40000);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_07), -1234567);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 9876543);
  EXPECT_NEAR(absl::GetFlag(FLAGS_test_flag_09), -9.876e-50, 1e-55);
  EXPECT_NEAR(absl::GetFlag(FLAGS_test_flag_10), 1.234e12f, 1e5f);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_11), "");
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_12), absl::Minutes(10));
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_13), 200);
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_14), CustomUDT{});
  // One update on ParseFrom + one on restore of test_flag_13.
  EXPECT_EQ(counter, 2);
}
// Nested FlagSavers restore in LIFO order: each inner scope's exit brings
// the flag back to the value it had when that scope was entered.
TEST_F(ReflectionTest, TestMultipleFlagSaversInEnclosedScopes) {
  {
    absl::FlagSaver s;
    absl::SetFlag(&FLAGS_test_flag_08, 10);
    EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 10);
    {
      absl::FlagSaver s;
      absl::SetFlag(&FLAGS_test_flag_08, 20);
      EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 20);
      {
        absl::FlagSaver s;
        absl::SetFlag(&FLAGS_test_flag_08, -200);
        EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), -200);
      }
      EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 20);
    }
    EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 10);
  }
  // Outermost scope closed: back to the declared default.
  EXPECT_EQ(absl::GetFlag(FLAGS_test_flag_08), 9876543);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/reflection.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/reflection_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
36f6b8c3-4e4d-44d4-a114-9c8ef199e3e6 | cpp | tensorflow/tensorflow | execution_trace_utils | third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils.cc | third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils_test.cc | #include "xla/mlir/tools/mlir_replay/public/execution_trace_utils.h"
#include <cassert>
#include <complex>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <type_traits>
#include <utility>
#include <variant>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "xla/literal.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include "xla/mlir/tools/mlir_replay/public/execution_trace.pb.h"
#include "xla/primitive_util.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace interpreter {
namespace {
// Visitor that serializes an InterpreterValue into a TracedValue proto:
// scalars, tuples (recursively), and tensors/memrefs (shape + elements in
// view-index order). Complex numbers are flattened to (real, imag) pairs.
struct TraceInterpreterValueVisitor {
  TracedValue out;

  // Element appenders, one overload per supported scalar type.
  void Add(float v) { out.add_floats(v); }
  void Add(double v) { out.add_doubles(v); }
  void Add(std::complex<float> v) {
    out.add_floats(v.real());
    out.add_floats(v.imag());
  }
  void Add(std::complex<double> v) {
    out.add_doubles(v.real());
    out.add_doubles(v.imag());
  }
  void Add(int64_t v) { out.add_ints(v); }
  void Add(int32_t v) { out.add_ints(v); }
  void Add(int16_t v) { out.add_ints(v); }
  void Add(int8_t v) { out.add_ints(v); }
  void Add(uint64_t v) { out.add_uints(v); }
  void Add(uint32_t v) { out.add_uints(v); }
  void Add(uint16_t v) { out.add_uints(v); }
  void Add(uint8_t v) { out.add_ints(static_cast<int64_t>(v)); }
  void Add(bool v) { out.add_ints(static_cast<int64_t>(v)); }

  // Scalar values: record type info and the single element.
  template <typename T>
  void operator()(T v) {
    SetElementType<T>();
    out.set_is_scalar(true);
    Add(v);
  }

  // Tuples: serialize each member recursively into tuple_elements.
  void operator()(const Tuple& t) {
    out.set_element_type(TracedValue::TUPLE);
    for (const auto& v : t.values) {
      *out.add_tuple_elements() = ValueToTracedValue(*v);
    }
  }

  // Tensors/memrefs: record shape, then elements in the view's index order.
  template <typename T>
  void operator()(const TensorOrMemref<T>& v) {
    for (int64_t size : v.view.sizes) {
      out.add_shape(size);
    }
    SetElementType<T>();
    for (const auto& index : v.view.Indices()) {
      Add(v.at(index));
    }
  }

  // Records the element type enum plus bit width (1 for bool).
  template <typename T>
  void SetElementType() {
    out.set_element_type(GetElementType(T{}));
    if constexpr (std::is_same_v<T, bool>) {
      out.set_bit_width(1);
    } else {
      out.set_bit_width(sizeof(T) * 8);
    }
  }

  template <typename T>
  static TracedValue::ElementType GetElementType(const T&) {
    if constexpr (std::is_floating_point_v<T>) {
      return TracedValue::FLOAT;
    } else if constexpr (std::is_integral_v<T>) {
      if constexpr (std::is_unsigned_v<T>) {
        return TracedValue::UNSIGNED;
      } else {
        return TracedValue::INTEGRAL;
      }
    } else {
      // Deliberately ill-formed expression: forces a compile error if this
      // visitor is instantiated for an unsupported element type.
      T{"invalid type"} + 0;
      return TracedValue::UNKNOWN;
    }
  }

  template <typename T>
  static TracedValue::ElementType GetElementType(const std::complex<T>&) {
    return TracedValue::COMPLEX;
  }
};
}
// Records a new instruction (op name plus traced arguments) in the
// innermost region trace before `op` executes.
void ExecutionTraceListener::BeforeOp(ArrayRef<InterpreterValue> args,
                                      Operation* op) {
  auto* instruction = regions_.back()->add_instructions();
  instruction->set_name(op->getName().getStringRef().str());
  for (const InterpreterValue& argument : args) {
    *instruction->add_args() = ValueToTracedValue(argument);
  }
}
// Attaches the op's results to the most recently recorded instruction of
// the innermost region trace.
void ExecutionTraceListener::AfterOp(ArrayRef<InterpreterValue> results) {
  auto* last_instruction = regions_.back()->mutable_instructions()->rbegin();
  for (const InterpreterValue& result : results) {
    *last_instruction->mutable_results()->Add() = ValueToTracedValue(result);
  }
}
// Pushes a new region trace: the top-level trace when entering the first
// region, otherwise a child region of the last recorded instruction.
// Block arguments are serialized into the new region.
void ExecutionTraceListener::EnterRegion(ArrayRef<InterpreterValue> bbargs,
                                         Region& region) {
  auto* region_trace =
      regions_.empty()
          ? trace_->mutable_trace()
          : regions_.back()->mutable_instructions()->rbegin()->add_regions();
  regions_.push_back(region_trace);
  region_trace->set_region_number(region.getRegionNumber());
  for (const InterpreterValue& bbarg : bbargs) {
    *region_trace->add_bbargs() = ValueToTracedValue(bbarg);
  }
}
// Records the values yielded by the region and pops it from the stack.
void ExecutionTraceListener::LeaveRegion(ArrayRef<InterpreterValue> yielded) {
  auto* current_region = regions_.back();
  for (const InterpreterValue& value : yielded) {
    *current_region->add_results() = ValueToTracedValue(value);
  }
  regions_.pop_back();
}
// Converts an interpreter value into mlir::Attributes:
//  - a tuple yields one attribute per element (nested tuples unsupported),
//  - a scalar is wrapped in a rank-0 tensor and its single element returned,
//  - a tensor becomes a DenseElementsAttr shaped like the value.
// Returns an empty vector when a tensor value is paired with a non-shaped
// type.
llvm::SmallVector<mlir::Attribute> ValueToAttribute(
    const InterpreterValue& value, mlir::Type type) {
  if (std::holds_alternative<Tuple>(value.storage)) {
    auto types = type.cast<TupleType>().getTypes();
    const auto& t = std::get<Tuple>(value.storage);
    llvm::SmallVector<mlir::Attribute> attrs;
    for (const auto& [v, ty] : llvm::zip(t.values, types)) {
      auto attr = ValueToAttribute(*v, ty);
      assert(attr.size() == 1 && "nested tuples not supported");
      attrs.push_back(attr.front());
    }
    return attrs;
  }
  if (!value.IsTensor()) {
    // Scalar: convert via a unit (rank-0) tensor and unwrap the single
    // resulting element.
    return {cast<DenseElementsAttr>(
                ValueToAttribute(value.AsUnitTensor(),
                                 mlir::RankedTensorType::get({}, type))
                    .front())
                .getValues<mlir::Attribute>()[0]};
  }
  if (!type.isa<ShapedType>()) {
    return {};
  }
  auto shaped_ty = type.cast<ShapedType>();
  // Dispatch on the element type and build a dense attribute from the
  // tensor's elements in logical index order.
  return {DispatchScalarType(shaped_ty, [&](auto dummy) -> mlir::Attribute {
    using T = decltype(dummy);
    auto& t = std::get<TensorOrMemref<T>>(value.storage);
    SmallVector<T> vals;
    for (const auto& index : t.view.Indices()) {
      vals.push_back(t.at(index));
    }
    auto attr_ty =
        shaped_ty.cloneWith(t.view.sizes, shaped_ty.getElementType());
    if constexpr (std::is_same_v<T, bool>) {
      // DenseElementsAttr has a dedicated bool overload.
      return mlir::DenseElementsAttr::get(attr_ty, vals);
    } else {
      return mlir::DenseElementsAttr::get<T>(attr_ty, vals);
    }
  })};
}
namespace {
// Copies an array-shaped xla::Literal into a newly allocated
// TensorOrMemref<T>, preserving the literal's minor-to-major layout when
// present. T must correspond to the literal's element type since the bytes
// are memcpy'd verbatim (the size assertion guards against mismatches).
template <typename T>
TensorOrMemref<T> ArrayLiteralToTensor(const xla::Literal& literal) {
  SmallVector<int64_t> layout;
  if (literal.shape().has_layout()) {
    llvm::copy(literal.shape().layout().minor_to_major(),
               std::back_inserter(layout));
  }
  SmallVector<int64_t> shape{literal.shape().dimensions().begin(),
                             literal.shape().dimensions().end()};
  auto result = TensorOrMemref<T>::Empty(shape, layout);
  assert(literal.size_bytes() == result.buffer->GetByteSize() &&
         "expected buffer sizes to match");
  memcpy(result.buffer->at(0, 0), literal.untyped_data(),
         result.buffer->GetByteSize());
  return result;
}
}
// Converts an xla::Literal into an InterpreterValue. Tuples are converted
// recursively; arrays are copied element-type by element-type. Returns
// Unimplemented for token, F8, F16 and BF16 inputs and InvalidArgument for
// anything else unrecognized.
absl::StatusOr<InterpreterValue> LiteralToValue(const xla::Literal& literal) {
  if (literal.shape().IsTuple()) {
    // DecomposeTuple consumes its input, so convert a clone.
    auto elements = literal.Clone().DecomposeTuple();
    Tuple result;
    for (auto& element : elements) {
      TF_ASSIGN_OR_RETURN(auto converted, LiteralToValue(element));
      result.values.push_back(
          std::make_shared<InterpreterValue>(std::move(converted)));
    }
    return {{result}};
  }
  if (literal.shape().IsToken()) {
    return absl::UnimplementedError("token arguments are not implemented");
  }
  if (literal.shape().IsArray()) {
    auto type = literal.shape().element_type();
    if (xla::primitive_util::IsF8Type(type)) {
      return absl::UnimplementedError(
          absl::StrCat(xla::primitive_util::LowercasePrimitiveTypeName(type),
                       " not implemented"));
    }
    switch (type) {
      case xla::PRED:
        return {{ArrayLiteralToTensor<bool>(literal)}};
      case xla::S8:
        return {{ArrayLiteralToTensor<int8_t>(literal)}};
      case xla::S16:
        return {{ArrayLiteralToTensor<int16_t>(literal)}};
      case xla::S32:
        return {{ArrayLiteralToTensor<int32_t>(literal)}};
      case xla::S64:
        return {{ArrayLiteralToTensor<int64_t>(literal)}};
      case xla::U8:
        return {{ArrayLiteralToTensor<uint8_t>(literal)}};
      case xla::U16:
        return {{ArrayLiteralToTensor<uint16_t>(literal)}};
      case xla::U32:
        return {{ArrayLiteralToTensor<uint32_t>(literal)}};
      case xla::U64:
        return {{ArrayLiteralToTensor<uint64_t>(literal)}};
      case xla::F16:
        return absl::UnimplementedError("F16 not implemented");
      case xla::F32:
        return {{ArrayLiteralToTensor<float>(literal)}};
      case xla::BF16:
        return absl::UnimplementedError("BF16 not implemented");
      case xla::F64:
        return {{ArrayLiteralToTensor<double>(literal)}};
      case xla::C64:
        return {{ArrayLiteralToTensor<std::complex<float>>(literal)}};
      case xla::C128:
        return {{ArrayLiteralToTensor<std::complex<double>>(literal)}};
      default:
        break;
    }
  }
  return absl::InvalidArgumentError("unexpected literal type");
}
// Parses a LiteralProto and converts the resulting literal into an
// InterpreterValue.
absl::StatusOr<InterpreterValue> LiteralToValue(
    const xla::LiteralProto& literal) {
  TF_ASSIGN_OR_RETURN(xla::Literal parsed,
                      xla::Literal::CreateFromProto(literal));
  return LiteralToValue(parsed);
}
// Converts a LiteralProto and then reinterprets the resulting tensor as
// `type`'s scalar type: the underlying buffer and view are shared, only
// the element type of the returned tensor changes. No data conversion is
// performed.
// NOTE(review): this presumably requires the bit widths of the literal's
// and `type`'s element types to match — confirm against callers.
absl::StatusOr<InterpreterValue> LiteralToValue(
    const xla::LiteralProto& literal, mlir::Type type) {
  TF_ASSIGN_OR_RETURN(auto result, LiteralToValue(literal));
  return {DispatchScalarType(type, [&](auto dummy) -> InterpreterValue {
    TensorOrMemref<decltype(dummy)> cast;
    cast.view = result.View();
    cast.buffer = result.GetBuffer();
    return {cast};
  })};
}
// Serializes `value` into a TracedValue proto by dispatching on its
// variant storage.
TracedValue ValueToTracedValue(const InterpreterValue& value) {
  TraceInterpreterValueVisitor serializer;
  std::visit(serializer, value.storage);
  return serializer.out;
}
// Deserializes a TracedValue proto (as produced by ValueToTracedValue)
// back into an InterpreterValue. Returns InvalidArgumentError for unknown
// or unsupported element-type / bit-width combinations.
absl::StatusOr<InterpreterValue> TracedValueToValue(
    const TracedValue& traced_value) {
  // Rebuilds a scalar or tensor of type T (the type of `dummy`) from one
  // of the proto's repeated element fields.
  auto extract = [&](auto dummy, auto& elements) -> InterpreterValue {
    using T = decltype(dummy);
    if (traced_value.is_scalar()) {
      return {static_cast<T>(elements[0])};
    }
    auto result =
        TensorOrMemref<T>::Empty(llvm::to_vector(traced_value.shape()));
    for (auto [index, element] : llvm::zip(result.view.Indices(), elements)) {
      result.at(index) = element;
    }
    return {result};
  };
  // Complex values were flattened as (real, imag) pairs, so consume the
  // repeated field two elements at a time.
  auto extract_complex = [&](auto& elements) -> InterpreterValue {
    using T = std::complex<std::decay_t<decltype(elements[0])>>;
    if (traced_value.is_scalar()) {
      return {T{elements[0], elements[1]}};
    }
    auto result =
        TensorOrMemref<T>::Empty(llvm::to_vector(traced_value.shape()));
    int64_t i = 0;
    for (auto it = result.view.Indices().begin(),
              end = result.view.Indices().end();
         it != end; ++it, i += 2) {
      result.at(*it) = {elements[i], elements[i + 1]};
    }
    return {result};
  };
  switch (traced_value.element_type()) {
    case TracedValue::UNKNOWN:
      break;
    case TracedValue::FLOAT:
      if (traced_value.bit_width() == 32) {
        return extract(float{}, traced_value.floats());
      }
      return extract(double{}, traced_value.doubles());
    case TracedValue::UNSIGNED:
      switch (traced_value.bit_width()) {
        case 1:
          // Bools are serialized with bit width 1 into the ints field.
          return extract(bool{}, traced_value.ints());
        case 8:
          return extract(uint8_t{}, traced_value.uints());
        case 16:
          return extract(uint16_t{}, traced_value.uints());
        case 32:
          return extract(uint32_t{}, traced_value.uints());
        case 64:
          return extract(uint64_t{}, traced_value.uints());
      }
      break;
    case TracedValue::INTEGRAL:
      switch (traced_value.bit_width()) {
        case 8:
          return extract(int8_t{}, traced_value.ints());
        case 16:
          return extract(int16_t{}, traced_value.ints());
        case 32:
          return extract(int32_t{}, traced_value.ints());
        case 64:
          return extract(int64_t{}, traced_value.ints());
      }
      break;
    case TracedValue::COMPLEX:
      // Bit widths refer to the whole complex value (2x the component).
      switch (traced_value.bit_width()) {
        case 64:
          return extract_complex(traced_value.floats());
        case 128:
          return extract_complex(traced_value.doubles());
      }
      break;
    case TracedValue::TUPLE:
      Tuple result;
      for (const auto& elem : traced_value.tuple_elements()) {
        TF_ASSIGN_OR_RETURN(auto converted, TracedValueToValue(elem));
        result.values.push_back(
            std::make_shared<InterpreterValue>(std::move(converted)));
      }
      return {{std::move(result)}};
  }
  return absl::InvalidArgumentError("unexpected type: " +
                                    traced_value.DebugString());
}
// Returns every recorded execution of `op` in `trace`.
//
// The op is located by computing its path from the enclosing function:
// op_indices[i] is the op's position within its block at nesting depth i,
// and region_indices[i-1] is the region number it lives in at that depth.
// The trace is then walked along this path; all matching instruction
// traces are collected (an op inside a loop may have several executions).
llvm::SmallVector<const InstructionTrace*> FindOpExecutionsInTrace(
    const ExecutionTrace& trace, mlir::Operation* op) {
  llvm::SmallVector<int64_t> region_indices;
  llvm::SmallVector<int64_t> op_indices;

  // Builds the root-to-op path by recursing to the outermost ancestor
  // below the function first.
  std::function<void(mlir::Operation*)> get_op_path;
  get_op_path = [&](mlir::Operation* op) {
    auto* parent = op->getParentOp();
    if (!llvm::isa<func::FuncOp>(parent)) {
      get_op_path(parent);
      region_indices.push_back(op->getParentRegion()->getRegionNumber());
    }
    // Position of `op` within its block.
    int64_t index = 0;
    while ((op = op->getPrevNode()) != nullptr) ++index;
    op_indices.push_back(index);
  };
  get_op_path(op);

  llvm::SmallVector<const InstructionTrace*> result;
  // Descends into the trace along the computed path, branching into every
  // recorded execution of each intermediate region.
  std::function<void(const RegionTrace& trace, int index)> step;
  step = [&](const RegionTrace& trace, int index) {
    auto& instruction_trace = trace.instructions(op_indices[index]);
    // Cast avoids an implicit signed/unsigned comparison between the int
    // depth and size_t size().
    if (index < static_cast<int>(region_indices.size())) {
      for (const auto& region : instruction_trace.regions()) {
        if (region.region_number() == region_indices[index]) {
          step(region, index + 1);
        }
      }
    } else {
      result.push_back(&instruction_trace);
    }
  };
  step(trace.trace(), 0);

  return result;
}
}
} | #include "xla/mlir/tools/mlir_replay/public/execution_trace_utils.h"
#include <cmath>
#include <complex>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "llvm/ADT/STLExtras.h"
#include "mlir/Support/LLVM.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
#include "xla/mlir/tools/mlir_interpreter/framework/tensor_or_memref.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace interpreter {
namespace {
class TracedValueRoundTripTest
: public ::testing::TestWithParam<InterpreterValue> {};
TEST_P(TracedValueRoundTripTest, Run) {
auto traced_value = ValueToTracedValue(GetParam());
TF_ASSERT_OK_AND_ASSIGN(auto value, TracedValueToValue(traced_value));
EXPECT_EQ(GetParam(), value) << GetParam().ToString();
}
// Test helper: builds an InterpreterValue tensor of the given shape with
// the elements assigned in logical index order.
template <typename T>
InterpreterValue MakeTensor(ArrayRef<int64_t> shape, ArrayRef<T> values) {
  auto result = TensorOrMemref<T>::Empty(shape);
  for (auto [indices, value] : llvm::zip(result.view.Indices(), values)) {
    result.at(indices) = value;
  }
  return {result};
}
// Moves `value` into heap storage and hands back shared ownership of it.
template <typename T>
std::shared_ptr<T> WrapShared(T value) {
  auto owned = std::make_shared<T>(std::move(value));
  return owned;
}
INSTANTIATE_TEST_SUITE_P(
RoundTrip, TracedValueRoundTripTest,
::testing::ValuesIn(std::vector<InterpreterValue>{
{uint8_t{42}},
{uint16_t{43}},
{uint32_t{44}},
{uint64_t{45}},
{int8_t{-47}},
{int16_t{-48}},
{int32_t{-49}},
{int64_t{-50}},
{float{42.0}},
{double{42.0}},
{std::complex<float>{1.0, 2.0}},
{std::complex<double>{3.0, 4.0}},
{true},
{false},
{MakeTensor<int16_t>({1, 2}, {42, 43})},
{MakeTensor<double>({2, 2}, {1.0, -INFINITY, INFINITY, NAN})},
{MakeTensor<std::complex<double>>({}, {{1.0, 2.0}})},
{Tuple{SmallVector<std::shared_ptr<InterpreterValue>>{
WrapShared(InterpreterValue{42}),
WrapShared(InterpreterValue{43.0}),
}}}}));
class FromLiteralTest
: public ::testing::TestWithParam<
std::pair<std::shared_ptr<xla::Literal>, InterpreterValue>> {};
TEST_P(FromLiteralTest, Run) {
TF_ASSERT_OK_AND_ASSIGN(auto value, LiteralToValue(*GetParam().first));
EXPECT_EQ(value, GetParam().second)
<< value.ToString() << " vs " << GetParam().second.ToString();
}
std::vector<std::pair<std::shared_ptr<xla::Literal>, InterpreterValue>>
MakeInputs() {
using ::xla::LiteralUtil;
return {
{WrapShared(LiteralUtil::CreateR2<uint8_t>({{41, 42}})),
MakeTensor<uint8_t>({1, 2}, {41, 42})},
{WrapShared(LiteralUtil::CreateR0<uint16_t>(43)),
MakeTensor<uint16_t>({}, {43})},
{WrapShared(LiteralUtil::CreateR0<uint32_t>(44)),
MakeTensor<uint32_t>({}, {44})},
{WrapShared(LiteralUtil::CreateR0<uint64_t>(45)),
MakeTensor<uint64_t>({}, {45})},
{WrapShared(LiteralUtil::CreateR0<int8_t>(46)),
MakeTensor<int8_t>({}, {46})},
{WrapShared(LiteralUtil::CreateR0<int16_t>(47)),
MakeTensor<int16_t>({}, {47})},
{WrapShared(LiteralUtil::CreateR0<int32_t>(48)),
MakeTensor<int32_t>({}, {48})},
{WrapShared(LiteralUtil::CreateR0<int64_t>(49)),
MakeTensor<int64_t>({}, {49})},
{WrapShared(LiteralUtil::CreateR0<float>(50.0)),
MakeTensor<float>({}, {50.0})},
{WrapShared(LiteralUtil::CreateR0<double>(51.0)),
MakeTensor<double>({}, {51.0})},
{WrapShared(LiteralUtil::CreateR0<std::complex<float>>({52.0, 53.0})),
MakeTensor<std::complex<float>>({}, {{52.0, 53.0}})},
{WrapShared(LiteralUtil::CreateR0<std::complex<double>>({54.0, 55.0})),
MakeTensor<std::complex<double>>({}, {{54.0, 55.0}})},
{WrapShared(LiteralUtil::CreateR1<bool>({true, false})),
MakeTensor<bool>({2}, {true, false})},
{WrapShared(
LiteralUtil::MakeTupleOwned(LiteralUtil::CreateR0<bool>(true),
LiteralUtil::CreateR0<int8_t>(56))),
InterpreterValue{Tuple{SmallVector<std::shared_ptr<InterpreterValue>>{
std::make_shared<InterpreterValue>(MakeTensor<bool>({}, {true})),
std::make_shared<InterpreterValue>(
MakeTensor<int8_t>({}, {56}))}}}}};
}
INSTANTIATE_TEST_SUITE_P(Test, FromLiteralTest,
::testing::ValuesIn(MakeInputs()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_replay/public/execution_trace_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bf7de3c-09a8-4dc1-a844-4bd09c44fcb0 | cpp | tensorflow/tensorflow | list_dataset_op | tensorflow/core/kernels/data/experimental/list_dataset_op.cc | tensorflow/core/kernels/data/experimental/list_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/list_dataset_op.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/util/batch_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const ListDatasetOp::kDatasetType;
constexpr const char* const ListDatasetOp::kTensors;
constexpr const char* const ListDatasetOp::kTinputTypes;
constexpr const char* const ListDatasetOp::kOutputTypes;
constexpr const char* const ListDatasetOp::kOutputShapes;
// Dataset that yields a fixed list of elements, each consisting of
// `num_components` tensors taken consecutively from the flat `tensors`
// input. Supports random access, split providers and global shuffling.
class ListDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors,
          const DataTypeVector& input_types, const DataTypeVector& output_types,
          const std::vector<PartialTensorShape>& output_shapes,
          int num_components)
      : DatasetBase(DatasetContext(ctx)),
        tensors_(std::move(tensors)),
        // Each element owns num_components consecutive tensors.
        num_elements_(tensors_.size() / num_components),
        num_components_(num_components),
        input_types_(input_types),
        output_types_(output_types),
        output_shapes_(output_shapes) {}
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }
  // One index-based split per element.
  Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
                                split_providers) const override {
    split_providers->push_back(
        std::make_unique<IndexSplitProvider>(num_elements_));
    return absl::OkStatus();
  }
  const DataTypeVector& output_dtypes() const override { return output_types_; }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return output_shapes_;
  }
  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }
  // Cardinality is always known: the number of elements.
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    return num_elements_;
  }
  // This is a source dataset; it has no inputs.
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    return absl::OkStatus();
  }
  Status CheckExternalState() const override { return absl::OkStatus(); }
  absl::Status RandomIndexingCompatible() const override {
    return absl::OkStatus();
  }
  absl::Status Get(OpKernelContext* ctx, int64_t index,
                   std::vector<Tensor>* out_tensors) const override {
    return Get(AnyContext(ctx), index, out_tensors);
  }
  // Random access: copies the `index`-th element's component tensors.
  absl::Status Get(AnyContext ctx, int64_t index,
                   std::vector<Tensor>* out_tensors) const override {
    TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
    out_tensors->clear();
    out_tensors->reserve(num_components_);
    for (int i = 0; i < num_components_; ++i) {
      out_tensors->push_back(tensors_[i + num_components_ * index]);
    }
    return absl::OkStatus();
  }

 protected:
  // Serializes the dataset by emitting one input node per tensor; during
  // graph rewrites the tensors become placeholders recorded in the
  // context's input list instead.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    std::vector<Node*> tensors;
    tensors.reserve(tensors_.size());
    for (const Tensor& t : tensors_) {
      Node* node;
      if (!ctx->is_graph_rewrite()) {
        TF_RETURN_IF_ERROR(b->AddDatasetOrTensor(ctx, t, &node));
      } else {
        TF_RETURN_IF_ERROR(b->AddPlaceholder(t, &node));
        DCHECK_NE(ctx->input_list(), nullptr);
        ctx->input_list()->emplace_back(node->name(), t);
      }
      tensors.emplace_back(node);
    }
    AttrValue input_types;
    b->BuildAttrValue(input_types_, &input_types);
    TF_RETURN_IF_ERROR(b->AddDataset(this, {}, {{0, tensors}},
                                     {{kTinputTypes, input_types}}, output));
    return absl::OkStatus();
  }

 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params),
          global_shuffle_iterator_(dataset()) {}
    bool SymbolicCheckpointCompatible() const override { return true; }
    // Uses the context's split provider when one is supplied, otherwise a
    // private index split provider over all elements.
    Status Initialize(IteratorContext* ctx) override {
      if (ctx->split_providers().empty()) {
        split_provider_ =
            std::make_shared<IndexSplitProvider>(dataset()->num_elements_);
      } else {
        TF_ASSIGN_OR_RETURN(split_provider_,
                            GetSingleSplitProvider(ctx, dataset()));
      }
      return absl::OkStatus();
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      // Global shuffling bypasses the split provider entirely.
      if (ctx->index_mapper() != nullptr) {
        return global_shuffle_iterator_.GetNext(ctx, out_tensors,
                                                end_of_sequence);
      }
      Tensor split;
      TF_RETURN_IF_ERROR(split_provider_->GetNext(&split, end_of_sequence));
      if (*end_of_sequence) {
        return absl::OkStatus();
      }
      // The split scalar is the element index.
      int64_t index = split.scalar<int64_t>()();
      out_tensors->reserve(dataset()->num_components_);
      for (size_t i = 0; i < dataset()->num_components_; ++i) {
        out_tensors->push_back(
            dataset()->tensors_[i + dataset()->num_components_ * index]);
      }
      *end_of_sequence = false;
      return absl::OkStatus();
    }

   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeSourceNode(std::move(args));
    }
    // Checkpointing delegates to the split provider and the global-shuffle
    // iterator; the tensors themselves live in the dataset.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      TF_RETURN_IF_ERROR(split_provider_->Save(
          [this](const std::string& key) { return full_name(key); }, writer));
      TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
      return absl::OkStatus();
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      // A restored element count signals a globally shuffled run.
      if (ctx->restored_element_count().has_value()) {
        return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
      }
      return split_provider_->Restore(
          [this](const std::string& key) { return full_name(key); }, reader);
    }

   private:
    std::shared_ptr<SplitProvider> split_provider_;
    GlobalShuffleIterator global_shuffle_iterator_;
  };
  // Flat list of tensors: element i owns tensors
  // [i * num_components_, (i + 1) * num_components_).
  const std::vector<Tensor> tensors_;
  int64 num_elements_;
  size_t num_components_;
  DataTypeVector input_types_;
  DataTypeVector output_types_;
  std::vector<PartialTensorShape> output_shapes_;
};
// Reads the op's type/shape attributes; fails construction if any is
// missing or malformed.
ListDatasetOp::ListDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kTinputTypes, &input_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
// Builds the Dataset from the op's input tensor list and validates that
// its dtypes/shapes match the declared output attributes.
// NOTE(review): the dataset is constructed before the type/shape checks;
// on a verification failure *output is already set — confirm the kernel
// framework releases it in that case.
void ListDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
  OpInputList inputs;
  OP_REQUIRES_OK(ctx, ctx->input_list(kTensors, &inputs));
  std::vector<Tensor> tensors(inputs.begin(), inputs.end());
  // The number of components per element equals the number of declared
  // output shapes.
  *output = new Dataset(ctx, std::move(tensors), input_types_, output_types_,
                        output_shapes_, output_shapes_.size());
  OP_REQUIRES_OK(ctx,
                 VerifyTypesMatch((*output)->output_dtypes(), output_types_));
  OP_REQUIRES_OK(
      ctx, VerifyShapesCompatible((*output)->output_shapes(), output_shapes_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ListDataset").Device(DEVICE_CPU), ListDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/experimental/list_dataset_op.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "tensor_list_dataset";
class ListDatasetOpTest : public DatasetOpsTestBase {};
// Test parameters for ListDataset: flattens a list of elements (each a
// vector of component tensors) into the flat tensor list and per-tensor
// input types the op expects.
class ListDatasetParams : public DatasetParams {
 public:
  ListDatasetParams(std::vector<std::vector<Tensor>> elements, string node_name)
      : DatasetParams(ListOutputTypes(elements), ListOutputShapes(elements),
                      std::move(node_name)) {
    input_types_.reserve(elements.size() * elements.front().size());
    tensors_.reserve(elements.size() * elements.front().size());
    for (const auto& element : elements) {
      for (const auto& tensor : element) {
        input_types_.push_back(tensor.dtype());
        tensors_.emplace_back(std::move(tensor));
      }
    }
  }
  std::vector<Tensor> GetInputTensors() const override { return tensors_; }
  // One "tensors_<i>" input name per flattened tensor.
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->reserve(tensors_.size());
    for (int i = 0; i < tensors_.size(); ++i) {
      input_names->emplace_back(absl::StrCat("tensors_", i));
    }
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{"Tinput_types", input_types_},
                    {"output_types", output_dtypes_},
                    {"output_shapes", output_shapes_},
                    {"metadata", ""}};
    return absl::OkStatus();
  }
  string dataset_type() const override { return "List"; }
  int64_t num_elements() const {
    return tensors_.size() / num_tensors_per_element();
  }
  size_t num_tensors_per_element() const { return output_shapes_.size(); }

 private:
  // Dtype of every flattened tensor (unused helper; input_types_ is filled
  // directly in the constructor).
  DataTypeVector ListInputTypes(
      const std::vector<std::vector<Tensor>>& input_elements) {
    DataTypeVector input_types;
    for (const auto& element : input_elements) {
      for (const auto& tensor : element) {
        input_types.emplace_back(tensor.dtype());
      }
    }
    return input_types;
  }
  // Output dtypes are taken from the first element's components.
  DataTypeVector ListOutputTypes(
      const std::vector<std::vector<Tensor>>& input_elements) {
    DataTypeVector output_types;
    for (const auto& tensor : input_elements.front()) {
      output_types.emplace_back(tensor.dtype());
    }
    return output_types;
  }
  // Output shapes mirror the first element's component shapes.
  std::vector<PartialTensorShape> ListOutputShapes(
      const std::vector<std::vector<Tensor>>& input_elements) {
    std::vector<PartialTensorShape> output_shapes;
    for (const auto& tensor : input_elements.front()) {
      absl::InlinedVector<int64_t, 4UL> partial_dim_sizes;
      partial_dim_sizes.reserve(tensor.dims());
      for (int i = 0; i < tensor.dims(); ++i) {
        partial_dim_sizes.push_back(tensor.dim_size(i));
      }
      output_shapes.emplace_back(std::move(partial_dim_sizes));
    }
    return output_shapes;
  }

 public:
  std::vector<Tensor> tensors_;
  DataTypeVector input_types_;
};
ListDatasetParams PlainListDatasetParams() {
std::vector<std::vector<Tensor>> elements = {
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"})},
{CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}};
return {std::move(elements), kNodeName};
}
ListDatasetParams NestedListDatasetParams() {
std::vector<std::vector<Tensor>> elements = {
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3})},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}};
return {std::move(elements), kNodeName};
}
std::vector<GetNextTestCase<ListDatasetParams>> GetNextTestCases() {
return {
{PlainListDatasetParams(),
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedListDatasetParams(),
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedGetNextTest
: public ListDatasetOpTest,
public ::testing::WithParamInterface<GetNextTestCase<ListDatasetParams>> {
};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
size_t num_tensors_per_element =
test_case.dataset_params.num_tensors_per_element();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_element = 0;
while (true) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (end_of_sequence) {
EXPECT_TRUE(out_tensors.empty());
break;
}
for (int i = 0; i < out_tensors.size(); ++i) {
EXPECT_LT(i + num_tensors_per_element * cur_element,
test_case.expected_outputs.size());
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output = out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i + num_tensors_per_element * cur_element]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case
.expected_outputs[i + num_tensors_per_element * cur_element]));
}
}
cur_element++;
}
}
INSTANTIATE_TEST_SUITE_P(ListDatasetOpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(ListDatasetOpTest, DatasetNodeName) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ListDatasetOpTest, DatasetTypeString) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(
CheckDatasetTypeString(name_utils::OpName(ListDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ListDatasetParams>>
DatasetOutputTypesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_dtypes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetOutputTypesTestCases())
std::vector<DatasetOutputShapesTestCase<ListDatasetParams>>
DatasetOutputShapesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_shapes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ListDatasetParams>>
DatasetCardinalityTestCases() {
return {{PlainListDatasetParams(), 2},
{NestedListDatasetParams(), 2}};
}
DATASET_CARDINALITY_TEST_P(ListDatasetOpTest, ListDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ListDatasetParams>>
IteratorOutputTypesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_dtypes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
IteratorOutputTypesTestCases())
std::vector<IteratorOutputShapesTestCase<ListDatasetParams>>
IteratorOutputShapesTestCases() {
return {
{PlainListDatasetParams(), PlainListDatasetParams().output_shapes()},
{NestedListDatasetParams(), NestedListDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ListDatasetOpTest, ListDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ListDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PlainListDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ListDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ListDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PlainListDatasetParams(),
{0, 1, 2},
{CreateTensor<int64_t>(TensorShape({}), {1}),
CreateTensor<int64_t>(TensorShape({2}), {1, 2}),
CreateTensor<uint32>(TensorShape({}), {2}),
CreateTensor<uint32>(TensorShape({2}), {2, 3}),
CreateTensor<uint64>(TensorShape({}), {3}),
CreateTensor<uint64>(TensorShape({2}), {3, 4}),
CreateTensor<double>(TensorShape({1}), {37.0}),
CreateTensor<tstring>(TensorShape({1}), {"a"}),
CreateTensor<int64_t>(TensorShape({}), {2}),
CreateTensor<int64_t>(TensorShape({2}), {3, 4}),
CreateTensor<uint32>(TensorShape({}), {3}),
CreateTensor<uint32>(TensorShape({2}), {4, 5}),
CreateTensor<uint64>(TensorShape({}), {4}),
CreateTensor<uint64>(TensorShape({2}), {5, 6}),
CreateTensor<double>(TensorShape({1}), {38.0}),
CreateTensor<tstring>(TensorShape({1}), {"b"})}},
{NestedListDatasetParams(),
{0, 1, 2},
{CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {1.0, 2.0, 3.0, 4.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"a", "b"})}),
CreateTensor<int64_t>(TensorShape({3}), {1, 2, 3}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<double>(TensorShape({2, 2}), {5.0, 6.0, 7.0, 8.0})}),
CreateTensor<Variant>(
TensorShape({1}),
{CreateTensor<tstring>(TensorShape({1, 2}), {"c", "d"})}),
CreateTensor<int64_t>(TensorShape({3}), {4, 5, 6})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public ListDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<ListDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_context;
TF_ASSERT_OK(CreateSerializationContext(&serialization_context));
int cur_iteration = 0;
bool end_of_sequence = false;
auto params = static_cast<ListDatasetParams&>(test_case.dataset_params);
int64_t num_elements = params.num_elements();
size_t num_tensors_per_element = params.num_tensors_per_element();
std::vector<Tensor> out_tensors;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_elements) {
for (int i = 0; i < out_tensors.size(); ++i) {
if (out_tensors[i].dtype() == DT_VARIANT) {
const Tensor* output =
out_tensors[i].scalar<Variant>()().get<Tensor>();
const Tensor* expected_output =
test_case
.expected_outputs[i + num_tensors_per_element *
(cur_iteration - 1)]
.scalar<Variant>()()
.get<Tensor>();
TF_EXPECT_OK(ExpectEqual(*output, *expected_output));
} else {
TF_EXPECT_OK(ExpectEqual(
out_tensors[i],
test_case.expected_outputs[i + num_tensors_per_element *
(cur_iteration - 1)]));
}
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_context.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader, "Iterator",
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_SUITE_P(
ListDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
TEST_F(ListDatasetOpTest, SplitProvider) {
auto params =
ListDatasetParams({{CreateTensor<int64_t>(TensorShape({}), {6})},
{CreateTensor<int64_t>(TensorShape({}), {2})},
{CreateTensor<int64_t>(TensorShape({}), {3})},
{CreateTensor<int64_t>(TensorShape({}), {8})},
{CreateTensor<int64_t>(TensorShape({}), {7})},
{CreateTensor<int64_t>(TensorShape({}), {0})},
{CreateTensor<int64_t>(TensorShape({}), {10})}},
kNodeName);
TF_ASSERT_OK(InitializeRuntime(params));
TF_EXPECT_OK(CheckSplitProviderFullIteration(
params, CreateTensors<int64_t>(TensorShape({}),
{{6}, {2}, {3}, {8}, {7}, {0}, {10}})));
TF_EXPECT_OK(CheckSplitProviderShardedIteration(
params, 3, 1,
CreateTensors<int64_t>(TensorShape({}), {{2}, {7}})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/list_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/list_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb108b70-21f6-4315-9b31-59c2e7d8fca6 | cpp | google/cel-cpp | error_type | common/types/error_type.h | common/types/error_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_ERROR_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_ERROR_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
class ErrorType final {
public:
static constexpr TypeKind kKind = TypeKind::kError;
static constexpr absl::string_view kName = "*error*";
ErrorType() = default;
ErrorType(const ErrorType&) = default;
ErrorType(ErrorType&&) = default;
ErrorType& operator=(const ErrorType&) = default;
ErrorType& operator=(ErrorType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(ErrorType&) noexcept {}
};
inline constexpr void swap(ErrorType& lhs, ErrorType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(ErrorType, ErrorType) { return true; }
inline constexpr bool operator!=(ErrorType lhs, ErrorType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, ErrorType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const ErrorType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(ErrorType, Kind) {
EXPECT_EQ(ErrorType().kind(), ErrorType::kKind);
EXPECT_EQ(Type(ErrorType()).kind(), ErrorType::kKind);
}
TEST(ErrorType, Name) {
EXPECT_EQ(ErrorType().name(), ErrorType::kName);
EXPECT_EQ(Type(ErrorType()).name(), ErrorType::kName);
}
TEST(ErrorType, DebugString) {
{
std::ostringstream out;
out << ErrorType();
EXPECT_EQ(out.str(), ErrorType::kName);
}
{
std::ostringstream out;
out << Type(ErrorType());
EXPECT_EQ(out.str(), ErrorType::kName);
}
}
TEST(ErrorType, Hash) {
EXPECT_EQ(absl::HashOf(ErrorType()), absl::HashOf(ErrorType()));
}
TEST(ErrorType, Equal) {
EXPECT_EQ(ErrorType(), ErrorType());
EXPECT_EQ(Type(ErrorType()), ErrorType());
EXPECT_EQ(ErrorType(), Type(ErrorType()));
EXPECT_EQ(Type(ErrorType()), Type(ErrorType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/error_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/error_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
6e2f5336-6a76-45d8-b557-76aa0818c7ef | cpp | tensorflow/tensorflow | fusion_block_level_rewriter | third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter.cc | third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter_test.cc | #include "xla/service/gpu/transforms/fusion_block_level_rewriter.h"
#include <string>
#include <utility>
#include <variant>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::MLIRContext;
absl::StatusOr<bool> ProcessFusionInstruction(
HloFusionInstruction* fusion_instruction,
const se::DeviceDescription& device_info,
HloCostAnalysis::ShapeSizeFunction shape_size, MLIRContext* ctx) {
const HloComputation* fusion_computation =
fusion_instruction->fused_instructions_computation();
if (CodegenDecision can_codegen = IsTritonSupportedComputation(
*fusion_computation, device_info.gpu_compute_capability());
!can_codegen) {
VLOG(2) << "Can't rewrite fusion " << fusion_instruction->ToString()
<< " because one or more instructions is not supported by Triton: "
<< can_codegen.Explain();
return false;
}
TF_ASSIGN_OR_RETURN(auto backend_config,
fusion_instruction->backend_config<GpuBackendConfig>());
if (backend_config.has_fusion_backend_config() &&
backend_config.fusion_backend_config().has_block_level_fusion_config()) {
return false;
}
HloFusionAnalysisCache fusion_analysis_cache(device_info);
GpuPerformanceModelWithIndexingAnalysis indexing_performance_model(
&device_info, &fusion_analysis_cache, shape_size, ctx);
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
Cast<HloFusionInstruction>(fusion_instruction));
TF_ASSIGN_OR_RETURN(
TiledRunTimeDataOrError tiled_runtime_data_or_error,
indexing_performance_model.TryFindBestTilingForFusion(*fusion_adaptor));
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&tiled_runtime_data_or_error)) {
VLOG(2) << "Can't rewrite fusion " << fusion_instruction->ToString()
<< " because tiling search failed. (The most likely cause for "
<< "is that SymbolicTileAnalysis failed.)";
return false;
}
TiledRunTimeData tiled_runtime_data =
std::get<TiledRunTimeData>(std::move(tiled_runtime_data_or_error));
VLOG(1)
<< "Found parameters "
<< absl::StrCat(
"sizes=[",
absl::StrJoin(
tiled_runtime_data.block_level_parameters.output_tile_sizes,
", "),
"], num_warps=",
tiled_runtime_data.block_level_parameters.num_warps)
<< " for fusion computation " << fusion_computation->ToString();
*backend_config.mutable_fusion_backend_config()
->mutable_block_level_fusion_config() =
tiled_runtime_data.block_level_parameters.ToBlockLevelFusionConfig();
backend_config.mutable_fusion_backend_config()->set_kind(
std::string(kTritonFusionKind));
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(backend_config));
return true;
}
}
absl::StatusOr<bool> FusionBlockLevelRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
TF_RETURN_IF_ERROR(EnsureTritonSupportsComputeCapability(
device_info_.gpu_compute_capability()));
MLIRContext ctx;
bool has_changed = false;
for (HloComputation* computation :
module->MakeComputationSorted(execution_threads)) {
if (!computation->IsFusionComputation()) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool changed,
ProcessFusionInstruction(
::xla::Cast<HloFusionInstruction>(computation->FusionInstruction()),
device_info_, shape_size_, &ctx));
has_changed |= changed;
}
return has_changed;
}
}
} | #include "xla/service/gpu/transforms/fusion_block_level_rewriter.h"
#include <cstdint>
#include <memory>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::tsl::testing::IsOkAndHolds;
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
bool HasTritonBlockLevelFusionConfig(const HloInstruction* fusion) {
return fusion->opcode() == HloOpcode::kFusion &&
fusion->has_backend_config() &&
fusion->backend_config<GpuBackendConfig>().ok() &&
fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_block_level_fusion_config() &&
fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.kind() == kTritonFusionKind;
}
class FusionBlockLevelRewriterTest : public HloTestBase {
protected:
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo(
se::CudaComputeCapability::Ampere())};
};
TEST_F(FusionBlockLevelRewriterTest,
DoesNotRewriteFusionThatIsAlreadyBlockLevel) {
const absl::string_view hlo_text = R"(
fusion_computation {
ROOT param_0 = f32[10,10] parameter(0)
}
ENTRY entry {
param_0 = f32[10,10] parameter(0)
ROOT fusion = f32[10,10] fusion(param_0), kind=kCustom,
calls=fusion_computation,
backend_config={"fusion_backend_config":
{"kind":"__triton", "block_level_fusion_config":{}}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction())
.Run(module.get()),
IsOkAndHolds(false));
}
TEST_F(FusionBlockLevelRewriterTest,
RewritesFusionThatIsNotBlockLevelAndCanBeTiledAndCodegenedCorrectly) {
const absl::string_view hlo_text = R"(
fusion_computation {
ROOT param_0 = f32[10,10] parameter(0)
}
ENTRY entry {
param_0 = f32[10,10] parameter(0)
ROOT fusion = f32[10,10] fusion(param_0), kind=kLoop,
calls=fusion_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction())
.Run(module.get()),
IsOkAndHolds(true));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasTritonBlockLevelFusionConfig)));
}
TEST_F(FusionBlockLevelRewriterTest,
DoesNotRewriteFusionThatIsNotBlockLevelAndCannotBeTiledCorrectly) {
const absl::string_view hlo_text = R"(
fusion_computation {
param_0 = f32[10,10] parameter(0)
ROOT bitcast = f32[25,4] bitcast(param_0)
}
ENTRY entry {
param_0 = f32[10,10] parameter(0)
ROOT fusion = f32[25,4] fusion(param_0), kind=kLoop,
calls=fusion_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
mlir::MLIRContext ctx;
ASSERT_FALSE(std::holds_alternative<SymbolicTileAnalysis>(
SymbolicTileAnalysis::AnalyzeComputation(
*module->GetComputationWithName("fusion_computation"), &ctx)));
EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction())
.Run(module.get()),
IsOkAndHolds(false));
}
TEST_F(FusionBlockLevelRewriterTest,
DoesNotRewriteFusionThatIsNotBlockLevelAndCannotBeCodegenedCorrectly) {
const absl::string_view hlo_text = R"(
fusion_computation {
param_0 = f8e4m3fn[10,10] parameter(0)
ROOT add = f8e4m3fn[10,10] add(param_0, param_0)
}
ENTRY entry {
param_0 = f8e4m3fn[10,10] parameter(0)
ROOT fusion = f8e4m3fn[10,10] fusion(param_0), kind=kLoop,
calls=fusion_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
ASSERT_FALSE(IsTritonSupportedComputation(
*module->GetComputationWithName("fusion_computation"),
device_info_.gpu_compute_capability()));
EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction())
.Run(module.get()),
IsOkAndHolds(false));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
29bd60c2-1063-443b-b3e5-ead2ffba7234 | cpp | tensorflow/tensorflow | optimize_input_output_buffer_alias | third_party/xla/xla/service/optimize_input_output_buffer_alias.cc | third_party/xla/xla/service/optimize_input_output_buffer_alias_test.cc | #include "xla/service/optimize_input_output_buffer_alias.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::StatusOr<bool> OptimizeInputOutputBufferAlias::Build(
absl::Span<const Shape> input_shapes, const Shape& output_shape,
HloInputOutputAliasConfig* alias_config,
HloBufferDonorConfig* buffer_donor_config) {
bool changed = false;
if (output_shape.is_dynamic()) {
return false;
}
struct DonorEntry {
int64_t param_number;
ShapeIndex index;
int64_t shape_size;
};
absl::flat_hash_map<int64_t, std::vector<DonorEntry>> donors;
for (int64_t param_number = 0; param_number < input_shapes.size();
++param_number) {
const Shape& input_shape = input_shapes[param_number];
TF_RET_CHECK(LayoutUtil::HasLayout(input_shape));
VLOG(1) << "input_shape: " << input_shape.ToString();
ShapeUtil::ForEachSubshape(input_shape, [&](const Shape& subshape,
const ShapeIndex& index) {
if (!LayoutUtil::IsDenseArray(subshape) || subshape.is_dynamic()) {
return;
}
if (alias_config->ParameterHasAlias(param_number, index)) {
return;
}
if (registered_buffer_donor_only_ &&
!buffer_donor_config->ParameterIsBufferDonor(param_number, index)) {
return;
}
int64_t memory_space = subshape.layout().memory_space();
donors[memory_space].emplace_back(
DonorEntry{param_number, index, shape_size_fn_(subshape)});
});
}
struct DoneeEntry {
ShapeIndex index;
int64_t shape_size;
};
absl::flat_hash_map<int64_t, std::vector<DoneeEntry>> donees;
TF_RET_CHECK(LayoutUtil::HasLayout(output_shape));
VLOG(1) << "output_shape: " << output_shape.ToString();
ShapeUtil::ForEachSubshape(
output_shape, [&](const Shape& subshape, const ShapeIndex& index) {
if (!LayoutUtil::IsDenseArray(subshape)) {
return;
}
if (alias_config->OutputHasAlias(index)) {
return;
}
int64_t memory_space = subshape.layout().memory_space();
donees[memory_space].emplace_back(
DoneeEntry{index, shape_size_fn_(subshape)});
});
for (auto& [memory_space, donor_vector] : donors) {
auto donee_it = donees.find(memory_space);
if (donee_it == donees.end()) {
continue;
}
auto& donee_vector = donee_it->second;
absl::c_stable_sort(donor_vector,
[](const DonorEntry& a, const DonorEntry& b) -> bool {
return a.shape_size > b.shape_size;
});
absl::c_stable_sort(donee_vector,
[](const DoneeEntry& a, const DoneeEntry& b) -> bool {
return a.shape_size > b.shape_size;
});
int64_t donor_vector_index = 0;
int64_t donee_vector_index = 0;
while (donor_vector_index < donor_vector.size() &&
donee_vector_index < donee_vector.size()) {
const auto& donor = donor_vector[donor_vector_index];
const auto& donee = donee_vector[donee_vector_index];
if (donor.shape_size > donee.shape_size) {
donor_vector_index += 1;
} else if (donor.shape_size < donee.shape_size) {
donee_vector_index += 1;
} else {
TF_RETURN_IF_ERROR(alias_config->SetUpAlias(
donee.index, donor.param_number, donor.index));
TF_RETURN_IF_ERROR(buffer_donor_config->RemoveBufferDonor(
donor.param_number, donor.index));
donor_vector_index += 1;
donee_vector_index += 1;
changed = true;
}
}
}
return changed;
}
absl::StatusOr<bool> OptimizeInputOutputBufferAlias::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const auto& entry_computation_layout = module->entry_computation_layout();
std::vector<Shape> input_shapes;
input_shapes.reserve(module->entry_computation()->num_parameters());
for (int64_t i = 0; i < module->entry_computation()->num_parameters(); ++i) {
input_shapes.push_back(entry_computation_layout.parameter_shape(i));
}
const Shape& output_shape = entry_computation_layout.result_shape();
HloInputOutputAliasConfig* alias_config =
&module->input_output_alias_config();
HloBufferDonorConfig* buffer_donor_config = &module->buffer_donor_config();
TF_ASSIGN_OR_RETURN(bool changed, Build(input_shapes, output_shape,
alias_config, buffer_donor_config));
TF_RETURN_IF_ERROR(alias_config->Verify(*module, shape_size_fn_));
return changed;
}
} | #include "xla/service/optimize_input_output_buffer_alias.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
class OptimizeInputOutputBufferAliasTest : public HloTestBase {
protected:
OptimizeInputOutputBufferAliasTest() {
r1f32_ = ShapeUtil::MakeShape(F32, {4});
r2f32_ = ShapeUtil::MakeShape(F32, {4, 5});
r3f32_ = ShapeUtil::MakeShape(F32, {4, 5, 6});
r4f32_ = ShapeUtil::MakeShape(F32, {4, 5, 6, 7});
d1f32_ = ShapeUtil::MakeShape(F32, {256}, {true});
d2f32_ = ShapeUtil::MakeShape(F32, {128, 128},
{false, true});
d3f32_ = ShapeUtil::MakeShape(F32, {512});
}
void CreatePassAndBufferDonorConfig(
bool registered_donor_buffer_only = false) {
optimize_pass_ = std::make_unique<OptimizeInputOutputBufferAlias>(
registered_donor_buffer_only);
buffer_donor_config_ = HloBufferDonorConfig();
}
int64_t AliasCount() {
int64_t count = 0;
alias_config_.ForEachAlias(
[&](const ShapeIndex&, const HloInputOutputAliasConfig::Alias&) {
count++;
});
return count;
}
bool BuildAliasConfig(const std::vector<Shape>& input_shapes,
const Shape& output_shape) {
alias_config_ = HloInputOutputAliasConfig(output_shape);
auto changed = optimize_pass_->Build(input_shapes, output_shape,
&alias_config_, &buffer_donor_config_);
TF_CHECK_OK(changed.status());
return changed.value();
}
std::unique_ptr<OptimizeInputOutputBufferAlias> optimize_pass_;
HloInputOutputAliasConfig alias_config_;
HloBufferDonorConfig buffer_donor_config_;
Shape r1f32_;
Shape r2f32_;
Shape r3f32_;
Shape r4f32_;
Shape d1f32_;
Shape d2f32_;
Shape d3f32_;
};
TEST_F(OptimizeInputOutputBufferAliasTest, AllDifferentBufferSizes) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({r1f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, OrderedNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{0});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {3}), ShapeIndex{3});
}
TEST_F(OptimizeInputOutputBufferAliasTest, PartialReuseNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r1f32_, r2f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 2);
EXPECT_TRUE(alias_config_.OutputHasAlias(ShapeIndex{0}));
EXPECT_TRUE(alias_config_.OutputHasAlias(ShapeIndex{1}));
EXPECT_FALSE(alias_config_.OutputHasAlias(ShapeIndex{2}));
EXPECT_FALSE(alias_config_.OutputHasAlias(ShapeIndex{3}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, UnorderedNonNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r4f32_, r3f32_, r2f32_, r1f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{3});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {3}), ShapeIndex{0});
}
TEST_F(OptimizeInputOutputBufferAliasTest, UnorderedNestedTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({r1f32_}), r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape(
{r1f32_, ShapeUtil::MakeTupleShape({r3f32_, r2f32_}), r2f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 3);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0, 0}), ShapeIndex{0});
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {1}), ShapeIndex({1, 1}));
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {2}), ShapeIndex({1, 0}));
EXPECT_FALSE(alias_config_.ParameterHasAlias(0, {0, 3}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, MultipleParameters) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {{r1f32_, r2f32_, r3f32_, r4f32_}};
Shape output = ShapeUtil::MakeTupleShape({r4f32_, r3f32_, r2f32_, r1f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 4);
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {}), ShapeIndex{3});
EXPECT_EQ(alias_config_.GetAliasedOutput(1, {}), ShapeIndex{2});
EXPECT_EQ(alias_config_.GetAliasedOutput(2, {}), ShapeIndex{1});
EXPECT_EQ(alias_config_.GetAliasedOutput(3, {}), ShapeIndex{0});
}
TEST_F(OptimizeInputOutputBufferAliasTest, BufferDonorOnly) {
CreatePassAndBufferDonorConfig(true);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({r1f32_, r2f32_})};
Shape output = ShapeUtil::MakeTupleShape({r2f32_, r1f32_});
TF_CHECK_OK(buffer_donor_config_.AddBufferDonor(0, {0}));
EXPECT_TRUE(buffer_donor_config_.ParameterIsBufferDonor(0, {0}));
bool changed = BuildAliasConfig(input, output);
EXPECT_TRUE(changed);
EXPECT_EQ(AliasCount(), 1);
EXPECT_FALSE(buffer_donor_config_.ParameterIsBufferDonor(0, {0}));
EXPECT_EQ(alias_config_.GetAliasedOutput(0, {0}), ShapeIndex{1});
EXPECT_FALSE(alias_config_.GetAliasedOutput(0, {1}));
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeWithTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {ShapeUtil::MakeTupleShape({d1f32_, d2f32_})};
Shape output = ShapeUtil::MakeTupleShape({d1f32_, d2f32_});
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeNoTuple) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d1f32_, d2f32_};
Shape output = d1f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeBufferOutput) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d1f32_, d2f32_};
Shape output = d3f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, DynamicShapeBufferInput) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {d3f32_};
Shape output = d1f32_;
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
TEST_F(OptimizeInputOutputBufferAliasTest, AllDifferentMemorySpaces) {
CreatePassAndBufferDonorConfig(false);
std::vector<Shape> input = {
ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_})};
Shape output = ShapeUtil::MakeTupleShape({r1f32_, r2f32_, r3f32_, r4f32_});
for (int i = 0; i < output.tuple_shapes_size(); ++i) {
output.mutable_tuple_shapes(i)->mutable_layout()->set_memory_space(
Layout::kHostMemorySpace);
}
bool changed = BuildAliasConfig(input, output);
EXPECT_FALSE(changed);
EXPECT_EQ(AliasCount(), 0);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/optimize_input_output_buffer_alias.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/optimize_input_output_buffer_alias_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
458c05c5-40e1-494f-bb6d-f746c1850d3f | cpp | google/tensorstore | identity_transform | tensorstore/index_space/internal/identity_transform.cc | tensorstore/index_space/identity_transform_test.cc | #include "tensorstore/index_space/internal/identity_transform.h"
namespace tensorstore {
namespace internal_index_space {
void SetToIdentityTransform(span<OutputIndexMap> maps) {
for (DimensionIndex i = 0; i < maps.size(); ++i) {
auto& map = maps[i];
map.SetSingleInputDimension(i);
map.offset() = 0;
map.stride() = 1;
}
}
namespace {
void SetUnboundedDomain(TransformRep* data, DimensionIndex rank) {
assert(data->input_rank_capacity >= rank);
data->input_rank = rank;
std::fill_n(data->input_origin().begin(), rank, -kInfIndex);
std::fill_n(data->input_shape().begin(), rank, kInfSize);
const auto mask = DimensionSet::UpTo(rank);
data->implicit_lower_bounds = mask;
data->implicit_upper_bounds = mask;
}
void SetIdentityOutputOrDomainOnly(TransformRep* data, DimensionIndex rank,
bool domain_only) {
if (domain_only) {
data->output_rank = 0;
} else {
assert(data->output_rank_capacity >= rank);
data->output_rank = rank;
SetToIdentityTransform(data->output_index_maps().first(rank));
}
}
void SetToIdentityTransform(TransformRep* data, DimensionIndex rank,
bool domain_only) {
SetUnboundedDomain(data, rank);
SetIdentityOutputOrDomainOnly(data, rank, domain_only);
}
}
TransformRep::Ptr<> MakeIdentityTransform(DimensionIndex rank,
bool domain_only) {
auto data = TransformRep::Allocate(rank, domain_only ? 0 : rank);
SetToIdentityTransform(data.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(data.get());
return data;
}
TransformRep::Ptr<> MakeIdentityTransform(internal::StringLikeSpan labels,
bool domain_only) {
const DimensionIndex rank = labels.size();
auto data = TransformRep::Allocate(rank, domain_only ? 0 : rank);
SetToIdentityTransform(data.get(), rank, domain_only);
span<std::string> input_labels = data->input_labels().first(rank);
for (DimensionIndex i = 0; i < rank; ++i) {
std::string_view label = labels[i];
input_labels[i].assign(label.data(), label.size());
}
internal_index_space::DebugCheckInvariants(data.get());
return data;
}
TransformRep::Ptr<> MakeIdentityTransformLike(TransformRep* data,
bool domain_only) {
assert(data != nullptr);
const DimensionIndex rank = data->input_rank;
auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
CopyTransformRepDomain(data, result.get());
SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
TransformRep::Ptr<> MakeIdentityTransform(span<const Index> shape,
bool domain_only) {
const DimensionIndex rank = shape.size();
auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
result->input_rank = rank;
std::fill_n(result->input_origin().begin(), rank, 0);
std::copy_n(shape.begin(), rank, result->input_shape().begin());
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
TransformRep::Ptr<> MakeIdentityTransform(BoxView<> domain, bool domain_only) {
const DimensionIndex rank = domain.rank();
auto result = TransformRep::Allocate(rank, domain_only ? 0 : rank);
result->input_rank = rank;
result->input_domain(rank).DeepAssign(domain);
result->implicit_lower_bounds = false;
result->implicit_upper_bounds = false;
SetIdentityOutputOrDomainOnly(result.get(), rank, domain_only);
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
}
} | #include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::AllocateArray;
using ::tensorstore::Box;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::span;
// IdentityTransform<2>() with a compile-time rank yields a statically typed
// IndexTransform<2, 2> with fully implicit bounds and outputs that map each
// input dimension to itself.
TEST(IdentityTransformTest, Static) {
  auto t = IdentityTransform<2>();
  static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
  EXPECT_EQ(IndexTransformBuilder<>(2, 2)
                .implicit_lower_bounds({1, 1})
                .implicit_upper_bounds({1, 1})
                .output_single_input_dimension(0, 0)
                .output_single_input_dimension(1, 1)
                .Finalize()
                .value(),
            t);
  // A domain constructed from the rank alone must equal the transform's
  // own domain.
  auto d = IndexDomain(t.input_rank());
  static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
  EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, Dynamic) {
auto t = IdentityTransform(2);
static_assert(std::is_same_v<decltype(t), IndexTransform<>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(t.input_rank());
static_assert(std::is_same_v<decltype(d), IndexDomain<>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, LabeledCString) {
auto t = IdentityTransform({"x", "y"});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({"x", "y"});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, LabeledStdString) {
auto t = IdentityTransform({std::string("x"), std::string("y")});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({std::string("x"), std::string("y")});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IndexTransformTest, LabeledStringView) {
auto t = IdentityTransform({std::string_view("x"), std::string_view("y")});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({std::string_view("x"), std::string_view("y")});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformLikeTest, IndexTransform) {
EXPECT_EQ((IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
IdentityTransformLike(IndexTransformBuilder<2, 3>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.input_labels({"x", "y"})
.output_single_input_dimension(0, 5, 7, 1)
.output_single_input_dimension(1, 6, 8, 0)
.output_single_input_dimension(2, 7, 9, 0)
.Finalize()
.value()));
}
TEST(IdentityTransformLikeTest, Array) {
EXPECT_EQ((IndexTransformBuilder<2, 2>()
.input_origin({0, 0})
.input_shape({3, 5})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value()),
IdentityTransformLike(AllocateArray<float>({3, 5})));
}
TEST(IdentityTransformTest, StaticBox) {
auto box = Box({1, 2}, {3, 4});
auto t = IdentityTransform(box);
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, 4})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
EXPECT_EQ(box, t.domain().box());
static_assert(tensorstore::HasBoxDomain<IndexTransform<2, 2>>);
EXPECT_EQ(box, GetBoxDomainOf(t));
auto d = IndexDomain(box);
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, DynamicBox) {
auto t = IdentityTransform(Box<>({1, 2}, {3, 4}));
static_assert(std::is_same_v<decltype(t), IndexTransform<>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, 4})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(Box<>({1, 2}, {3, 4}));
static_assert(std::is_same_v<decltype(d), IndexDomain<>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, FromShape) {
auto t = IdentityTransform(span<const Index, 2>({2, 3}));
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain(span<const Index, 2>({2, 3}));
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
TEST(IdentityTransformTest, FromShapeBracedList) {
auto t = IdentityTransform({2, 3});
static_assert(std::is_same_v<decltype(t), IndexTransform<2, 2>>);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t);
auto d = IndexDomain({2, 3});
static_assert(std::is_same_v<decltype(d), IndexDomain<2>>);
EXPECT_EQ(t.domain(), d);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/identity_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/identity_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ed59ab97-24d9-4a90-96df-bc43c5670b95 | cpp | tensorflow/tensorflow | fusion_analysis_cache | third_party/xla/xla/service/gpu/model/fusion_analysis_cache.cc | third_party/xla/xla/service/gpu/model/fusion_analysis_cache_test.cc | #include "xla/service/gpu/model/fusion_analysis_cache.h"
#include <utility>
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
namespace xla::gpu {
// Returns the cached fusion analysis for `instruction`, computing and
// inserting it on a cache miss.  Thread-safe; may be called concurrently.
const HloFusionAnalysis& HloFusionAnalysisCache::Get(
    const HloInstruction& instruction) {
  // Fast path: return an existing entry under the lock.
  {
    absl::MutexLock lock(&mutex_);
    auto it = analyses_.find(instruction.unique_id());
    if (it != analyses_.end()) {
      return it->second;
    }
  }
  // Compute the analysis outside the lock so a potentially expensive
  // Create() call does not block concurrent lookups.
  HloFusionAnalysis analysis =
      HloFusionAnalysis::Create(instruction, device_info_);
  absl::MutexLock lock(&mutex_);
  // Re-check: another thread may have inserted an entry for the same
  // instruction while we were computing; if so, ours is discarded.
  auto it = analyses_.find(instruction.unique_id());
  if (it != analyses_.end()) {
    return it->second;
  }
  return analyses_.emplace(instruction.unique_id(), std::move(analysis))
      .first->second;
}
// Returns the cached analysis for the (producer, consumer) fusion pair,
// computing and inserting it on a miss.  Same check / compute-outside-lock /
// re-check pattern as the single-instruction Get().
const HloFusionAnalysis& HloFusionAnalysisCache::Get(
    const HloInstruction& producer, const HloInstruction& consumer) {
  // Cache key: the pair of unique instruction ids.
  std::pair<int, int> key{producer.unique_id(), consumer.unique_id()};
  // Fast path under the lock.
  {
    absl::MutexLock lock(&mutex_);
    auto it = producer_consumer_analyses_.find(key);
    if (it != producer_consumer_analyses_.end()) {
      return it->second;
    }
  }
  // Potentially expensive; run without holding the mutex.
  HloFusionAnalysis analysis =
      HloFusionAnalysis::Create(producer, consumer, device_info_);
  absl::MutexLock lock(&mutex_);
  // Re-check in case another thread inserted the same key meanwhile.
  auto it = producer_consumer_analyses_.find(key);
  if (it != producer_consumer_analyses_.end()) {
    return it->second;
  }
  // Maintain reverse indices so Invalidate() can locate every pair entry
  // that involves a given instruction.
  producers_for_consumers_[consumer.unique_id()].push_back(
      producer.unique_id());
  consumers_for_producers_[producer.unique_id()].push_back(
      consumer.unique_id());
  return producer_consumer_analyses_.emplace(key, std::move(analysis))
      .first->second;
}
// Removes all cached entries that involve `instruction`: its standalone
// analysis and every producer-consumer pair it participates in, including
// the reverse-index bookkeeping.
// NOTE(review): unlike Get(), mutex_ is NOT taken here — presumably callers
// guarantee no concurrent Get() during invalidation; confirm the intended
// thread-safety contract.
void HloFusionAnalysisCache::Invalidate(const HloInstruction& instruction) {
  analyses_.erase(instruction.unique_id());
  // Drop pair entries where `instruction` is the producer.  extract()
  // removes the reverse-index node and hands us its consumer list.
  if (auto consumers =
          consumers_for_producers_.extract(instruction.unique_id())) {
    for (const auto consumer : consumers.mapped()) {
      producer_consumer_analyses_.erase({instruction.unique_id(), consumer});
    }
  }
  // ... and pair entries where it is the consumer.
  if (auto producers =
          producers_for_consumers_.extract(instruction.unique_id())) {
    for (const auto producer : producers.mapped()) {
      producer_consumer_analyses_.erase({producer, instruction.unique_id()});
    }
  }
}
// Drops every cached analysis and all reverse-index bookkeeping.
// NOTE(review): like Invalidate(), no mutex_ is held — confirm callers
// have exclusive access when clearing.
void HloFusionAnalysisCache::Clear() {
  analyses_.clear();
  producer_consumer_analyses_.clear();
  consumers_for_producers_.clear();
  producers_for_consumers_.clear();
}
} | #include "xla/service/gpu/model/fusion_analysis_cache.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/hlo_parser.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
// Fixture providing a fixed GPU device description (RTX A6000 test info)
// and a fusion-analysis cache bound to it.
class FusionAnalysisCacheTest : public HloTestBase {
 public:
  stream_executor::DeviceDescription device_{
      TestGpuDeviceInfo::RTXA6000DeviceInfo()};
  HloFusionAnalysisCache cache_{device_};
};
TEST_F(FusionAnalysisCacheTest, CachesAndInvalidates) {
absl::string_view hlo_string = R"(
HloModule m
f {
c0 = f32[] constant(0)
b0 = f32[1000] broadcast(c0)
ROOT n0 = f32[1000] negate(b0)
}
ENTRY e {
ROOT r.1 = f32[1000] fusion(), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto* computation = module->GetComputationWithName("f");
auto* broadcast = computation->GetInstructionWithName("b0");
auto* negate = computation->GetInstructionWithName("n0");
auto* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), negate);
computation->set_root_instruction(broadcast);
EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), negate)
<< "Analysis should be cached.";
cache_.Invalidate(*fusion);
EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), broadcast)
<< "Analysis should have been recomputed";
}
TEST_F(FusionAnalysisCacheTest, CachesAndInvalidatesProducerConsumerFusions) {
absl::string_view hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
f {
c0 = f32[] constant(0)
b0 = f32[1000] broadcast(c0)
ROOT r0 = f32[] reduce(b0, c0), dimensions={0}, to_apply=add
}
ENTRY e {
f0 = f32[] fusion(), kind=kInput, calls=f
ROOT n0 = f32[] negate(f0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto* fusion = module->entry_computation()->GetInstructionWithName("f0");
auto* neg = module->entry_computation()->GetInstructionWithName("n0");
auto* computation = module->GetComputationWithName("f");
auto* constant = computation->GetInstructionWithName("c0");
EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
computation->set_root_instruction(constant);
EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction)
<< "Analysis should be cached.";
cache_.Invalidate(*fusion);
EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop)
<< "Analysis should have been recomputed";
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/fusion_analysis_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/fusion_analysis_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85616131-3443-4e65-a297-10371fcfbd75 | cpp | google/quiche | oghttp2_adapter | quiche/http2/adapter/oghttp2_adapter.cc | quiche/http2/adapter/oghttp2_adapter_test.cc | #include "quiche/http2/adapter/oghttp2_adapter.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "quiche/http2/adapter/http2_util.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
namespace http2 {
namespace adapter {
namespace {
using spdy::SpdyGoAwayIR;
using spdy::SpdyPingIR;
using spdy::SpdyPriorityIR;
using spdy::SpdyWindowUpdateIR;
}
// Factory for OgHttp2Adapter.  Construction goes through this function;
// the constructor itself is not reachable via std::make_unique, so the
// raw new is wrapped directly (equivalent to absl::WrapUnique).
std::unique_ptr<OgHttp2Adapter> OgHttp2Adapter::Create(
    Http2VisitorInterface& visitor, Options options) {
  return std::unique_ptr<OgHttp2Adapter>(
      new OgHttp2Adapter(visitor, std::move(options)));
}
// The methods below are thin forwarders: OgHttp2Adapter implements the
// Http2Adapter interface by delegating to its owned OgHttp2Session, or by
// enqueueing the corresponding spdy frame IR on it.
OgHttp2Adapter::~OgHttp2Adapter() {}
// True when the adapter was configured with a server perspective.
bool OgHttp2Adapter::IsServerSession() const {
  return session_->IsServerSession();
}
// Feeds raw wire bytes to the session and returns its result
// (presumably bytes consumed, negative on error — confirm against the
// Http2Adapter interface documentation).
int64_t OgHttp2Adapter::ProcessBytes(absl::string_view bytes) {
  return session_->ProcessBytes(bytes);
}
// Queues a SETTINGS frame containing `settings` for the next Send().
void OgHttp2Adapter::SubmitSettings(absl::Span<const Http2Setting> settings) {
  session_->SubmitSettings(settings);
}
// Queues a PRIORITY frame for `stream_id` with the given dependency info.
void OgHttp2Adapter::SubmitPriorityForStream(Http2StreamId stream_id,
                                             Http2StreamId parent_stream_id,
                                             int weight, bool exclusive) {
  session_->EnqueueFrame(std::make_unique<SpdyPriorityIR>(
      stream_id, parent_stream_id, weight, exclusive));
}
// Queues a PING frame carrying `ping_id` as its opaque payload.
void OgHttp2Adapter::SubmitPing(Http2PingId ping_id) {
  session_->EnqueueFrame(std::make_unique<SpdyPingIR>(ping_id));
}
// Requests a graceful shutdown sequence from the session.
void OgHttp2Adapter::SubmitShutdownNotice() {
  session_->StartGracefulShutdown();
}
// Queues a GOAWAY frame with the given last-accepted stream id, the error
// code translated to its spdy equivalent, and opaque debug data.
void OgHttp2Adapter::SubmitGoAway(Http2StreamId last_accepted_stream_id,
                                  Http2ErrorCode error_code,
                                  absl::string_view opaque_data) {
  session_->EnqueueFrame(std::make_unique<SpdyGoAwayIR>(
      last_accepted_stream_id, TranslateErrorCode(error_code),
      std::string(opaque_data)));
}
// Queues a WINDOW_UPDATE for `stream_id` with the given increment.
void OgHttp2Adapter::SubmitWindowUpdate(Http2StreamId stream_id,
                                        int window_increment) {
  session_->EnqueueFrame(
      std::make_unique<SpdyWindowUpdateIR>(stream_id, window_increment));
}
// Queues METADATA frames for `stream_id`, pulling payload from `source`.
// The unnamed size_t parameter (presumably a max frame size — confirm
// against the Http2Adapter interface) is ignored; the session controls
// frame sizing.
void OgHttp2Adapter::SubmitMetadata(Http2StreamId stream_id,
                                    size_t ,
                                    std::unique_ptr<MetadataSource> source) {
  session_->SubmitMetadata(stream_id, std::move(source));
}
// Source-less variant; payload origin is handled inside the session
// (see OgHttp2Session::SubmitMetadata).
void OgHttp2Adapter::SubmitMetadata(Http2StreamId stream_id,
                                    size_t ) {
  session_->SubmitMetadata(stream_id);
}
// Serializes queued frames through the session; returns its result.
int OgHttp2Adapter::Send() { return session_->Send(); }
// Connection-level send window, as tracked by the session (the remote
// peer's advertised receive window).
int OgHttp2Adapter::GetSendWindowSize() const {
  return session_->GetRemoteWindowSize();
}
// Per-stream send window for `stream_id`.
int OgHttp2Adapter::GetStreamSendWindowSize(Http2StreamId stream_id) const {
  return session_->GetStreamSendWindowSize(stream_id);
}
// Configured (maximum) receive window for `stream_id`.
int OgHttp2Adapter::GetStreamReceiveWindowLimit(Http2StreamId stream_id) const {
  return session_->GetStreamReceiveWindowLimit(stream_id);
}
// Currently available receive window for `stream_id`.
int OgHttp2Adapter::GetStreamReceiveWindowSize(Http2StreamId stream_id) const {
  return session_->GetStreamReceiveWindowSize(stream_id);
}
// Currently available connection-level receive window.
int OgHttp2Adapter::GetReceiveWindowSize() const {
  return session_->GetReceiveWindowSize();
}
// HPACK encoder dynamic table: current size and configured capacity.
int OgHttp2Adapter::GetHpackEncoderDynamicTableSize() const {
  return session_->GetHpackEncoderDynamicTableSize();
}
int OgHttp2Adapter::GetHpackEncoderDynamicTableCapacity() const {
  return session_->GetHpackEncoderDynamicTableCapacity();
}
// HPACK decoder dynamic table: current size and size limit.
int OgHttp2Adapter::GetHpackDecoderDynamicTableSize() const {
  return session_->GetHpackDecoderDynamicTableSize();
}
int OgHttp2Adapter::GetHpackDecoderSizeLimit() const {
  return session_->GetHpackDecoderSizeLimit();
}
// Highest stream id observed from the peer.
Http2StreamId OgHttp2Adapter::GetHighestReceivedStreamId() const {
  return session_->GetHighestReceivedStreamId();
}
// Reports that the application consumed `num_bytes` of DATA on
// `stream_id`, letting the session release flow-control credit.
void OgHttp2Adapter::MarkDataConsumedForStream(Http2StreamId stream_id,
                                               size_t num_bytes) {
  session_->Consume(stream_id, num_bytes);
}
// Queues a RST_STREAM for `stream_id` with the translated error code.
void OgHttp2Adapter::SubmitRst(Http2StreamId stream_id,
                               Http2ErrorCode error_code) {
  session_->EnqueueFrame(std::make_unique<spdy::SpdyRstStreamIR>(
      stream_id, TranslateErrorCode(error_code)));
}
// Starts a new request stream with the given headers, optional body
// source, and user data.  Returns the session's result (presumably the
// new stream id — confirm against OgHttp2Session::SubmitRequest).
int32_t OgHttp2Adapter::SubmitRequest(
    absl::Span<const Header> headers,
    std::unique_ptr<DataFrameSource> data_source, bool end_stream,
    void* user_data) {
  return session_->SubmitRequest(headers, std::move(data_source), end_stream,
                                 user_data);
}
// Submits response headers (and optional body source) on `stream_id`.
int OgHttp2Adapter::SubmitResponse(Http2StreamId stream_id,
                                   absl::Span<const Header> headers,
                                   std::unique_ptr<DataFrameSource> data_source,
                                   bool end_stream) {
  return session_->SubmitResponse(stream_id, headers, std::move(data_source),
                                  end_stream);
}
// Submits trailing headers for `stream_id`.
int OgHttp2Adapter::SubmitTrailer(Http2StreamId stream_id,
                                  absl::Span<const Header> trailers) {
  return session_->SubmitTrailer(stream_id, trailers);
}
// Associates an opaque application pointer with a stream.
void OgHttp2Adapter::SetStreamUserData(Http2StreamId stream_id,
                                       void* user_data) {
  session_->SetStreamUserData(stream_id, user_data);
}
// Retrieves the pointer previously set via SetStreamUserData.
void* OgHttp2Adapter::GetStreamUserData(Http2StreamId stream_id) {
  return session_->GetStreamUserData(stream_id);
}
// Resumes a previously deferred stream; returns the session's result.
bool OgHttp2Adapter::ResumeStream(Http2StreamId stream_id) {
  return session_->ResumeStream(stream_id);
}
// Invoked via Create().  Owns an OgHttp2Session configured with the same
// visitor and options.
OgHttp2Adapter::OgHttp2Adapter(Http2VisitorInterface& visitor, Options options)
    : Http2Adapter(visitor),
      session_(std::make_unique<OgHttp2Session>(visitor, std::move(options))) {}
}
} | #include "quiche/http2/adapter/oghttp2_adapter.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_join.h"
#include "quiche/http2/adapter/http2_protocol.h"
#include "quiche/http2/adapter/http2_visitor_interface.h"
#include "quiche/http2/adapter/mock_http2_visitor.h"
#include "quiche/http2/adapter/oghttp2_util.h"
#include "quiche/http2/adapter/test_frame_sequence.h"
#include "quiche/http2/adapter/test_utils.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
using ConnectionError = Http2VisitorInterface::ConnectionError;
using spdy::SpdyFrameType;
using testing::_;
enum FrameType {
DATA,
HEADERS,
PRIORITY,
RST_STREAM,
SETTINGS,
PUSH_PROMISE,
PING,
GOAWAY,
WINDOW_UPDATE,
CONTINUATION,
};
// An adapter created with a server perspective reports itself as a
// server session.
TEST(OgHttp2AdapterTest, IsServerSession) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_TRUE(adapter->IsServerSession());
}
// Feeding a client preface followed by a PING produces the expected
// ordered visitor callbacks: SETTINGS events, then the PING.
TEST(OgHttp2AdapterTest, ProcessBytes) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence seq;
  // Raw frame-type values: 4 == SETTINGS, 6 == PING.
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, 4, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 8, 6, 0));
  EXPECT_CALL(visitor, OnPing(17, false));
  adapter->ProcessBytes(
      TestFrameSequence().ClientPreface().Ping(17).Serialize());
}
TEST(OgHttp2AdapterTest, HeaderValuesWithObsTextAllowedByDefault) {
TestVisitor visitor;
OgHttp2Session::Options options;
options.perspective = Perspective::kServer;
ASSERT_TRUE(options.allow_obs_text);
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames = TestFrameSequence()
.ClientPreface()
.Headers(1,
{{":method", "GET"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/"},
{"name", "val\xa1ue"}},
true)
.Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/"));
EXPECT_CALL(visitor, OnHeaderForStream(1, "name", "val\xa1ue"));
EXPECT_CALL(visitor, OnEndHeadersForStream(1));
EXPECT_CALL(visitor, OnEndStream(1));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
TEST(OgHttp2AdapterTest, HeaderValuesWithObsTextDisallowed) {
TestVisitor visitor;
OgHttp2Session::Options options;
options.allow_obs_text = false;
options.perspective = Perspective::kServer;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames = TestFrameSequence()
.ClientPreface()
.Headers(1,
{{":method", "GET"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/"},
{"name", "val\xa1ue"}},
true)
.Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/"));
EXPECT_CALL(
visitor,
OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
TEST(OgHttp2AdapterTest, RequestPathWithSpaceOrTab) {
TestVisitor visitor;
OgHttp2Session::Options options;
options.allow_obs_text = false;
options.perspective = Perspective::kServer;
ASSERT_EQ(false, options.validate_path);
options.validate_path = true;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames = TestFrameSequence()
.ClientPreface()
.Headers(1,
{{":method", "GET"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/ fragment"}},
true)
.Headers(3,
{{":method", "GET"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/\tfragment2"}},
true)
.Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
EXPECT_CALL(
visitor,
OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":authority", "example.com"));
EXPECT_CALL(
visitor,
OnInvalidFrame(3, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
TEST(OgHttp2AdapterTest, RequestPathWithSpaceOrTabNoPathValidation) {
TestVisitor visitor;
OgHttp2Session::Options options;
options.allow_obs_text = false;
options.perspective = Perspective::kServer;
ASSERT_EQ(false, options.validate_path);
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames = TestFrameSequence()
.ClientPreface()
.Headers(1,
{{":method", "GET"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/ fragment"}},
true)
.Headers(3,
{{":method", "GET"},
{":scheme", "https"},
{":authority", "example.com"},
{":path", "/\tfragment2"}},
true)
.Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/ fragment"));
EXPECT_CALL(visitor, OnEndHeadersForStream(1));
EXPECT_CALL(visitor, OnEndStream(1));
EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":path", "/\tfragment2"));
EXPECT_CALL(visitor, OnEndHeadersForStream(3));
EXPECT_CALL(visitor, OnEndStream(3));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
TEST(OgHttp2AdapterTest, InitialSettingsNoExtendedConnect) {
TestVisitor client_visitor;
OgHttp2Adapter::Options client_options;
client_options.perspective = Perspective::kClient;
client_options.max_header_list_bytes = 42;
client_options.allow_extended_connect = false;
auto client_adapter = OgHttp2Adapter::Create(client_visitor, client_options);
TestVisitor server_visitor;
OgHttp2Adapter::Options server_options;
server_options.perspective = Perspective::kServer;
server_options.allow_extended_connect = false;
auto server_adapter = OgHttp2Adapter::Create(server_visitor, server_options);
testing::InSequence s;
EXPECT_CALL(client_visitor, OnBeforeFrameSent(SETTINGS, 0, 12, 0x0));
EXPECT_CALL(client_visitor, OnFrameSent(SETTINGS, 0, 12, 0x0, 0));
{
int result = client_adapter->Send();
EXPECT_EQ(0, result);
absl::string_view data = client_visitor.data();
EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
}
EXPECT_CALL(server_visitor, OnBeforeFrameSent(SETTINGS, 0, 0, 0x0));
EXPECT_CALL(server_visitor, OnFrameSent(SETTINGS, 0, 0, 0x0, 0));
{
int result = server_adapter->Send();
EXPECT_EQ(0, result);
absl::string_view data = server_visitor.data();
EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
}
EXPECT_CALL(client_visitor, OnFrameHeader(0, 0, SETTINGS, 0x0));
EXPECT_CALL(client_visitor, OnSettingsStart());
EXPECT_CALL(client_visitor, OnSettingsEnd());
{
const int64_t result = client_adapter->ProcessBytes(server_visitor.data());
EXPECT_EQ(server_visitor.data().size(), static_cast<size_t>(result));
}
EXPECT_CALL(server_visitor, OnFrameHeader(0, 12, SETTINGS, 0x0));
EXPECT_CALL(server_visitor, OnSettingsStart());
EXPECT_CALL(server_visitor,
OnSetting(Http2Setting{Http2KnownSettingsId::ENABLE_PUSH, 0u}));
EXPECT_CALL(
server_visitor,
OnSetting(Http2Setting{Http2KnownSettingsId::MAX_HEADER_LIST_SIZE, 42u}));
EXPECT_CALL(server_visitor, OnSettingsEnd());
{
const int64_t result = server_adapter->ProcessBytes(client_visitor.data());
EXPECT_EQ(client_visitor.data().size(), static_cast<size_t>(result));
}
}
TEST(OgHttp2AdapterTest, InitialSettings) {
TestVisitor client_visitor;
OgHttp2Adapter::Options client_options;
client_options.perspective = Perspective::kClient;
client_options.max_header_list_bytes = 42;
ASSERT_TRUE(client_options.allow_extended_connect);
auto client_adapter = OgHttp2Adapter::Create(client_visitor, client_options);
TestVisitor server_visitor;
OgHttp2Adapter::Options server_options;
server_options.perspective = Perspective::kServer;
ASSERT_TRUE(server_options.allow_extended_connect);
auto server_adapter = OgHttp2Adapter::Create(server_visitor, server_options);
testing::InSequence s;
EXPECT_CALL(client_visitor, OnBeforeFrameSent(SETTINGS, 0, 12, 0x0));
EXPECT_CALL(client_visitor, OnFrameSent(SETTINGS, 0, 12, 0x0, 0));
{
int result = client_adapter->Send();
EXPECT_EQ(0, result);
absl::string_view data = client_visitor.data();
EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
}
EXPECT_CALL(server_visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
EXPECT_CALL(server_visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
{
int result = server_adapter->Send();
EXPECT_EQ(0, result);
absl::string_view data = server_visitor.data();
EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
}
EXPECT_CALL(client_visitor, OnFrameHeader(0, 6, SETTINGS, 0x0));
EXPECT_CALL(client_visitor, OnSettingsStart());
EXPECT_CALL(client_visitor,
OnSetting(Http2Setting{
Http2KnownSettingsId::ENABLE_CONNECT_PROTOCOL, 1u}));
EXPECT_CALL(client_visitor, OnSettingsEnd());
{
const int64_t result = client_adapter->ProcessBytes(server_visitor.data());
EXPECT_EQ(server_visitor.data().size(), static_cast<size_t>(result));
}
EXPECT_CALL(server_visitor, OnFrameHeader(0, 12, SETTINGS, 0x0));
EXPECT_CALL(server_visitor, OnSettingsStart());
EXPECT_CALL(server_visitor,
OnSetting(Http2Setting{Http2KnownSettingsId::ENABLE_PUSH, 0u}));
EXPECT_CALL(
server_visitor,
OnSetting(Http2Setting{Http2KnownSettingsId::MAX_HEADER_LIST_SIZE, 42u}));
EXPECT_CALL(server_visitor, OnSettingsEnd());
{
const int64_t result = server_adapter->ProcessBytes(client_visitor.data());
EXPECT_EQ(client_visitor.data().size(), static_cast<size_t>(result));
}
}
TEST(OgHttp2AdapterTest, AutomaticSettingsAndPingAcks) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kServer;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames =
TestFrameSequence().ClientPreface().Ping(42).Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(0, _, PING, 0));
EXPECT_CALL(visitor, OnPing(42, false));
const int64_t read_result = adapter->ProcessBytes(frames);
EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
EXPECT_TRUE(adapter->want_write());
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(PING, 0, _, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(PING, 0, _, ACK_FLAG, 0));
int send_result = adapter->Send();
EXPECT_EQ(0, send_result);
EXPECT_THAT(visitor.data(),
EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
SpdyFrameType::PING}));
}
TEST(OgHttp2AdapterTest, AutomaticPingAcksDisabled) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kServer;
options.auto_ping_ack = false;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames =
TestFrameSequence().ClientPreface().Ping(42).Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(0, _, PING, 0));
EXPECT_CALL(visitor, OnPing(42, false));
const int64_t read_result = adapter->ProcessBytes(frames);
EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
EXPECT_TRUE(adapter->want_write());
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
int send_result = adapter->Send();
EXPECT_EQ(0, send_result);
EXPECT_THAT(visitor.data(),
EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
}
// A client SETTINGS frame with MAX_FRAME_SIZE=3 (below the protocol minimum)
// is rejected: the adapter reports an invalid frame plus a kInvalidSetting
// connection error, then sends GOAWAY with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, InvalidMaxFrameSizeSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence().ClientPreface({{MAX_FRAME_SIZE, 3u}}).Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  // The invalid setting terminates SETTINGS processing; no OnSettingsEnd().
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(0, Http2VisitorInterface::InvalidFrameError::kProtocol));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidSetting));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  // The adapter still writes its own SETTINGS before the GOAWAY.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// A client SETTINGS frame with ENABLE_PUSH=3 (not a boolean 0/1 value) is
// rejected: invalid-frame + kInvalidSetting connection error, followed by a
// GOAWAY with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, InvalidPushSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence().ClientPreface({{ENABLE_PUSH, 3u}}).Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  // SETTINGS processing stops at the invalid value; no OnSettingsEnd().
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(0, Http2VisitorInterface::InvalidFrameError::kProtocol));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidSetting));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// Exercises two invalid uses of ENABLE_CONNECT_PROTOCOL: (1) a non-boolean
// value (3); (2) enabling it (1) and then attempting to disable it (0) in a
// later SETTINGS frame. Both produce kInvalidSetting and a GOAWAY with
// PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, InvalidConnectProtocolSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // Case 1: ENABLE_CONNECT_PROTOCOL=3 is not a valid boolean value.
  const std::string frames = TestFrameSequence()
                                 .ClientPreface({{ENABLE_CONNECT_PROTOCOL, 3u}})
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(0, Http2VisitorInterface::InvalidFrameError::kProtocol));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidSetting));
  int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
  // Case 2: a fresh adapter sees the setting enabled (1) and then disabled
  // (0); the transition from 1 to 0 is what triggers the error below.
  auto adapter2 = OgHttp2Adapter::Create(visitor, options);
  const std::string frames2 = TestFrameSequence()
                                  .ClientPreface({{ENABLE_CONNECT_PROTOCOL, 1}})
                                  .Settings({{ENABLE_CONNECT_PROTOCOL, 0}})
                                  .Serialize();
  // First SETTINGS (value 1) is accepted in full.
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{ENABLE_CONNECT_PROTOCOL, 1u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  // Second SETTINGS (value 0) is rejected.
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(0, Http2VisitorInterface::InvalidFrameError::kProtocol));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidSetting));
  read_result = adapter2->ProcessBytes(frames2);
  EXPECT_EQ(static_cast<size_t>(read_result), frames2.size());
  EXPECT_TRUE(adapter2->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  adapter2->Send();
}
// With Options::remote_max_concurrent_streams = 3, submitting four requests
// sends HEADERS for only the first three; the fourth request's HEADERS is
// written once stream 1 completes and frees a concurrency slot.
TEST(OgHttp2AdapterTest, ClientSetsRemoteMaxStreamOption) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  options.remote_max_concurrent_streams = 3;  // Cap on in-flight requests.
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers = ToHeaders({{":method", "GET"},
                                                 {":scheme", "http"},
                                                 {":authority", "example.com"},
                                                 {":path", "/"}});
  // Queue four identical requests; only three may be active at once.
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  const int32_t stream_id3 =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  const int32_t stream_id4 =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  // HEADERS for streams 1-3 only; stream 4 stays queued.
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id3, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id3, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Server completes stream 1 with a final response.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(stream_id1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(stream_id1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(stream_id1));
  EXPECT_CALL(visitor, OnHeaderForStream(stream_id1, ":status", "200"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(stream_id1, "server", "my-fake-server"));
  EXPECT_CALL(visitor, OnHeaderForStream(stream_id1, "date",
                                         "Tue, 6 Apr 2021 12:54:01 GMT"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(stream_id1));
  EXPECT_CALL(visitor, OnEndStream(stream_id1));
  EXPECT_CALL(visitor,
              OnCloseStream(stream_id1, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  ASSERT_TRUE(adapter->want_write());
  // With a slot free, the fourth request's HEADERS now goes out.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x1));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x1, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id4, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id4, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
}
// A client receiving an interim 100 response, then a PING, then the final 200
// response (with END_STREAM) delivers both header blocks to the visitor and
// closes the stream normally.
TEST(OgHttp2AdapterTest, ClientHandles100Headers) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Interim 100 headers (no END_STREAM), a PING, then the final 200 headers
  // with END_STREAM.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "100"}},
                   false)
          .Ping(101)
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "100"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(0, 8, PING, 0));
  EXPECT_CALL(visitor, OnPing(101, false));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // The adapter acks the server SETTINGS and the PING.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(PING, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(PING, 0, _, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::PING}));
}
// Verifies that SubmitWindowUpdate() adjusts the connection-level and
// stream-level receive windows immediately when queued, before the
// WINDOW_UPDATE frame is actually serialized by Send().
TEST(OgHttp2AdapterTest, QueuingWindowUpdateAffectsWindow) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // Queuing a connection-level (stream 0) WINDOW_UPDATE bumps the connection
  // receive window right away.
  EXPECT_EQ(adapter->GetReceiveWindowSize(), kInitialFlowControlWindowSize);
  adapter->SubmitWindowUpdate(0, 10000);
  EXPECT_EQ(adapter->GetReceiveWindowSize(),
            kInitialFlowControlWindowSize + 10000);
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 0, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 0, 4, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  // Queuing a stream-level WINDOW_UPDATE likewise bumps that stream's receive
  // window immediately. (Previously the update targeted a hard-coded stream
  // id of 1; use stream_id for consistency with the surrounding checks.)
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id),
            kInitialFlowControlWindowSize);
  adapter->SubmitWindowUpdate(stream_id, 20000);
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id),
            kInitialFlowControlWindowSize + 20000);
}
// Verifies that a SETTINGS change to INITIAL_WINDOW_SIZE only takes effect on
// stream receive windows once the peer acknowledges the SETTINGS frame, and
// that streams opened after the ack start with the new window size.
TEST(OgHttp2AdapterTest, AckOfSettingInitialWindowSizeAffectsWindow) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  // Server preface plus an ack of the client's initial SETTINGS.
  const std::string initial_frames =
      TestFrameSequence()
          .ServerPreface()
          .SettingsAck()
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0x0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck);
  int64_t parse_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(parse_result));
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id1),
            kInitialFlowControlWindowSize);
  // Queuing the INITIAL_WINDOW_SIZE change does not affect the existing
  // stream's window yet.
  adapter->SubmitSettings({{INITIAL_WINDOW_SIZE, 80000u}});
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id1),
            kInitialFlowControlWindowSize);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  // Sending the SETTINGS frame alone still does not change the window.
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id1),
            kInitialFlowControlWindowSize);
  // Once the peer acks the SETTINGS, the existing stream's window grows.
  const std::string settings_ack =
      TestFrameSequence().SettingsAck().Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck);
  parse_result = adapter->ProcessBytes(settings_ack);
  EXPECT_EQ(settings_ack.size(), static_cast<size_t>(parse_result));
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id1), 80000);
  // A stream opened after the ack starts with the updated window size.
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  // Fix: the second request previously reused `headers`, leaving `headers2`
  // declared but unused; submit `headers2` as intended.
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers2, nullptr, true, nullptr);
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(stream_id2), 80000);
}
// A 100 (informational) header block carrying END_STREAM is invalid HTTP
// messaging; the client reports OnInvalidFrame(kHttpMessaging) and resets the
// stream.
TEST(OgHttp2AdapterTest, ClientRejects100HeadersWithFin) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Two 100 header blocks; the second sets END_STREAM (fin), which is the
  // invalid part.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "100"}}, false)
          .Headers(1, {{":status", "100"}}, true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "100"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "100"));
  // No OnEndHeadersForStream for the second block: it is rejected.
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  1, Http2VisitorInterface::InvalidFrameError::kHttpMessaging));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(RST_STREAM, 1, _, 0x0, 1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// DATA arriving after a 100 (informational) header block, without the final
// headers, is invalid; the client resets the stream with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ClientRejects100HeadersWithContent) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Interim 100 headers followed directly by DATA (no final headers).
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "100"}},
                   false)
          .Data(1, "We needed the final headers before data, whoops")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "100"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  // DATA begins but its payload is never delivered; the stream is reset.
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// A 100 (informational) response carrying a content-length header is invalid;
// the client reports OnInvalidFrame(kHttpHeader) and resets the stream with
// PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ClientRejects100HeadersWithContentLength) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // 100 headers with content-length (invalid) followed by the final 200.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "100"}, {"content-length", "42"}},
                   false)
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "100"));
  // Rejected at the content-length header; no further callbacks for the
  // stream's header blocks.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// Responses whose DATA frames carry padding are accepted as long as the
// unpadded payload matches content-length: stream 1 gets "hi" (2 bytes, 10
// bytes of padding); stream 3 gets a 24-byte body split across three padded
// DATA frames. Both streams close with NO_ERROR.
TEST(OgHttp2AdapterTest, ClientHandlesResponseWithContentLengthAndPadding) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers2, nullptr, true, nullptr);
  ASSERT_GT(stream_id2, stream_id1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Trailing ints on Data() are padding lengths (10-13 bytes).
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "200"}, {"content-length", "2"}},
                   false)
          .Data(1, "hi", true, 10)
          .Headers(3, {{":status", "200"}, {"content-length", "24"}},
                   false)
          .Data(3, "hi", false, 11)
          .Data(3, " it's nice", false, 12)
          .Data(3, " to meet you", true, 13)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "2"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // Frame length includes padding (payload + pad); flags 0x9 = END_STREAM
  // plus PADDED, 0x8 = PADDED only.
  EXPECT_CALL(visitor, OnFrameHeader(1, 2 + 10, DATA, 0x9));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 2 + 10));
  EXPECT_CALL(visitor, OnDataPaddingLength(1, 10));
  EXPECT_CALL(visitor, OnDataForStream(1, "hi"));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, "content-length", "24"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnFrameHeader(3, 2 + 11, DATA, 0x8));
  EXPECT_CALL(visitor, OnBeginDataForStream(3, 2 + 11));
  EXPECT_CALL(visitor, OnDataPaddingLength(3, 11));
  EXPECT_CALL(visitor, OnDataForStream(3, "hi"));
  EXPECT_CALL(visitor, OnFrameHeader(3, 10 + 12, DATA, 0x8));
  EXPECT_CALL(visitor, OnBeginDataForStream(3, 10 + 12));
  EXPECT_CALL(visitor, OnDataPaddingLength(3, 12));
  EXPECT_CALL(visitor, OnDataForStream(3, " it's nice"));
  EXPECT_CALL(visitor, OnFrameHeader(3, 12 + 13, DATA, 0x9));
  EXPECT_CALL(visitor, OnBeginDataForStream(3, 12 + 13));
  EXPECT_CALL(visitor, OnDataPaddingLength(3, 13));
  EXPECT_CALL(visitor, OnDataForStream(3, " to meet you"));
  EXPECT_CALL(visitor, OnEndStream(3));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  // No RST_STREAM: padded-but-correct content lengths are accepted.
  EXPECT_THAT(visitor.data(), EqualsFrames({
                                  SpdyFrameType::SETTINGS,
                              }));
}
// Parameterized over (trailers present, RST_STREAM present) for tests where
// the server's response completes before the client's request does.
class ResponseCompleteBeforeRequestTest
    : public quiche::test::QuicheTestWithParam<std::tuple<bool, bool>> {
 public:
  // True if the response should include a trailing HEADERS block.
  bool HasTrailers() const {
    const auto& param = GetParam();
    return std::get<0>(param);
  }
  // True if the server should follow the response with RST_STREAM.
  bool HasRstStream() const {
    const auto& param = GetParam();
    return std::get<1>(param);
  }
};
// Run ResponseCompleteBeforeRequestTest over all four combinations of
// (HasTrailers, HasRstStream).
INSTANTIATE_TEST_SUITE_P(TrailersAndRstStreamAllCombinations,
                         ResponseCompleteBeforeRequestTest,
                         testing::Combine(testing::Bool(), testing::Bool()));
// The client submits a request with a deferred body, then receives the entire
// response (optionally with trailers and/or RST_STREAM) before finishing the
// request. If no RST_STREAM arrived, resuming the stream still sends the
// final request DATA and closes the stream; if RST_STREAM arrived, the stream
// is already closed and nothing more is sent.
TEST_P(ResponseCompleteBeforeRequestTest,
       ClientHandlesResponseBeforeRequestComplete) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "POST"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  // Request body is supplied through the visitor; end_stream=false so the
  // request stays open after HEADERS.
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, std::move(body1), false, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor,
              OnBeforeFrameSent(HEADERS, stream_id1, _, END_HEADERS_FLAG));
  EXPECT_CALL(visitor,
              OnFrameSent(HEADERS, stream_id1, _, END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Build the complete response, varying trailers/RST_STREAM by test param.
  TestFrameSequence response;
  response.ServerPreface()
      .Headers(1, {{":status", "200"}, {"content-length", "2"}},
               false)
      .Data(1, "hi", !HasTrailers(), 10);
  if (HasTrailers()) {
    response.Headers(1, {{"my-weird-trailer", "has a value"}}, true);
  }
  if (HasRstStream()) {
    response.RstStream(1, Http2ErrorCode::HTTP2_NO_ERROR);
  }
  const std::string stream_frames = response.Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "2"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // DATA flags: 0x8 = PADDED, 0x9 = PADDED | END_STREAM.
  EXPECT_CALL(visitor,
              OnFrameHeader(1, 2 + 10, DATA, HasTrailers() ? 0x8 : 0x9));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 2 + 10));
  EXPECT_CALL(visitor, OnDataPaddingLength(1, 10));
  EXPECT_CALL(visitor, OnDataForStream(1, "hi"));
  if (HasTrailers()) {
    EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
    EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
    EXPECT_CALL(visitor,
                OnHeaderForStream(1, "my-weird-trailer", "has a value"));
    EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  }
  EXPECT_CALL(visitor, OnEndStream(1));
  if (HasRstStream()) {
    // RST_STREAM closes the stream immediately.
    EXPECT_CALL(visitor, OnFrameHeader(1, _, RST_STREAM, 0));
    EXPECT_CALL(visitor, OnRstStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
    EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  }
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({
                                  SpdyFrameType::SETTINGS,
                              }));
  // Finish the request body; if the stream was reset, no payload is queued.
  if (!HasRstStream()) {
    visitor.AppendPayloadForStream(1, "final fragment");
  }
  visitor.SetEndData(1, true);
  adapter->ResumeStream(1);
  if (!HasRstStream()) {
    EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, END_STREAM_FLAG, 0));
    EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  }
  result = adapter->Send();
  EXPECT_EQ(0, result);
}
// A 204 response must not carry content: stream 1's 204-with-content-length
// is rejected at the header (kHttpHeader); stream 3's 204 followed by DATA is
// rejected when the data arrives. Both streams are reset with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ClientHandles204WithContent) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers2, nullptr, true, nullptr);
  ASSERT_GT(stream_id2, stream_id1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Stream 1: 204 with content-length. Stream 3: 204 followed by DATA.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "204"}, {"content-length", "2"}},
                   false)
          .Data(1, "hi")
          .Headers(3, {{":status", "204"}}, false)
          .Data(3, "hi")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "204"));
  // Stream 1 rejected at the content-length header.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":status", "204"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, DATA, 0));
  // Stream 3's DATA begins but its payload is never delivered.
  EXPECT_CALL(visitor, OnBeginDataForStream(3, 2));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  // Both streams are reset with PROTOCOL_ERROR.
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::RST_STREAM,
                            SpdyFrameType::RST_STREAM}));
}
// Verifies that a client adapter treats a 304 response that is followed by an
// actual DATA payload as malformed: the headers are delivered to the visitor,
// but once the body arrives the adapter resets the stream with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ClientHandles304WithContent) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  // Submit a simple GET request and flush the client preface + HEADERS.
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Server responds 304 (with a content-length) but then, improperly, sends a
  // DATA frame on the stream.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "304"}, {"content-length", "2"}},
                   false)
          .Data(1, "hi")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "304"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "2"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 2));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // The adapter acks the server SETTINGS, then resets the offending stream.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// Verifies that a 304 response carrying a content-length header but NO body
// (fin on the HEADERS frame) is accepted: the stream ends and closes cleanly
// with no stream error.
TEST(OgHttp2AdapterTest, ClientHandles304WithContentLength) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  // Submit a simple GET request and flush the client preface + HEADERS.
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  ASSERT_GT(stream_id, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Server sends 304 with content-length, ending the stream on HEADERS.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "304"}, {"content-length", "2"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "304"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "2"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // Only the SETTINGS ack should be queued; no RST_STREAM.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
}
// Verifies that a client adapter delivers a response with headers, body, and
// trailing HEADERS (trailers) through the visitor callbacks, closing the
// stream cleanly once the fin-bearing trailers are processed.
TEST(OgHttp2AdapterTest, ClientHandlesTrailers) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  // Arbitrary opaque user data attached to the stream.
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Server sends response headers, a body, then fin-bearing trailers.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(1, "This is the response body.")
          .Headers(1, {{"final-status", "A-OK"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, 26, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 26));
  EXPECT_CALL(visitor, OnDataForStream(1, "This is the response body."));
  // The trailers arrive as a second HEADERS block ending the stream.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "final-status", "A-OK"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
}
// Parameterized fixture. The bool parameter selects how a request/response
// body is supplied to the adapter: when true the test passes a nullptr data
// source; when false it passes an explicit VisitorDataSource (see the
// GetParam() usage in the TEST_P bodies below).
class OgHttp2AdapterDataTest : public quiche::test::QuicheTestWithParam<bool> {
};
INSTANTIATE_TEST_SUITE_P(BothValues, OgHttp2AdapterDataTest, testing::Bool());
// Verifies that a client can send a request body (not fin) and then finish the
// stream by submitting trailers, which are serialized as a final HEADERS frame
// with END_STREAM | END_HEADERS.
TEST_P(OgHttp2AdapterDataTest, ClientSendsTrailers) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const std::string kBody = "This is an example request body.";
  // SetEndData(false) so the DATA frame does not carry fin; trailers will
  // terminate the stream instead.
  visitor.AppendPayloadForStream(1, kBody);
  visitor.SetEndData(1, false);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, GetParam() ? nullptr : std::move(body1), false, nullptr);
  ASSERT_EQ(stream_id1, 1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id1, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS,
                            SpdyFrameType::DATA}));
  visitor.Clear();
  // Trailers end the stream with a final HEADERS frame.
  const std::vector<Header> trailers1 =
      ToHeaders({{"extra-info", "Trailers are weird but good?"}});
  adapter->SubmitTrailer(stream_id1, trailers1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  data = visitor.data();
  EXPECT_THAT(data, EqualsFrames({SpdyFrameType::HEADERS}));
}
// Verifies that when the visitor returns HEADER_RST_STREAM from a header
// callback (after explicitly submitting a RST_STREAM from within the
// callback), the adapter stops delivering headers and writes the RST_STREAM
// with the visitor-chosen error code (REFUSED_STREAM).
TEST(OgHttp2AdapterTest, ClientRstStreamWhileHandlingHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(1, "This is the response body.")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  // On the third header the visitor submits a RST_STREAM and tells the
  // adapter to reset the stream. Note: no further callbacks (including for the
  // subsequent DATA frame) are expected for this stream.
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"))
      .WillOnce(testing::DoAll(
          testing::InvokeWithoutArgs([&adapter]() {
            adapter->SubmitRst(1, Http2ErrorCode::REFUSED_STREAM);
          }),
          testing::Return(Http2VisitorInterface::HEADER_RST_STREAM)));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, stream_id1, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, stream_id1, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::REFUSED_STREAM)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// Verifies that when the visitor returns HEADER_CONNECTION_ERROR from a
// header callback, the adapter aborts processing (ProcessBytes returns a
// negative value) and sends a GOAWAY with INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ClientConnectionErrorWhileHandlingHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Response includes a body after the headers, but processing never gets
  // that far because the visitor rejects the third header.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(1, "This is the response body.")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"))
      .WillOnce(
          testing::Return(Http2VisitorInterface::HEADER_CONNECTION_ERROR));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kHeaderError));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  // A connection error surfaces as a negative return from ProcessBytes.
  EXPECT_LT(stream_result, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Same as ClientConnectionErrorWhileHandlingHeaders, but the response is a
// headers-only (fin) response with no body: the visitor's
// HEADER_CONNECTION_ERROR still aborts processing and triggers a GOAWAY with
// INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ClientConnectionErrorWhileHandlingHeadersOnly) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Headers-only response: fin is set on the HEADERS frame.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"))
      .WillOnce(
          testing::Return(Http2VisitorInterface::HEADER_CONNECTION_ERROR));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kHeaderError));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_LT(stream_result, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies that when the visitor rejects the header block outright (returns
// false from OnBeginHeadersForStream), the adapter treats it as a connection
// error: no individual header callbacks fire, ProcessBytes fails, and a
// GOAWAY with INTERNAL_ERROR is written.
TEST(OgHttp2AdapterTest, ClientRejectsHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(1, "This is the response body.")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  // Rejecting the header block at the start aborts the connection.
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1))
      .WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kHeaderError));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_LT(stream_result, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies that when the peer advertises a smaller SETTINGS_HEADER_TABLE_SIZE
// (100 bytes), the client's HPACK encoder immediately shrinks its dynamic
// table capacity (and evicts entries) to fit within the new limit.
TEST(OgHttp2AdapterTest, ClientHandlesSmallerHpackHeaderTableSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  // Several large custom headers inflate the encoder's dynamic table well
  // past 100 bytes.
  const std::vector<Header> headers1 = ToHeaders({
      {":method", "GET"},
      {":scheme", "http"},
      {":authority", "example.com"},
      {":path", "/this/is/request/one"},
      {"x-i-do-not-like", "green eggs and ham"},
      {"x-i-will-not-eat-them", "here or there, in a box, with a fox"},
      {"x-like-them-in-a-house", "no"},
      {"x-like-them-with-a-mouse", "no"},
  });
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  EXPECT_GT(adapter->GetHpackEncoderDynamicTableSize(), 100);
  // Peer lowers the header table size limit to 100 bytes.
  const std::string stream_frames =
      TestFrameSequence().Settings({{HEADER_TABLE_SIZE, 100u}}).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{HEADER_TABLE_SIZE, 100u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // The encoder honors the smaller limit right away.
  EXPECT_EQ(adapter->GetHpackEncoderDynamicTableCapacity(), 100);
  EXPECT_LE(adapter->GetHpackEncoderDynamicTableSize(), 100);
}
// Verifies that when the peer advertises a LARGER SETTINGS_HEADER_TABLE_SIZE
// (40960), the client's HPACK encoder keeps the default 4096-byte capacity
// until the SETTINGS ack has been sent, and only then adopts the larger size.
TEST(OgHttp2AdapterTest, ClientHandlesLargerHpackHeaderTableSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Default HPACK dynamic table capacity per RFC 7541.
  EXPECT_EQ(adapter->GetHpackEncoderDynamicTableCapacity(), 4096);
  const std::string stream_frames =
      TestFrameSequence().Settings({{HEADER_TABLE_SIZE, 40960u}}).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{HEADER_TABLE_SIZE, 40960u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // Not adopted yet: the ack has not been written.
  EXPECT_EQ(adapter->GetHpackEncoderDynamicTableCapacity(), 4096);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // After sending the ack, the encoder may use the larger capacity.
  EXPECT_EQ(adapter->GetHpackEncoderDynamicTableCapacity(), 40960);
}
// Verifies that when the client advertises a smaller HEADER_TABLE_SIZE via
// SubmitSettings, its HPACK *decoder* size limit does not drop until the
// peer acknowledges the SETTINGS frame; the peer may keep using the larger
// table until then.
TEST(OgHttp2AdapterTest, ClientSendsHpackHeaderTableSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 = ToHeaders({
      {":method", "GET"},
      {":scheme", "http"},
      {":authority", "example.com"},
      {":path", "/this/is/request/one"},
  });
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // Response with large custom headers populates the decoder's dynamic table.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .SettingsAck()
          .Headers(
              1,
              {{":status", "200"},
               {"server", "my-fake-server"},
               {"date", "Tue, 6 Apr 2021 12:54:01 GMT"},
               {"x-i-do-not-like", "green eggs and ham"},
               {"x-i-will-not-eat-them", "here or there, in a box, with a fox"},
               {"x-like-them-in-a-house", "no"},
               {"x-like-them-with-a-mouse", "no"}},
              true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 1));
  EXPECT_CALL(visitor, OnSettingsAck());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(7);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_GT(adapter->GetHpackDecoderSizeLimit(), 100);
  // Advertise the smaller table size; the decoder limit must not shrink
  // until the peer acks the SETTINGS frame.
  adapter->SubmitSettings({{HEADER_TABLE_SIZE, 100u}});
  EXPECT_GT(adapter->GetHpackDecoderSizeLimit(), 100);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_GT(adapter->GetHpackDecoderSizeLimit(), 100);
  result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  // A second request/response round trip while the ack is outstanding; the
  // decoder still accepts the peer's larger table.
  const std::vector<Header> headers2 = ToHeaders({
      {":method", "GET"},
      {":scheme", "http"},
      {":authority", "example.com"},
      {":path", "/this/is/request/two"},
  });
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers2, nullptr, true, nullptr);
  ASSERT_GT(stream_id2, stream_id1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  const std::string response_frames =
      TestFrameSequence()
          .Headers(stream_id2,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(stream_id2, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(stream_id2));
  EXPECT_CALL(visitor, OnHeaderForStream(stream_id2, _, _)).Times(3);
  EXPECT_CALL(visitor, OnEndHeadersForStream(stream_id2));
  EXPECT_CALL(visitor, OnEndStream(stream_id2));
  EXPECT_CALL(visitor,
              OnCloseStream(stream_id2, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t response_result = adapter->ProcessBytes(response_frames);
  EXPECT_EQ(response_frames.size(), static_cast<size_t>(response_result));
  EXPECT_GT(adapter->GetHpackDecoderSizeLimit(), 100);
  // Once the peer acks the SETTINGS frame, the decoder limit drops to 100.
  const std::string settings_ack =
      TestFrameSequence().SettingsAck().Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 1));
  EXPECT_CALL(visitor, OnSettingsAck());
  const int64_t ack_result = adapter->ProcessBytes(settings_ack);
  EXPECT_EQ(settings_ack.size(), static_cast<size_t>(ack_result));
  EXPECT_EQ(adapter->GetHpackDecoderSizeLimit(), 100);
}
// Currently disabled. Intended to verify that trailers containing a
// pseudo-header (":bad-status") cause the adapter to stop header delivery and
// reset the stream with PROTOCOL_ERROR.
// NOTE(review): unlike the neighboring tests, this one expects no SETTINGS
// send callbacks before the first adapter->Send() — presumably part of why it
// is DISABLED; confirm against current adapter behavior before re-enabling.
TEST(OgHttp2AdapterTest, DISABLED_ClientHandlesInvalidTrailers) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Trailers improperly contain a pseudo-header.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(1, "This is the response body.")
          .Headers(1, {{":bad-status", "9000"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, 26, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 26));
  EXPECT_CALL(visitor, OnDataForStream(1, "This is the response body."));
  // The invalid trailer block begins but no OnHeaderForStream fires for it.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, stream_id1, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(RST_STREAM, stream_id1, 4, 0x0, 1));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::PROTOCOL_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// Verifies that SubmitShutdownNotice() is a no-op for a client adapter:
// want_write() stays false and the next Send() writes only the connection
// preface and initial SETTINGS, no GOAWAY.
TEST(OgHttp2AdapterTest, ClientStartsShutdown) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  // Only servers send graceful-shutdown GOAWAYs; the client ignores this.
  adapter->SubmitShutdownNotice();
  EXPECT_FALSE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view serialized = visitor.data();
  EXPECT_THAT(serialized,
              testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  serialized.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(serialized, EqualsFrames({SpdyFrameType::SETTINGS}));
}
// Verifies client handling of a GOAWAY with last_stream_id = 1: stream 1 is
// already reset by the server; stream 3 (above the GOAWAY threshold) is closed
// with REFUSED_STREAM, and a subsequent WINDOW_UPDATE for stream 1 is ignored
// (no visitor callback) since the stream is gone.
TEST(OgHttp2AdapterTest, ClientReceivesGoAway) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  // Two outstanding requests: streams 1 and 3.
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers2, nullptr, true, nullptr);
  ASSERT_GT(stream_id2, stream_id1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS,
                            SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Queued for stream 3, which the GOAWAY below will refuse.
  adapter->SubmitWindowUpdate(3, 42);
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .RstStream(1, Http2ErrorCode::ENHANCE_YOUR_CALM)
          .GoAway(1, Http2ErrorCode::INTERNAL_ERROR, "indigestion")
          .WindowUpdate(0, 42)
          .WindowUpdate(1, 42)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, RST_STREAM, 0));
  EXPECT_CALL(visitor, OnRstStream(1, Http2ErrorCode::ENHANCE_YOUR_CALM));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::ENHANCE_YOUR_CALM));
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  // The GOAWAY opaque data ("indigestion") is not surfaced to the visitor.
  EXPECT_CALL(visitor, OnGoAway(1, Http2ErrorCode::INTERNAL_ERROR, ""));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::REFUSED_STREAM));
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 42));
  // Stream 1 is closed, so its WINDOW_UPDATE yields no OnWindowUpdate call.
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
}
// Verifies that a first GOAWAY with last-stream-id kMaxStreamId leaves
// stream 1 operable (its WINDOW_UPDATE is still sent), while a subsequent
// GOAWAY with last-stream-id 0 closes stream 1 with REFUSED_STREAM.
TEST(OgHttp2AdapterTest, ClientReceivesMultipleGoAways) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // First GOAWAY: last-stream-id kMaxStreamId does not refuse stream 1.
  const std::string initial_frames =
      TestFrameSequence()
          .ServerPreface()
          .GoAway(kMaxStreamId, Http2ErrorCode::INTERNAL_ERROR, "indigestion")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  EXPECT_CALL(visitor,
              OnGoAway(kMaxStreamId, Http2ErrorCode::INTERNAL_ERROR, ""));
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  // Stream 1 is still below the advertised last-stream-id, so this update is
  // queued and written.
  adapter->SubmitWindowUpdate(1, 42);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 1, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 1, 4, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::WINDOW_UPDATE}));
  visitor.Clear();
  // Second GOAWAY lowers last-stream-id to 0, which now refuses stream 1.
  const std::string final_frames =
      TestFrameSequence()
          .GoAway(0, Http2ErrorCode::INTERNAL_ERROR, "indigestion")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  EXPECT_CALL(visitor, OnGoAway(0, Http2ErrorCode::INTERNAL_ERROR, ""));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::REFUSED_STREAM));
  const int64_t final_result = adapter->ProcessBytes(final_frames);
  EXPECT_EQ(final_frames.size(), static_cast<size_t>(final_result));
  // Nothing further to write after the stream is refused.
  EXPECT_FALSE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), testing::IsEmpty());
}
// Verifies that a GOAWAY whose last-stream-id is larger than a previously
// received one (0 -> 1) is treated as a protocol error: the client reports
// an invalid frame, surfaces kInvalidGoAwayLastStreamId, and sends its own
// GOAWAY with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ClientReceivesMultipleGoAwaysWithIncreasingStreamId) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Three GOAWAYs: 0, 0, then 1. The increase from 0 to 1 is invalid.
  const std::string frames =
      TestFrameSequence()
          .ServerPreface()
          .GoAway(0, Http2ErrorCode::HTTP2_NO_ERROR, "")
          .GoAway(0, Http2ErrorCode::ENHANCE_YOUR_CALM, "")
          .GoAway(1, Http2ErrorCode::INTERNAL_ERROR, "")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  EXPECT_CALL(visitor, OnGoAway(0, Http2ErrorCode::HTTP2_NO_ERROR, ""));
  // The first GOAWAY (last-stream-id 0) refuses in-flight stream 1.
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::REFUSED_STREAM));
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  EXPECT_CALL(visitor, OnGoAway(0, Http2ErrorCode::ENHANCE_YOUR_CALM, ""));
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  // The third GOAWAY raises last-stream-id, which is a protocol violation.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(0, Http2VisitorInterface::InvalidFrameError::kProtocol));
  EXPECT_CALL(visitor,
              OnConnectionError(ConnectionError::kInvalidGoAwayLastStreamId));
  const int64_t frames_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(frames_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies GOAWAY handling for streams held back by MAX_CONCURRENT_STREAMS:
// a pending (never-sent) stream is closed with REFUSED_STREAM once a GOAWAY
// has been received, and streams submitted after the GOAWAY are refused
// immediately without writing anything.
TEST(OgHttp2AdapterTest, ClientReceivesGoAwayWithPendingStreams) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Server limits the client to one concurrent stream.
  const std::string initial_frames =
      TestFrameSequence()
          .ServerPreface({{MAX_CONCURRENT_STREAMS, 1}})
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting);
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  const int32_t stream_id2 =
      adapter->SubmitRequest(headers2, nullptr, true, nullptr);
  ASSERT_GT(stream_id2, stream_id1);
  // Only the first request's HEADERS are written; the second stream stays
  // pending under the concurrency limit.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  const std::string stream_frames =
      TestFrameSequence()
          .GoAway(kMaxStreamId, Http2ErrorCode::INTERNAL_ERROR, "indigestion")
          .Settings({{MAX_CONCURRENT_STREAMS, 42u}})
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  EXPECT_CALL(visitor,
              OnGoAway(kMaxStreamId, Http2ErrorCode::INTERNAL_ERROR, ""));
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{MAX_CONCURRENT_STREAMS, 42u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // Despite the raised concurrency limit, the pending stream 3 is refused
  // because a GOAWAY has been received; only the SETTINGS ack is written.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::REFUSED_STREAM));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // A request submitted after the GOAWAY is refused without any output.
  const std::vector<Header> headers3 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/three"}});
  const int32_t stream_id3 =
      adapter->SubmitRequest(headers3, nullptr, true, nullptr);
  ASSERT_GT(stream_id3, stream_id2);
  EXPECT_CALL(visitor, OnCloseStream(5, Http2ErrorCode::REFUSED_STREAM));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), testing::IsEmpty());
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that when the visitor rejects a GOAWAY (OnGoAway() returns false),
// the adapter reports a parse error, ProcessBytes() returns a negative value,
// and the client responds with its own GOAWAY carrying INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ClientFailsOnGoAway) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const char* kSentinel1 = "arbitrary pointer 1";
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, nullptr, true, const_cast<char*>(kSentinel1));
  ASSERT_GT(stream_id1, 0);
  QUICHE_LOG(INFO) << "Created stream: " << stream_id1;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // The DATA frame after the GOAWAY should never be delivered, since
  // processing aborts when the visitor rejects the GOAWAY.
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .GoAway(1, Http2ErrorCode::INTERNAL_ERROR, "indigestion")
          .Data(1, "This is the response body.")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "server", "my-fake-server"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(1, "date", "Tue, 6 Apr 2021 12:54:01 GMT"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0));
  // The visitor rejects the GOAWAY, turning it into a connection error.
  EXPECT_CALL(visitor, OnGoAway(1, Http2ErrorCode::INTERNAL_ERROR, ""))
      .WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_LT(stream_result, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies that a 101 (Switching Protocols) response status — which has no
// meaning in HTTP/2 — is reported as an invalid header and the stream is
// reset with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ClientRejects101Response) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"},
                 {"upgrade", "new-protocol"}});
  const int32_t stream_id1 =
      adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  const std::string stream_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1,
                   {{":status", "101"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  // The ":status: 101" header triggers an invalid-header error; none of the
  // individual header callbacks fire for this block.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(static_cast<int64_t>(stream_frames.size()), stream_result);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(RST_STREAM, 1, 4, 0x0,
                  static_cast<uint32_t>(Http2ErrorCode::PROTOCOL_ERROR)));
  // Locally-initiated reset: the stream closes with NO_ERROR at this layer.
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS,
                                            SpdyFrameType::RST_STREAM}));
}
// Verifies that the client honors the server's MAX_CONCURRENT_STREAMS=1:
// the second submitted request is not written until the first stream
// completes, after which its HEADERS go out.
TEST_P(OgHttp2AdapterDataTest, ClientObeysMaxConcurrentStreams) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view serialized = visitor.data();
  EXPECT_THAT(serialized,
              testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  serialized.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(serialized, EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Server advertises a concurrency limit of one stream.
  const std::string initial_frames =
      TestFrameSequence()
          .ServerPreface({{MAX_CONCURRENT_STREAMS, 1}})
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting);
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  const std::string kBody = "This is an example request body.";
  visitor.AppendPayloadForStream(1, kBody);
  visitor.SetEndData(1, true);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  // The test is parameterized on whether the request body is supplied via a
  // DataFrameSource or via the visitor (GetParam()).
  const int stream_id = adapter->SubmitRequest(
      ToHeaders({{":method", "POST"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}}),
      GetParam() ? nullptr : std::move(body1), false, nullptr);
  ASSERT_EQ(stream_id, 1);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor,
              OnBeforeFrameSent(HEADERS, stream_id, _, END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _, END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id, _, END_STREAM_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::HEADERS, SpdyFrameType::DATA}));
  EXPECT_THAT(visitor.data(), testing::HasSubstr(kBody));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
  // The second request is accepted but held back by the concurrency limit:
  // nothing to write yet.
  const int next_stream_id =
      adapter->SubmitRequest(ToHeaders({{":method", "POST"},
                                        {":scheme", "http"},
                                        {":authority", "example.com"},
                                        {":path", "/this/is/request/two"}}),
                             nullptr, true, nullptr);
  EXPECT_GT(next_stream_id, stream_id);
  EXPECT_FALSE(adapter->want_write());
  const std::string stream_frames =
      TestFrameSequence()
          .Headers(stream_id,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(stream_id, "This is the response body.", true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(stream_id));
  EXPECT_CALL(visitor, OnHeaderForStream(stream_id, ":status", "200"));
  EXPECT_CALL(visitor,
              OnHeaderForStream(stream_id, "server", "my-fake-server"));
  EXPECT_CALL(visitor, OnHeaderForStream(stream_id, "date",
                                         "Tue, 6 Apr 2021 12:54:01 GMT"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(stream_id));
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, 26, DATA, END_STREAM_FLAG));
  EXPECT_CALL(visitor, OnBeginDataForStream(stream_id, 26));
  EXPECT_CALL(visitor,
              OnDataForStream(stream_id, "This is the response body."));
  EXPECT_CALL(visitor, OnEndStream(stream_id));
  EXPECT_CALL(visitor,
              OnCloseStream(stream_id, Http2ErrorCode::HTTP2_NO_ERROR));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  // Stream 1 completed, freeing a concurrency slot: the held-back request's
  // HEADERS are now written.
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, next_stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, next_stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::HEADERS}));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that an INITIAL_WINDOW_SIZE of 80000 received before the stream
// starts is applied to the new stream: of an 81000-byte body, exactly 80000
// bytes (4 x 16384 + 14464) are written before flow control blocks.
TEST_P(OgHttp2AdapterDataTest, ClientReceivesInitialWindowSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string initial_frames =
      TestFrameSequence()
          .Settings({{INITIAL_WINDOW_SIZE, 80000u}})
          .WindowUpdate(0, 65536)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{INITIAL_WINDOW_SIZE, 80000u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 65536));
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  int64_t result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view serialized = visitor.data();
  EXPECT_THAT(serialized,
              testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  serialized.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(serialized,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Body intentionally exceeds the 80000-byte stream window by 1000 bytes.
  const std::string kLongBody = std::string(81000, 'c');
  visitor.AppendPayloadForStream(1, kLongBody);
  visitor.SetEndData(1, true);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  const int stream_id = adapter->SubmitRequest(
      ToHeaders({{":method", "POST"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}}),
      GetParam() ? nullptr : std::move(body1), false, nullptr);
  EXPECT_GT(stream_id, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _, 0x4, 0));
  // 4 full 16384-byte frames plus a 14464-byte frame = 80000 bytes total.
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id, 16384, 0x0, 0)).Times(4);
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id, 14464, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::HEADERS, SpdyFrameType::DATA,
                            SpdyFrameType::DATA, SpdyFrameType::DATA,
                            SpdyFrameType::DATA, SpdyFrameType::DATA}));
}
// Verifies that raising INITIAL_WINDOW_SIZE after a stream has started
// retroactively widens that stream's send window: the client first sends
// 65535 bytes (the default window), then 14465 more once the setting arrives
// (65535 + 14465 = 80000 total).
TEST_P(OgHttp2AdapterDataTest,
       ClientReceivesInitialWindowSettingAfterStreamStart) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string initial_frames =
      TestFrameSequence().ServerPreface().WindowUpdate(0, 65536).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 65536));
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  int64_t result = adapter->Send();
  EXPECT_EQ(0, result);
  visitor.Clear();
  const std::string kLongBody = std::string(81000, 'c');
  visitor.AppendPayloadForStream(1, kLongBody);
  visitor.SetEndData(1, true);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  const int stream_id = adapter->SubmitRequest(
      ToHeaders({{":method", "POST"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}}),
      GetParam() ? nullptr : std::move(body1), false, nullptr);
  EXPECT_GT(stream_id, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _, 0x4, 0));
  // Default 65535-byte stream window: 3 x 16384 + 16383 bytes.
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id, 16384, 0x0, 0)).Times(3);
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id, 16383, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::HEADERS, SpdyFrameType::DATA,
                            SpdyFrameType::DATA, SpdyFrameType::DATA,
                            SpdyFrameType::DATA}));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
  // Raising INITIAL_WINDOW_SIZE to 80000 frees 14465 more bytes of window.
  const std::string settings_frame =
      TestFrameSequence().Settings({{INITIAL_WINDOW_SIZE, 80000u}}).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{INITIAL_WINDOW_SIZE, 80000u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t settings_result = adapter->ProcessBytes(settings_frame);
  EXPECT_EQ(settings_frame.size(), static_cast<size_t>(settings_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id, 14465, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::DATA}));
}
// Verifies that an INITIAL_WINDOW_SIZE of 2^31 (above the protocol maximum
// of 2^31 - 1) is rejected as a flow-control error and the client sends a
// GOAWAY with FLOW_CONTROL_ERROR.
TEST(OgHttp2AdapterTest, InvalidInitialWindowSetting) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // One past the maximum allowed window size.
  const uint32_t kTooLargeInitialWindow = 1u << 31;
  const std::string initial_frames =
      TestFrameSequence()
          .Settings({{INITIAL_WINDOW_SIZE, kTooLargeInitialWindow}})
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  0, Http2VisitorInterface::InvalidFrameError::kFlowControl));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kFlowControlError));
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(GOAWAY, 0, _, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  int64_t result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view serialized = visitor.data();
  EXPECT_THAT(serialized,
              testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  serialized.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(serialized,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
  visitor.Clear();
}
// Verifies that applying INITIAL_WINDOW_SIZE = 2^31 - 1 to a stream that has
// already received a WINDOW_UPDATE overflows that stream's window; the
// client resets the stream with FLOW_CONTROL_ERROR.
// NOTE(review): the suite name "OggHttp2AdapterClientTest" looks like a typo
// of the "OgHttp2AdapterTest" suite used elsewhere in this file — confirm
// before renaming, since the registered name affects gtest filtering.
TEST(OggHttp2AdapterClientTest, InitialWindowSettingCausesOverflow) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  ASSERT_GT(stream_id, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int64_t write_result = adapter->Send();
  EXPECT_EQ(0, write_result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // The maximum legal window; combined with the earlier +65536 update it
  // pushes the stream window past 2^31 - 1.
  const uint32_t kLargeInitialWindow = (1u << 31) - 1;
  const std::string frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(stream_id, {{":status", "200"}}, false)
          .WindowUpdate(stream_id, 65536u)
          .Settings({{INITIAL_WINDOW_SIZE, kLargeInitialWindow}})
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, _, HEADERS, 0x4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(stream_id));
  EXPECT_CALL(visitor, OnHeaderForStream(stream_id, ":status", "200"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(stream_id));
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, 4, WINDOW_UPDATE, 0x0));
  EXPECT_CALL(visitor, OnWindowUpdate(stream_id, 65536));
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{INITIAL_WINDOW_SIZE,
                                              kLargeInitialWindow}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  // Two SETTINGS acks (server preface + second SETTINGS), then the reset of
  // the overflowed stream.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, stream_id, 4, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(RST_STREAM, stream_id, 4, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  EXPECT_CALL(visitor,
              OnCloseStream(stream_id, Http2ErrorCode::HTTP2_NO_ERROR));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
TEST(OgHttp2AdapterTest, FailureSendingConnectionPreface) {
  // A transport write error while flushing the initial client preface must
  // surface as a kSendError connection error and a negative Send() result.
  TestVisitor client_visitor;
  OgHttp2Adapter::Options client_options;
  client_options.perspective = Perspective::kClient;
  auto client = OgHttp2Adapter::Create(client_visitor, client_options);
  // Simulate the transport rejecting all writes.
  client_visitor.set_has_write_error();
  EXPECT_CALL(client_visitor, OnConnectionError(ConnectionError::kSendError));
  const int send_result = client->Send();
  EXPECT_LT(send_result, 0);
}
// Verifies that the client's advertised MAX_FRAME_SIZE does not take effect
// before the server acks it: a server DATA frame of that larger size is
// treated as a frame-size violation and the client sends a GOAWAY with
// FRAME_SIZE_ERROR.
TEST(OgHttp2AdapterTest, MaxFrameSizeSettingNotAppliedBeforeAck) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // 42 bytes past the default limit, so it is only legal after the new
  // setting is acknowledged.
  const uint32_t large_frame_size = kDefaultFramePayloadSizeLimit + 42;
  adapter->SubmitSettings({{MAX_FRAME_SIZE, large_frame_size}});
  const int32_t stream_id = adapter->SubmitRequest(
      ToHeaders({{":method", "GET"},
                 {":scheme", "https"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}}),
      nullptr, true, nullptr);
  EXPECT_GT(stream_id, 0);
  EXPECT_TRUE(adapter->want_write());
  testing::InSequence s;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Server sends an oversized DATA frame without having acked the setting.
  const std::string server_frames =
      TestFrameSequence()
          .ServerPreface()
          .Headers(1, {{":status", "200"}}, false)
          .Data(1, std::string(large_frame_size, 'a'))
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, large_frame_size, DATA, 0x0));
  // The oversized frame is a connection-level error; no data is delivered.
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  const int64_t process_result = adapter->ProcessBytes(server_frames);
  EXPECT_EQ(server_frames.size(), static_cast<size_t>(process_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::FRAME_SIZE_ERROR)));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Companion to MaxFrameSizeSettingNotAppliedBeforeAck: once the server has
// acked the client's SETTINGS, a DATA frame of the advertised larger size is
// accepted and delivered to the visitor normally.
TEST(OgHttp2AdapterTest, MaxFrameSizeSettingAppliedAfterAck) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const uint32_t large_frame_size = kDefaultFramePayloadSizeLimit + 42;
  adapter->SubmitSettings({{MAX_FRAME_SIZE, large_frame_size}});
  const int32_t stream_id = adapter->SubmitRequest(
      ToHeaders({{":method", "GET"},
                 {":scheme", "https"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}}),
      nullptr, true, nullptr);
  EXPECT_GT(stream_id, 0);
  EXPECT_TRUE(adapter->want_write());
  testing::InSequence s;
  // Client preface: SETTINGS (with MAX_FRAME_SIZE) followed by the request.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data,
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Unlike the "NotAppliedBeforeAck" test, the server acks the SETTINGS
  // before sending the large DATA frame.
  const std::string server_frames =
      TestFrameSequence()
          .ServerPreface()
          .SettingsAck()
          .Headers(1, {{":status", "200"}}, false)
          .Data(1, std::string(large_frame_size, 'a'))
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":status", "200"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // The large DATA frame is now accepted and delivered.
  EXPECT_CALL(visitor, OnFrameHeader(1, large_frame_size, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, large_frame_size));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  const int64_t process_result = adapter->ProcessBytes(server_frames);
  EXPECT_EQ(server_frames.size(), static_cast<size_t>(process_result));
  EXPECT_TRUE(adapter->want_write());
  // Only a SETTINGS ack is owed back; no error frames.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
}
// Verifies that a client adapter rejects a PUSH_PROMISE from the server
// (push is not enabled), reporting kInvalidPushPromise and closing the
// connection with a PROTOCOL_ERROR GOAWAY.
TEST(OgHttp2AdapterTest, ClientForbidsPushPromise) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  // Send the client connection preface (SETTINGS).
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int write_result = adapter->Send();
  EXPECT_EQ(0, write_result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Open a request stream so the server has a parent stream to "push" on.
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  ASSERT_GT(stream_id, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  write_result = adapter->Send();
  EXPECT_EQ(0, write_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Server sends a PUSH_PROMISE for a new even-numbered stream (2).
  const std::vector<Header> push_headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/push"}});
  const std::string frames = TestFrameSequence()
                                 .ServerPreface()
                                 .SettingsAck()
                                 .PushPromise(stream_id, 2, push_headers)
                                 .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck);
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, _, PUSH_PROMISE, _));
  // The PUSH_PROMISE itself is the connection error; header callbacks for the
  // promised stream are never invoked.
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidPushPromise));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  // Connection teardown with PROTOCOL_ERROR.
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies that a client adapter rejects server-initiated HEADERS on an
// even-numbered (server-chosen) stream id, reporting kInvalidNewStreamId and
// closing the connection with a PROTOCOL_ERROR GOAWAY.
TEST(OgHttp2AdapterTest, ClientForbidsPushStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  // Send the client connection preface (SETTINGS).
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int write_result = adapter->Send();
  EXPECT_EQ(0, write_result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Open a normal client request on an odd stream id.
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  const int32_t stream_id =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  ASSERT_GT(stream_id, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  write_result = adapter->Send();
  EXPECT_EQ(0, write_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Server sends HEADERS on stream 2, a stream the client never opened.
  const std::string frames =
      TestFrameSequence()
          .ServerPreface()
          .SettingsAck()
          .Headers(2,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck);
  EXPECT_CALL(visitor, OnFrameHeader(2, _, HEADERS, _));
  // The even stream id from the server is an invalid new stream.
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidNewStreamId));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  // Connection teardown with PROTOCOL_ERROR.
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies that after the client resets a stream, subsequent HEADERS and DATA
// from the server on that stream are ignored: only the frame headers are
// surfaced to the visitor, and no reply is queued.
TEST(OgHttp2AdapterTest, ClientReceivesDataOnClosedStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // Client preface.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  absl::string_view data = visitor.data();
  EXPECT_THAT(data, testing::StartsWith(spdy::kHttp2ConnectionHeaderPrefix));
  data.remove_prefix(strlen(spdy::kHttp2ConnectionHeaderPrefix));
  EXPECT_THAT(data, EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Process the server preface and ack it.
  const std::string initial_frames =
      TestFrameSequence().ServerPreface().Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(initial_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Submit and send a request.
  int stream_id =
      adapter->SubmitRequest(ToHeaders({{":method", "GET"},
                                        {":scheme", "http"},
                                        {":authority", "example.com"},
                                        {":path", "/this/is/request/one"}}),
                             nullptr, true, nullptr);
  EXPECT_GT(stream_id, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::HEADERS}));
  visitor.Clear();
  // Cancel the stream locally before the server response arrives. Note the
  // stream close is reported with NO_ERROR since the reset was local.
  adapter->SubmitRst(stream_id, Http2ErrorCode::CANCEL);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, stream_id, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(RST_STREAM, stream_id, _, 0x0,
                                   static_cast<int>(Http2ErrorCode::CANCEL)));
  EXPECT_CALL(visitor,
              OnCloseStream(stream_id, Http2ErrorCode::HTTP2_NO_ERROR));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::RST_STREAM}));
  visitor.Clear();
  // The server's response on the now-closed stream: only OnFrameHeader fires;
  // no header/data callbacks, and nothing needs to be written in reply.
  const std::string response_frames =
      TestFrameSequence()
          .Headers(stream_id,
                   {{":status", "200"},
                    {"server", "my-fake-server"},
                    {"date", "Tue, 6 Apr 2021 12:54:01 GMT"}},
                   false)
          .Data(stream_id, "This is the response body.", true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, _, HEADERS, 0x4));
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, _, DATA, END_STREAM_FLAG));
  const int64_t response_result = adapter->ProcessBytes(response_frames);
  EXPECT_EQ(response_frames.size(), static_cast<size_t>(response_result));
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that a client with two large outgoing request bodies stops sending
// when the connection flow control window is exhausted, and resumes on both
// streams after receiving WINDOW_UPDATE frames. The GetParam() branch selects
// between the visitor-managed and explicit DataFrameSource body paths.
TEST_P(OgHttp2AdapterDataTest, ClientEncountersFlowControlBlock) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  // 100 KiB body — larger than the 64 KiB default connection window.
  const std::string kBody = std::string(100 * 1024, 'a');
  visitor.AppendPayloadForStream(1, kBody);
  visitor.SetEndData(1, false);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, GetParam() ? nullptr : std::move(body1), false, nullptr);
  ASSERT_GT(stream_id1, 0);
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  visitor.AppendPayloadForStream(3, kBody);
  visitor.SetEndData(3, false);
  auto body2 = std::make_unique<VisitorDataSource>(visitor, 3);
  const int32_t stream_id2 = adapter->SubmitRequest(
      headers2, GetParam() ? nullptr : std::move(body2), false, nullptr);
  ASSERT_EQ(stream_id2, 3);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _, 0x4, 0));
  // Stream 1 consumes the entire connection window in 4 DATA frames.
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id1, _, 0x0, 0)).Times(4);
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  // Connection send window fully exhausted; no DATA may be sent.
  EXPECT_EQ(0, adapter->GetSendWindowSize());
  // Server grants window on the connection and on stream 1.
  const std::string stream_frames = TestFrameSequence()
                                        .ServerPreface()
                                        .WindowUpdate(0, 80000)
                                        .WindowUpdate(stream_id1, 20000)
                                        .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 80000));
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(1, 20000));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  // Both streams make progress once window is available; stream 2 (which had
  // stream window but no connection window) goes first.
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id2, _, 0x0, 0))
      .Times(testing::AtLeast(1));
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id1, _, 0x0, 0))
      .Times(testing::AtLeast(1));
  EXPECT_TRUE(adapter->want_write());
  result = adapter->Send();
  EXPECT_EQ(0, result);
}
// Verifies that trailers can be sent on a stream whose body is complete even
// while the connection as a whole is flow control blocked (trailers are
// HEADERS frames and are not subject to flow control).
TEST_P(OgHttp2AdapterDataTest, ClientSendsTrailersAfterFlowControlBlock) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}});
  // Stream 1 has a tiny body; stream 3's large body will exhaust the window.
  visitor.AppendPayloadForStream(1, "Really small body.");
  visitor.SetEndData(1, false);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  const int32_t stream_id1 = adapter->SubmitRequest(
      headers1, GetParam() ? nullptr : std::move(body1), false, nullptr);
  ASSERT_GT(stream_id1, 0);
  const std::vector<Header> headers2 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/two"}});
  const std::string kBody = std::string(100 * 1024, 'a');
  visitor.AppendPayloadForStream(3, kBody);
  visitor.SetEndData(3, false);
  auto body2 = std::make_unique<VisitorDataSource>(visitor, 3);
  const int32_t stream_id2 = adapter->SubmitRequest(
      headers2, GetParam() ? nullptr : std::move(body2), false, nullptr);
  ASSERT_GT(stream_id2, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id2, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id2, _, 0x4, 0));
  // Stream 1 sends its small body; stream 2 then consumes the remaining
  // connection window.
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id1, _, 0x0, 0)).Times(1);
  EXPECT_CALL(visitor, OnFrameSent(DATA, stream_id2, _, 0x0, 0)).Times(4);
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_FALSE(adapter->want_write());
  // Connection flow control is now fully blocked.
  EXPECT_EQ(0, adapter->GetSendWindowSize());
  // Trailers on stream 1 can still be written despite the block.
  const std::vector<Header> trailers1 =
      ToHeaders({{"extra-info", "Trailers are weird but good?"}});
  adapter->SubmitTrailer(stream_id1, trailers1);
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
}
// Verifies that requests beyond the server's MAX_CONCURRENT_STREAMS limit are
// queued rather than sent, and that the queue drains when the server raises
// the limit: with a limit of 2, only the first two of four requests go out;
// raising the limit to 5 releases the rest (plus a newly submitted fifth).
TEST(OgHttp2AdapterTest, ClientQueuesRequests) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  adapter->Send();
  // Server advertises MAX_CONCURRENT_STREAMS = 2.
  const std::string initial_frames =
      TestFrameSequence()
          .ServerPreface({{MAX_CONCURRENT_STREAMS, 2}})
          .SettingsAck()
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0x0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{
                           Http2KnownSettingsId::MAX_CONCURRENT_STREAMS, 2u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  adapter->ProcessBytes(initial_frames);
  // Submit four requests; only two may be active at once.
  const std::vector<Header> headers =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/example/request"}});
  std::vector<int32_t> stream_ids;
  int32_t stream_id = adapter->SubmitRequest(headers, nullptr, true, nullptr);
  stream_ids.push_back(stream_id);
  stream_id = adapter->SubmitRequest(headers, nullptr, true, nullptr);
  stream_ids.push_back(stream_id);
  stream_id = adapter->SubmitRequest(headers, nullptr, true, nullptr);
  stream_ids.push_back(stream_id);
  stream_id = adapter->SubmitRequest(headers, nullptr, true, nullptr);
  stream_ids.push_back(stream_id);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  // Only the first two requests are written; the rest stay queued.
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_ids[0], _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_ids[0], _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_ids[1], _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_ids[1], _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  adapter->Send();
  // Server raises the limit to 5.
  const std::string update_streams =
      TestFrameSequence().Settings({{MAX_CONCURRENT_STREAMS, 5}}).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0x0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSetting(Http2Setting{
                           Http2KnownSettingsId::MAX_CONCURRENT_STREAMS, 5u}));
  EXPECT_CALL(visitor, OnSettingsEnd());
  adapter->ProcessBytes(update_streams);
  stream_id = adapter->SubmitRequest(headers, nullptr, true, nullptr);
  stream_ids.push_back(stream_id);
  // With the higher limit, the two queued requests and the new one all go out.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_ids[2], _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_ids[2], _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_ids[3], _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_ids[3], _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_ids[4], _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_ids[4], _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  adapter->Send();
}
// Verifies that a response to a HEAD request may carry a nonzero
// content-length with no body: the stream ends cleanly with no content-length
// mismatch error.
TEST(OgHttp2AdapterTest, ClientAcceptsHeadResponseWithContentLength) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kClient;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::vector<Header> headers = ToHeaders({{":method", "HEAD"},
                                                 {":scheme", "http"},
                                                 {":authority", "example.com"},
                                                 {":path", "/"}});
  const int32_t stream_id =
      adapter->SubmitRequest(headers, nullptr, true, nullptr);
  testing::InSequence s;
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, stream_id, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, stream_id, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  adapter->Send();
  // Server responds to the HEAD with content-length: 101 and END_STREAM but
  // no DATA frames.
  const std::string initial_frames =
      TestFrameSequence()
          .ServerPreface()
          .SettingsAck()
          .Headers(stream_id, {{":status", "200"}, {"content-length", "101"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, _, SETTINGS, 0x0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  EXPECT_CALL(visitor, OnFrameHeader(stream_id, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(stream_id));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(2);
  EXPECT_CALL(visitor, OnEndHeadersForStream(stream_id));
  EXPECT_CALL(visitor, OnEndStream(stream_id));
  // The stream closes cleanly despite the body-less nonzero content-length.
  EXPECT_CALL(visitor,
              OnCloseStream(stream_id, Http2ErrorCode::HTTP2_NO_ERROR));
  adapter->ProcessBytes(initial_frames);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  adapter->Send();
}
// Verifies that a freshly created adapter reports the HTTP/2 default initial
// flow control window (65535) as its connection-level send window.
TEST(OgHttp2AdapterTest, GetSendWindowSize) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const int peer_window = adapter->GetSendWindowSize();
  EXPECT_EQ(peer_window, kInitialFlowControlWindowSize);
}
// Verifies handling of WINDOW_UPDATE frames with a zero delta (forbidden by
// the spec): on a stream it elicits RST_STREAM with PROTOCOL_ERROR and
// subsequent frames on that stream are dropped; on the connection (stream 0)
// it is a connection-level flow control error ending in GOAWAY.
TEST(OgHttp2AdapterTest, WindowUpdateZeroDelta) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string data_chunk(kDefaultFramePayloadSizeLimit, 'a');
  // Request on stream 1 followed by a zero-delta WINDOW_UPDATE and DATA that
  // should not be delivered once the stream has been reset.
  const std::string request =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/"}},
                   false)
          .WindowUpdate(1, 0)
          .Data(1, "Subsequent frames on stream 1 are not delivered.")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // The zero-delta WINDOW_UPDATE header is seen, but no OnWindowUpdate or
  // DATA callbacks follow.
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  adapter->ProcessBytes(request);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  // Stream-level violation: reset stream 1 with PROTOCOL_ERROR.
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, _));
  adapter->Send();
  // Zero-delta WINDOW_UPDATE on the connection is a connection error.
  const std::string window_update =
      TestFrameSequence().WindowUpdate(0, 0).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kFlowControlError));
  adapter->ProcessBytes(window_update);
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  adapter->Send();
}
// Verifies handling of WINDOW_UPDATE deltas that would overflow the 2^31-1
// flow control window: a stream-level overflow elicits RST_STREAM with
// FLOW_CONTROL_ERROR (and later frames on the stream are dropped); a
// connection-level overflow ends the connection with a FLOW_CONTROL_ERROR
// GOAWAY.
TEST(OgHttp2AdapterTest, WindowUpdateCausesWindowOverflow) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string data_chunk(kDefaultFramePayloadSizeLimit, 'a');
  // INT_MAX delta on top of the default 65535 window overflows the maximum.
  const std::string request =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/"}},
                   false)
          .WindowUpdate(1, std::numeric_limits<int>::max())
          .Data(1, "Subsequent frames on stream 1 are not delivered.")
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // The overflowing WINDOW_UPDATE header is seen; no DATA callbacks follow.
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  adapter->ProcessBytes(request);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  // Stream-level overflow: RST_STREAM with FLOW_CONTROL_ERROR.
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(RST_STREAM, 1, _, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, _));
  adapter->Send();
  // Same overflow on the connection window is a connection error.
  const std::string window_update =
      TestFrameSequence()
          .WindowUpdate(0, std::numeric_limits<int>::max())
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kFlowControlError));
  adapter->ProcessBytes(window_update);
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(GOAWAY, 0, _, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  adapter->Send();
}
// Verifies that SubmitWindowUpdate raises both connection- and stream-level
// receive windows above the default, that incoming DATA draws those windows
// down, and that MarkDataConsumedForStream restores them (beyond the initial
// default, since the limit was raised).
TEST(OgHttp2AdapterTest, WindowUpdateRaisesFlowControlWindowLimit) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string data_chunk(kDefaultFramePayloadSizeLimit, 'a');
  const std::string request = TestFrameSequence()
                                  .ClientPreface()
                                  .Headers(1,
                                           {{":method", "GET"},
                                            {":scheme", "https"},
                                            {":authority", "example.com"},
                                            {":path", "/"}},
                                           false)
                                  .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  adapter->ProcessBytes(request);
  // Raise both windows by two full max-size frames.
  adapter->SubmitWindowUpdate(0, 2 * kDefaultFramePayloadSizeLimit);
  adapter->SubmitWindowUpdate(1, 2 * kDefaultFramePayloadSizeLimit);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 0, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 0, 4, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 1, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 1, 4, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  // Both receive windows now sit above the default by 2 frame payloads.
  EXPECT_EQ(kInitialFlowControlWindowSize + 2 * kDefaultFramePayloadSizeLimit,
            adapter->GetReceiveWindowSize());
  EXPECT_EQ(kInitialFlowControlWindowSize + 2 * kDefaultFramePayloadSizeLimit,
            adapter->GetStreamReceiveWindowSize(1));
  // Five max-size DATA frames draw the windows down by 5 payloads (net -3
  // relative to the original default, given the +2 raise above).
  const std::string request_body = TestFrameSequence()
                                       .Data(1, data_chunk)
                                       .Data(1, data_chunk)
                                       .Data(1, data_chunk)
                                       .Data(1, data_chunk)
                                       .Data(1, data_chunk)
                                       .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0)).Times(5);
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _)).Times(5);
  EXPECT_CALL(visitor, OnDataForStream(1, _)).Times(5);
  adapter->ProcessBytes(request_body);
  EXPECT_EQ(kInitialFlowControlWindowSize - 3 * kDefaultFramePayloadSizeLimit,
            adapter->GetReceiveWindowSize());
  EXPECT_EQ(kInitialFlowControlWindowSize - 3 * kDefaultFramePayloadSizeLimit,
            adapter->GetStreamReceiveWindowSize(1));
  // Consuming 4 payloads' worth restores windows past the initial default.
  adapter->MarkDataConsumedForStream(1, 4 * kDefaultFramePayloadSizeLimit);
  EXPECT_GT(adapter->GetReceiveWindowSize(), kInitialFlowControlWindowSize);
  EXPECT_GT(adapter->GetStreamReceiveWindowSize(1),
            kInitialFlowControlWindowSize);
}
// Verifies that calling MarkDataConsumedForStream for a stream id that was
// never opened (3, while only stream 1 exists) is a safe no-op rather than a
// crash; the test passes if no assertion/death occurs.
TEST(OgHttp2AdapterTest, MarkDataConsumedForNonexistentStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // A normal request plus body on stream 1 only.
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          false)
                                 .Data(1, "Some data on stream 1")
                                 .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  adapter->ProcessBytes(frames);
  // Stream 3 does not exist; this must not crash or corrupt state.
  adapter->MarkDataConsumedForStream(3, 11);
}
// Exercises serialization of the full set of non-stream frame types the
// adapter can queue (SETTINGS, PRIORITY, RST_STREAM, PING, GOAWAY,
// WINDOW_UPDATE) and verifies they are written in submission order in a
// single Send() call.
TEST(OgHttp2AdapterTest, TestSerialize) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_TRUE(adapter->want_read());
  EXPECT_FALSE(adapter->want_write());
  adapter->SubmitSettings(
      {{HEADER_TABLE_SIZE, 128}, {MAX_FRAME_SIZE, 128 << 10}});
  EXPECT_TRUE(adapter->want_write());
  // Queue one of each remaining frame type.
  const Http2StreamId accepted_stream = 3;
  const Http2StreamId rejected_stream = 7;
  adapter->SubmitPriorityForStream(accepted_stream, 1, 255, true);
  adapter->SubmitRst(rejected_stream, Http2ErrorCode::CANCEL);
  adapter->SubmitPing(42);
  adapter->SubmitGoAway(13, Http2ErrorCode::HTTP2_NO_ERROR, "");
  adapter->SubmitWindowUpdate(accepted_stream, 127);
  EXPECT_TRUE(adapter->want_write());
  // Frames are sent in the order they were submitted. The RST_STREAM's
  // error code 0x8 is CANCEL.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(PRIORITY, accepted_stream, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(PRIORITY, accepted_stream, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, rejected_stream, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(RST_STREAM, rejected_stream, _, 0x0, 0x8));
  EXPECT_CALL(visitor, OnBeforeFrameSent(PING, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(PING, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(GOAWAY, 0, _, 0x0, 0));
  EXPECT_CALL(visitor,
              OnBeforeFrameSent(WINDOW_UPDATE, accepted_stream, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, accepted_stream, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(
      visitor.data(),
      EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::PRIORITY,
                    SpdyFrameType::RST_STREAM, SpdyFrameType::PING,
                    SpdyFrameType::GOAWAY, SpdyFrameType::WINDOW_UPDATE}));
  // Everything queued was flushed in the single Send() call.
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that Send() copes with a visitor that only accepts a limited
// number of bytes per call (send limit of 20): each Send() flushes roughly
// one queued frame, want_write() stays true until the queue is drained, and
// all frames eventually appear in order.
TEST(OgHttp2AdapterTest, TestPartialSerialize) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  adapter->SubmitSettings(
      {{HEADER_TABLE_SIZE, 128}, {MAX_FRAME_SIZE, 128 << 10}});
  adapter->SubmitGoAway(13, Http2ErrorCode::HTTP2_NO_ERROR,
                        "And don't come back!");
  adapter->SubmitPing(42);
  EXPECT_TRUE(adapter->want_write());
  // Cap each write so the three queued frames cannot be flushed at once.
  visitor.set_send_limit(20);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(GOAWAY, 0, _, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(PING, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(PING, 0, _, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  // Queue fully drained after the third partial write.
  EXPECT_FALSE(adapter->want_write());
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY,
                            SpdyFrameType::PING}));
}
// Verifies that a locally submitted SETTINGS_INITIAL_WINDOW_SIZE only changes
// an existing stream's receive window after the peer acks the SETTINGS frame:
// the window stays at the prior value (65535, then 80000) until each
// corresponding ack arrives.
TEST(OgHttp2AdapterTest, TestStreamInitialWindowSizeUpdates) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  adapter->SubmitSettings({{INITIAL_WINDOW_SIZE, 80000}});
  EXPECT_TRUE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          false)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 0x4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  // Pre-ack: the stream still has the default 65535 window.
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(1), 65535);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  // Sending our SETTINGS does not apply it; the peer's ack does.
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(1), 65535);
  const std::string ack = TestFrameSequence().SettingsAck().Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  adapter->ProcessBytes(ack);
  // First ack: window jumps to 80000.
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(1), 80000);
  // Repeat with a second update to 90000.
  adapter->SubmitSettings({{INITIAL_WINDOW_SIZE, 90000}});
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(1), 80000);
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  adapter->ProcessBytes(ack);
  // Second ack: window jumps to 90000.
  EXPECT_EQ(adapter->GetStreamReceiveWindowSize(1), 90000);
}
// Verifies that a negative return from OnFrameSent() for a control frame is
// treated as a fatal send error: the adapter reports kSendError, Send()
// returns a negative value, and subsequent Send() calls keep failing.
TEST(OgHttp2AdapterTest, ConnectionErrorOnControlFrameSent) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence().ClientPreface().Ping(42).Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(0, _, PING, 0));
  EXPECT_CALL(visitor, OnPing(42, false));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  // Simulate a visitor-side send failure on the SETTINGS ACK.
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0))
      .WillOnce(testing::Return(-902));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kSendError));
  int send_result = adapter->Send();
  EXPECT_LT(send_result, 0);
  EXPECT_FALSE(adapter->want_write());
  // The send error is sticky: later Send() calls also fail.
  send_result = adapter->Send();
  EXPECT_LT(send_result, 0);
}
// Verifies that a negative return from OnFrameSent() for a DATA frame is
// fatal: the adapter reports kSendError, stops writing, and later queued body
// data is never sent. Parameterized over the two body-provision styles
// (visitor-driven vs. explicit DataFrameSource).
TEST_P(OgHttp2AdapterDataTest, ConnectionErrorOnDataFrameSent) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
  visitor.AppendPayloadForStream(
      1, "Here is some data, which will lead to a fatal error");
  auto body = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result =
      adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                              GetParam() ? nullptr : std::move(body), false);
  ASSERT_EQ(0, submit_result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  // Simulate a visitor-side send failure on the DATA frame.
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0))
      .WillOnce(testing::Return(-902));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kSendError));
  int send_result = adapter->Send();
  EXPECT_LT(send_result, 0);
  // Data queued after the fatal error must not be written.
  visitor.AppendPayloadForStream(
      1, "After the fatal error, data will be sent no more");
  EXPECT_FALSE(adapter->want_write());
  send_result = adapter->Send();
  EXPECT_LT(send_result, 0);
}
// Verifies that a header block split across HEADERS + CONTINUATION frames is
// reassembled: header callbacks continue seamlessly across the CONTINUATION
// frame boundary and end with a single OnEndHeadersForStream().
TEST(OgHttp2AdapterTest, ClientSendsContinuation) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true,
                                          /*add_continuation=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  // HEADERS carries END_STREAM (0x1) but not END_HEADERS.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 1));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  // CONTINUATION carries END_HEADERS (0x4) and the remaining fields.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, CONTINUATION, 4));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
// Verifies that repeated header names are passed through in both directions:
// each received duplicate triggers its own OnHeaderForStream() callback, and
// a response containing duplicate names is still serialized successfully.
TEST_P(OgHttp2AdapterDataTest, RepeatedHeaderNames) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"accept", "text/plain"},
                                           {"accept", "text/html"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // Both "accept" values are delivered individually, in wire order.
  EXPECT_CALL(visitor, OnHeaderForStream(1, "accept", "text/plain"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "accept", "text/html"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  // Outbound response also repeats a header name ("content-length").
  const std::vector<Header> headers1 = ToHeaders(
      {{":status", "200"}, {"content-length", "10"}, {"content-length", "10"}});
  visitor.AppendPayloadForStream(1, "perfection");
  visitor.SetEndData(1, true);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1, headers1, GetParam() ? nullptr : std::move(body1), false);
  ASSERT_EQ(0, submit_result);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, 10, END_STREAM, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS, SpdyFrameType::DATA}));
}
// Verifies the server can start a response while the request is still open,
// then receive the request's trailers, and finally resume the (previously
// deferred) response body via ResumeStream() to close the stream cleanly.
TEST_P(OgHttp2AdapterDataTest, ServerRespondsToRequestWithTrailers) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1, {{":method", "GET"},
                       {":scheme", "https"},
                       {":authority", "example.com"},
                       {":path", "/this/is/request/one"}})
          .Data(1, "Example data, woohoo.")
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  // Submit response headers; the body source has no data yet, so only
  // HEADERS goes out in this write batch.
  const std::vector<Header> headers1 = ToHeaders({{":status", "200"}});
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1, headers1, GetParam() ? nullptr : std::move(body1), false);
  ASSERT_EQ(0, submit_result);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS}));
  visitor.Clear();
  // The client now sends its trailers (a HEADERS frame with END_STREAM).
  const std::string more_frames =
      TestFrameSequence()
          .Headers(1, {{"extra-info", "Trailers are weird but good?"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, "extra-info",
                                         "Trailers are weird but good?"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  result = adapter->ProcessBytes(more_frames);
  EXPECT_EQ(more_frames.size(), static_cast<size_t>(result));
  // Mark the (empty) response body as finished and resume the stream so the
  // final zero-length DATA frame with END_STREAM can be written.
  visitor.SetEndData(1, true);
  EXPECT_EQ(true, adapter->ResumeStream(1));
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, 0, END_STREAM, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::DATA}));
}
// Verifies that exceeding the configured max_header_list_bytes limit is a
// connection-level failure: the adapter reports kParseError and sends a
// GOAWAY with COMPRESSION_ERROR rather than a per-stream error.
TEST(OgHttp2AdapterTest, ServerReceivesMoreHeaderBytesThanConfigured) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  // Deliberately tiny limit so the request's header list overflows it.
  options.max_header_list_bytes = 42;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"},
                    {"from-douglas-de-fermat",
                     "I have discovered a truly marvelous answer to the life, "
                     "the universe, and everything that the header setting is "
                     "too narrow to contain."}},
                   /*fin=*/true)
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  // No individual header callbacks: the limit trips during decoding.
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::COMPRESSION_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// Verifies that when the visitor returns HEADER_RST_STREAM from
// OnHeaderForStream(), the adapter stops delivering the remaining headers on
// that stream and sends RST_STREAM with INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ServerVisitorRejectsHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"},
                    {"header1", "ok"},
                    {"header2", "rejected"},
                    {"header3", "not processed"},
                    {"header4", "not processed"},
                    {"header5", "not processed"},
                    {"header6", "not processed"},
                    {"header7", "not processed"},
                    {"header8", "not processed"}},
                   /*fin=*/false, /*add_continuation=*/true)
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 0x0));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  // Five headers are delivered before the rejected one; headers 3-8 are
  // never delivered once the visitor rejects "header2".
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(5);
  EXPECT_CALL(visitor, OnHeaderForStream(1, "header2", _))
      .WillOnce(testing::Return(Http2VisitorInterface::HEADER_RST_STREAM));
  int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x1));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x1, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// Verifies that an error from the response body source closes the stream
// with INTERNAL_ERROR after the headers go out, and that submitting trailers
// on the now-closed stream fails.
TEST_P(OgHttp2AdapterDataTest, ServerSubmitsResponseWithDataSourceError) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  // Make the body source for stream 1 report an error when read.
  visitor.SimulateError(1);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1, ToHeaders({{":status", "200"}, {"x-comment", "Sure, sounds good."}}),
      GetParam() ? nullptr : std::move(body1), false);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  // Headers are sent, then the body error closes the stream; no DATA frame.
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::INTERNAL_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS}));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
  // Trailers cannot be submitted for the already-closed stream.
  int trailer_result =
      adapter->SubmitTrailer(1, ToHeaders({{":final-status", "a-ok"}}));
  ASSERT_LT(trailer_result, 0);
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that responding with end_stream=true to a fully received request
// closes the stream cleanly with NO_ERROR and leaves nothing left to write.
TEST(OgHttp2AdapterTest, CompleteRequestWithServerResponse) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"}},
                   /*fin=*/false)
          .Data(1, "This is the response body.", /*fin=*/true)
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 1));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  // No body: respond with headers only and end the stream.
  int submit_result = adapter->SubmitResponse(
      1, ToHeaders({{":status", "200"}}), nullptr, true);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  // Both sides are done, so the stream closes with NO_ERROR.
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS}));
  EXPECT_FALSE(adapter->want_write());
}
// Verifies default behavior when the server finishes its response before the
// client finishes the request: no RST_STREAM is sent and no stream-close
// callback fires (contrast with the *RstStreamEnabled test below, which sets
// rst_stream_no_error_when_incomplete).
TEST(OgHttp2AdapterTest, IncompleteRequestWithServerResponse) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  // Respond with end_stream even though the request has no END_STREAM yet.
  int submit_result = adapter->SubmitResponse(
      1, ToHeaders({{":status", "200"}}), nullptr, true);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  // Note: no RST_STREAM and no OnCloseStream in the default configuration.
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS}));
  EXPECT_FALSE(adapter->want_write());
}
// Same scenario as IncompleteRequestWithServerResponse, but with
// rst_stream_no_error_when_incomplete enabled: after the full response the
// adapter additionally sends RST_STREAM (NO_ERROR) and closes the stream.
TEST(OgHttp2AdapterTest, IncompleteRequestWithServerResponseRstStreamEnabled) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  options.rst_stream_no_error_when_incomplete = true;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  int submit_result = adapter->SubmitResponse(
      1, ToHeaders({{":status", "200"}}), nullptr, true);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  // With the option enabled, the incomplete request side is reset with
  // NO_ERROR and the stream closes immediately.
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(RST_STREAM, 1, 4, 0x0, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(
      visitor.data(),
      EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                    SpdyFrameType::HEADERS, SpdyFrameType::RST_STREAM}));
  EXPECT_FALSE(adapter->want_write());
}
// Verifies content-length duplication handling: identical duplicate values
// (stream 1) are tolerated, with only the first delivered to the visitor;
// conflicting values (stream 3) make the frame invalid (kHttpHeader).
TEST(OgHttp2AdapterTest, ServerHandlesMultipleContentLength) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/1"},
                                           {"content-length", "7"},
                                           {"content-length", "7"}},
                                          /*fin=*/false)
                                 .Headers(3,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/3"},
                                           {"content-length", "11"},
                                           {"content-length", "13"}},
                                          /*fin=*/false)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/1"));
  // The duplicate identical value is delivered only once.
  EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "7"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":path", "/3"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, "content-length", "11"));
  // The conflicting second value invalidates the header block.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(3, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
// Verifies trailer handling for a response: the body is sent without
// END_STREAM, and a subsequently submitted trailer block (here containing
// the pseudo-header-style name ":final-status", hence "invalid") is still
// serialized as a final HEADERS frame that closes the stream with NO_ERROR.
TEST_P(OgHttp2AdapterDataTest, ServerSendsInvalidTrailers) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  const absl::string_view kBody = "This is an example response body.";
  // fin=false on the body: the DATA frame must not carry END_STREAM, so
  // trailers can still follow.
  visitor.AppendPayloadForStream(1, kBody);
  visitor.SetEndData(1, false);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1, ToHeaders({{":status", "200"}, {"x-comment", "Sure, sounds good."}}),
      GetParam() ? nullptr : std::move(body1), false);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS, SpdyFrameType::DATA}));
  EXPECT_THAT(visitor.data(), testing::HasSubstr(kBody));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
  int trailer_result =
      adapter->SubmitTrailer(1, ToHeaders({{":final-status", "a-ok"}}));
  ASSERT_EQ(trailer_result, 0);
  EXPECT_TRUE(adapter->want_write());
  // The trailer HEADERS carries END_STREAM and closes the stream.
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::HEADERS}));
}
// Verifies that a padded DATA frame is parsed correctly: the visitor sees
// the full padded length in OnBeginDataForStream(), the padding length via
// OnDataPaddingLength(), and only the unpadded payload in OnDataForStream().
TEST(OgHttp2AdapterTest, ServerHandlesDataWithPadding) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Data(1, "This is the request body.",
                                       /*fin=*/true, /*padding_length=*/39)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // Frame length covers 25 payload bytes + 39 padding; flags 0x9 are
  // END_STREAM | PADDED.
  EXPECT_CALL(visitor, OnFrameHeader(1, 25 + 39, DATA, 0x9));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 25 + 39));
  EXPECT_CALL(visitor, OnDataPaddingLength(1, 39));
  EXPECT_CALL(visitor, OnDataForStream(1, "This is the request body."));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnEndStream(3));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
}
// Verifies default host/:authority validation: "host" alone (stream 1) and
// "host" matching ":authority" (stream 3) are accepted, while a mismatched
// pair (stream 5) is rejected with kHttpHeader and RST_STREAM
// (PROTOCOL_ERROR). See the *WithLaxValidation variant for the opt-out.
TEST(OgHttp2AdapterTest, ServerHandlesHostHeader) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":path", "/this/is/request/one"},
                                           {"host", "example.com"}},
                                          /*fin=*/true)
                                 .Headers(3,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"host", "example.com"}},
                                          /*fin=*/true)
                                 .Headers(5,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "foo.com"},
                                           {":path", "/this/is/request/one"},
                                           {"host", "bar.com"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  // Stream 1: "host" without ":authority" is fine.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  // Stream 3: "host" agreeing with ":authority" is fine.
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnEndStream(3));
  // Stream 5: "host" disagreeing with ":authority" is invalid.
  EXPECT_CALL(visitor, OnFrameHeader(5, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(5));
  EXPECT_CALL(visitor, OnHeaderForStream(5, _, _)).Times(4);
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(5, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 5, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 5, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(5, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  visitor.Clear();
}
// Same three-stream scenario as ServerHandlesHostHeader, but with
// allow_different_host_and_authority enabled: the host/:authority mismatch on
// stream 5 is now tolerated, so all three streams complete normally and no
// RST_STREAM is sent.
TEST(OgHttp2AdapterTest, ServerHandlesHostHeaderWithLaxValidation) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  options.allow_different_host_and_authority = true;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":path", "/this/is/request/one"},
                                           {"host", "example.com"}},
                                          /*fin=*/true)
                                 .Headers(3,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"host", "example.com"}},
                                          /*fin=*/true)
                                 .Headers(5,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "foo.com"},
                                           {":path", "/this/is/request/one"},
                                           {"host", "bar.com"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnEndStream(3));
  // With lax validation, stream 5 delivers all 5 headers and completes.
  EXPECT_CALL(visitor, OnFrameHeader(5, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(5));
  EXPECT_CALL(visitor, OnHeaderForStream(5, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(5));
  EXPECT_CALL(visitor, OnEndStream(5));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  // Only SETTINGS + SETTINGS ack are written; no RST_STREAM expected.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  visitor.Clear();
}
// Verifies that a server can submit trailers for a stream whose response body
// is deferred (the data source has been exhausted and want_write() is false).
// Runs twice: with and without extra body payload appended after the initial
// send — in both cases only the trailing HEADERS frame (END_STREAM |
// END_HEADERS) is written, i.e. any late-appended body is ignored.
TEST_P(OgHttp2AdapterDataTest, ServerSubmitsTrailersWhileDataDeferred) {
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  for (const bool add_more_body_data : {true, false}) {
    TestVisitor visitor;
    auto adapter = OgHttp2Adapter::Create(visitor, options);
    const std::string frames = TestFrameSequence()
                                   .ClientPreface()
                                   .Headers(1,
                                            {{":method", "POST"},
                                             {":scheme", "https"},
                                             {":authority", "example.com"},
                                             {":path", "/this/is/request/one"}},
                                            /*fin=*/false)
                                   .WindowUpdate(1, 2000)
                                   .Data(1, "This is the request body.")
                                   .WindowUpdate(0, 2000)
                                   .Serialize();
    testing::InSequence s;
    EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
    EXPECT_CALL(visitor, OnSettingsStart());
    EXPECT_CALL(visitor, OnSettingsEnd());
    EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
    EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
    EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
    EXPECT_CALL(visitor, OnEndHeadersForStream(1));
    EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
    EXPECT_CALL(visitor, OnWindowUpdate(1, 2000));
    EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
    EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
    EXPECT_CALL(visitor, OnDataForStream(1, "This is the request body."));
    EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
    EXPECT_CALL(visitor, OnWindowUpdate(0, 2000));
    const int64_t result = adapter->ProcessBytes(frames);
    EXPECT_EQ(frames.size(), static_cast<size_t>(result));
    EXPECT_TRUE(adapter->want_write());
    EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
    EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
    EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
    EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
    int send_result = adapter->Send();
    EXPECT_EQ(0, send_result);
    visitor.Clear();
    // Submit a response with a body but without END_STREAM (end_stream=false).
    const absl::string_view kBody = "This is an example response body.";
    visitor.AppendPayloadForStream(1, kBody);
    auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
    int submit_result = adapter->SubmitResponse(
        1, ToHeaders({{":status", "200"}, {"x-comment", "Sure, sounds good."}}),
        GetParam() ? nullptr : std::move(body1), false);
    EXPECT_EQ(submit_result, 0);
    EXPECT_TRUE(adapter->want_write());
    EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
    EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
    EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
    send_result = adapter->Send();
    EXPECT_EQ(0, send_result);
    visitor.Clear();
    // Body is exhausted; the stream is now deferred.
    EXPECT_FALSE(adapter->want_write());
    if (add_more_body_data) {
      // Appended after the data source was exhausted; per the expectations
      // below, this payload is never sent.
      visitor.AppendPayloadForStream(1, " More body! This is ignored.");
    }
    int trailer_result =
        adapter->SubmitTrailer(1, ToHeaders({{"final-status", "a-ok"}}));
    ASSERT_EQ(trailer_result, 0);
    EXPECT_TRUE(adapter->want_write());
    // Only the trailers (HEADERS with END_STREAM | END_HEADERS) go out.
    EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                           END_STREAM_FLAG | END_HEADERS_FLAG));
    EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                     END_STREAM_FLAG | END_HEADERS_FLAG, 0));
    send_result = adapter->Send();
    EXPECT_EQ(0, send_result);
    EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::HEADERS}));
    EXPECT_FALSE(adapter->want_write());
  }
}
// Verifies that trailers submitted while the stream is blocked on stream-level
// flow control are held back until the remaining body bytes can be sent: the
// trailing HEADERS is only written after a WINDOW_UPDATE unblocks the stream
// and the final DATA frame goes out.
TEST_P(OgHttp2AdapterDataTest, ServerSubmitsTrailersWithFlowControlBlockage) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .WindowUpdate(0, 2000)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 2000));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  visitor.Clear();
  EXPECT_EQ(kInitialFlowControlWindowSize, adapter->GetStreamSendWindowSize(1));
  // 60000 bytes: large enough to consume most of the 65535-byte stream window
  // in four max-size DATA frames.
  const std::string kBody(60000, 'a');
  visitor.AppendPayloadForStream(1, kBody);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1, ToHeaders({{":status", "200"}, {"x-comment", "Sure, sounds good."}}),
      GetParam() ? nullptr : std::move(body1), false);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0)).Times(4);
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::HEADERS, SpdyFrameType::DATA,
                            SpdyFrameType::DATA, SpdyFrameType::DATA,
                            SpdyFrameType::DATA}));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
  // Append more body than the remaining stream window (but less than the
  // connection window), then resume and queue trailers.
  visitor.AppendPayloadForStream(1, std::string(6000, 'b'));
  EXPECT_LT(adapter->GetStreamSendWindowSize(1), 6000);
  EXPECT_GT(adapter->GetSendWindowSize(), 6000);
  adapter->ResumeStream(1);
  int trailer_result =
      adapter->SubmitTrailer(1, ToHeaders({{"final-status", "a-ok"}}));
  ASSERT_EQ(trailer_result, 0);
  EXPECT_TRUE(adapter->want_write());
  // Only the window-permitted portion of the body is sent; trailers wait.
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::DATA}));
  visitor.Clear();
  EXPECT_EQ(adapter->GetStreamSendWindowSize(1), 0);
  EXPECT_GT(adapter->GetSendWindowSize(), 0);
  // A stream WINDOW_UPDATE unblocks the remaining body and the trailers.
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(1, 2000));
  adapter->ProcessBytes(TestFrameSequence().WindowUpdate(1, 2000).Serialize());
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::DATA, SpdyFrameType::HEADERS}));
  EXPECT_FALSE(adapter->want_write());
}
// Verifies the conflicting case where trailers are submitted but the data
// source itself signals end-of-stream (SetEndData(true)): the DATA frame is
// sent with END_STREAM, the queued trailers are never written, and the stream
// closes with INTERNAL_ERROR.
TEST_P(OgHttp2AdapterDataTest, ServerSubmitsTrailersWithDataEndStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1, {{":method", "GET"},
                       {":scheme", "https"},
                       {":authority", "example.com"},
                       {":path", "/this/is/request/one"}})
          .Data(1, "Example data, woohoo.")
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  const absl::string_view kBody = "This is an example response body.";
  visitor.AppendPayloadForStream(1, kBody);
  // The data source claims it ends the stream, conflicting with the
  // subsequently submitted trailers.
  visitor.SetEndData(1, true);
  auto body = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result =
      adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                              GetParam() ? nullptr : std::move(body), false);
  ASSERT_EQ(submit_result, 0);
  const std::vector<Header> trailers =
      ToHeaders({{"extra-info", "Trailers are weird but good?"}});
  submit_result = adapter->SubmitTrailer(1, trailers);
  ASSERT_EQ(submit_result, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, END_HEADERS_FLAG, 0));
  // DATA carries END_STREAM; the stream then closes with INTERNAL_ERROR and
  // the trailers are dropped (no trailing HEADERS below).
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, END_STREAM_FLAG, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::INTERNAL_ERROR));
  const int send_result = adapter->Send();
  EXPECT_EQ(send_result, 0);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS, SpdyFrameType::DATA}));
}
// Variant of ServerSubmitsTrailersWithDataEndStream where the stream is first
// deferred (body exhausted without END_STREAM), then trailers are submitted
// and the data source later signals end-of-stream on resume: the final DATA
// frame carries END_STREAM and the stream closes with INTERNAL_ERROR, again
// dropping the trailers.
TEST_P(OgHttp2AdapterDataTest,
       ServerSubmitsTrailersWithDataEndStreamAndDeferral) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1, {{":method", "GET"},
                       {":scheme", "https"},
                       {":authority", "example.com"},
                       {":path", "/this/is/request/one"}})
          .Data(1, "Example data, woohoo.")
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  const absl::string_view kBody = "This is an example response body.";
  visitor.AppendPayloadForStream(1, kBody);
  auto body = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result =
      adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                              GetParam() ? nullptr : std::move(body), false);
  ASSERT_EQ(submit_result, 0);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, END_HEADERS_FLAG, 0));
  // First body chunk goes out without END_STREAM; the stream is now deferred.
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(send_result, 0);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS, SpdyFrameType::DATA}));
  visitor.Clear();
  const std::vector<Header> trailers =
      ToHeaders({{"extra-info", "Trailers are weird but good?"}});
  submit_result = adapter->SubmitTrailer(1, trailers);
  ASSERT_EQ(submit_result, 0);
  // More body plus end-of-data conflicts with the queued trailers.
  visitor.AppendPayloadForStream(1, kBody);
  visitor.SetEndData(1, true);
  adapter->ResumeStream(1);
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, END_STREAM_FLAG, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::INTERNAL_ERROR));
  send_result = adapter->Send();
  EXPECT_EQ(send_result, 0);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::DATA}));
}
// Verifies that a client sending more DATA than the connection flow-control
// window permits (4 * 16384 + 4464 = 70000 > 65535 default window) triggers
// kFlowControlError and a GOAWAY with FLOW_CONTROL_ERROR. Processing stops at
// the frame header of the window-violating DATA frame.
TEST(OgHttp2AdapterTest, ClientDisobeysConnectionFlowControl) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"accept", "some bogus value!"}},
                                          /*fin=*/false)
                                 .Data(1, std::string(16384, 'a'))
                                 .Data(1, std::string(16384, 'a'))
                                 .Data(1, std::string(16384, 'a'))
                                 .Data(1, std::string(16384, 'a'))
                                 .Data(1, std::string(4464, 'a'))
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  // First three 16384-byte DATA frames fit in the connection window.
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 16384));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 16384));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 16384));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  // The fourth frame overflows the 65535-byte connection window.
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kFlowControlError));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(GOAWAY, 0, _, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// Verifies the single-frame overflow case: the server raises MAX_FRAME_SIZE to
// window-size + 1, then the client sends one DATA frame of exactly that size
// (legal frame size, but exceeding the connection window), triggering
// kFlowControlError and a GOAWAY with FLOW_CONTROL_ERROR.
TEST(OgHttp2AdapterTest, ClientDisobeysConnectionFlowControlWithOneDataFrame) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // Allow the client to send a DATA frame that exceeds the connection window.
  const uint32_t window_overflow_bytes = kInitialFlowControlWindowSize + 1;
  adapter->SubmitSettings({{MAX_FRAME_SIZE, window_overflow_bytes}});
  const std::string initial_frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "POST"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"}},
                   /*fin=*/false)
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  int64_t process_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(process_result));
  EXPECT_TRUE(adapter->want_write());
  // SETTINGS payload is 6 bytes: the single MAX_FRAME_SIZE entry.
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  const std::string overflow_frames =
      TestFrameSequence()
          .SettingsAck()
          .Data(1, std::string(window_overflow_bytes, 'a'))
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  // The overflow is detected at the frame header, before any payload.
  EXPECT_CALL(visitor, OnFrameHeader(1, window_overflow_bytes, DATA, 0x0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kFlowControlError));
  process_result = adapter->ProcessBytes(overflow_frames);
  EXPECT_EQ(overflow_frames.size(), static_cast<size_t>(process_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(GOAWAY, 0, _, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Same overflow scenario as ClientDisobeysConnectionFlowControlWithOneDataFrame
// but the oversized DATA frame is delivered in partial reads: only the first
// 16384-byte chunk is processed, which is enough to detect the violation from
// the frame header and send GOAWAY with FLOW_CONTROL_ERROR.
TEST(OgHttp2AdapterTest, ClientDisobeysConnectionFlowControlAcrossReads) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const uint32_t window_overflow_bytes = kInitialFlowControlWindowSize + 1;
  adapter->SubmitSettings({{MAX_FRAME_SIZE, window_overflow_bytes}});
  const std::string initial_frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "POST"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"}},
                   /*fin=*/false)
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  int64_t process_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(initial_frames.size(), static_cast<size_t>(process_result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  const std::string overflow_frames =
      TestFrameSequence()
          .SettingsAck()
          .Data(1, std::string(window_overflow_bytes, 'a'))
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  EXPECT_CALL(visitor, OnFrameHeader(1, window_overflow_bytes, DATA, 0x0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kFlowControlError));
  // Feed only the first chunk; the header alone reveals the violation.
  const size_t chunk_length = 16384;
  ASSERT_GE(overflow_frames.size(), chunk_length);
  process_result =
      adapter->ProcessBytes(overflow_frames.substr(0, chunk_length));
  EXPECT_EQ(chunk_length, static_cast<size_t>(process_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(GOAWAY, 0, _, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies stream-level (as opposed to connection-level) flow-control
// enforcement: the server grows only the connection window via
// SubmitWindowUpdate(0, 20000), so 70000 bytes of DATA overflow the 65535-byte
// stream window. The violation is stream-scoped: RST_STREAM with
// FLOW_CONTROL_ERROR, not GOAWAY.
TEST(OgHttp2AdapterTest, ClientDisobeysStreamFlowControl) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"accept", "some bogus value!"}},
                                          /*fin=*/false)
                                 .Serialize();
  const std::string more_frames = TestFrameSequence()
                                      .Data(1, std::string(16384, 'a'))
                                      .Data(1, std::string(16384, 'a'))
                                      .Data(1, std::string(16384, 'a'))
                                      .Data(1, std::string(16384, 'a'))
                                      .Data(1, std::string(4464, 'a'))
                                      .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  // Grow only the connection window, leaving the stream window at its default.
  adapter->SubmitWindowUpdate(0, 20000);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 0, 4, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 0, 4, 0x0, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::WINDOW_UPDATE}));
  visitor.Clear();
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 16384));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 16384));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 16384));
  EXPECT_CALL(visitor, OnDataForStream(1, _));
  // Fourth DATA frame overflows the stream window; no payload callback.
  EXPECT_CALL(visitor, OnFrameHeader(1, 16384, DATA, 0x0));
  result = adapter->ProcessBytes(more_frames);
  EXPECT_EQ(more_frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  // Stream-level violation: RST_STREAM(FLOW_CONTROL_ERROR), not GOAWAY.
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(
      visitor,
      OnFrameSent(RST_STREAM, 1, 4, 0x0,
                  static_cast<int>(Http2ErrorCode::FLOW_CONTROL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::RST_STREAM}));
}
// Verifies that when the visitor returns HEADER_RST_STREAM from
// OnHeaderForStream, the adapter resets the stream with INTERNAL_ERROR and
// skips the remainder of the stream's frames (WINDOW_UPDATE(1) and DATA are
// not delivered; the connection-level WINDOW_UPDATE(0) still is).
TEST(OgHttp2AdapterTest, ServerErrorWhileHandlingHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"accept", "some bogus value!"}},
                                          /*fin=*/false)
                                 .WindowUpdate(1, 2000)
                                 .Data(1, "This is the request body.")
                                 .WindowUpdate(0, 2000)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // The visitor rejects this header, requesting a stream reset.
  EXPECT_CALL(visitor, OnHeaderForStream(1, "accept", "some bogus value!"))
      .WillOnce(testing::Return(Http2VisitorInterface::HEADER_RST_STREAM));
  // Stream 1's remaining frames are dropped; only the connection-level
  // WINDOW_UPDATE is still surfaced.
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 2000));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// Verifies frame-dropping after visitor-initiated stream errors across two
// streams: stream 1 is rejected from OnHeaderForStream (its WINDOW_UPDATE,
// DATA, METADATA, and RST_STREAM are all dropped), while stream 3 is reset
// via SubmitRst(REFUSED_STREAM) from inside a metadata callback (the rest of
// its metadata is dropped). Both resets are then written.
TEST(OgHttp2AdapterTest, ServerErrorWhileHandlingHeadersDropsFrames) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"accept", "some bogus value!"}},
                                          /*fin=*/false)
                                 .WindowUpdate(1, 2000)
                                 .Data(1, "This is the request body.")
                                 .Metadata(1, "This is the request metadata.")
                                 .RstStream(1, Http2ErrorCode::CANCEL)
                                 .WindowUpdate(0, 2000)
                                 .Headers(3,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/two"}},
                                          /*fin=*/false)
                                 .Metadata(3, "This is the request metadata.",
                                           /*multiple_frames=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  // Visitor rejects the 5th header; everything else on stream 1 is dropped.
  EXPECT_CALL(visitor, OnHeaderForStream(1, "accept", "some bogus value!"))
      .WillOnce(testing::Return(Http2VisitorInterface::HEADER_RST_STREAM));
  EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnWindowUpdate(0, 2000));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, kMetadataFrameType, 0));
  EXPECT_CALL(visitor, OnBeginMetadataForStream(3, _));
  // Reset stream 3 from inside the metadata callback; later metadata frames
  // for the stream are then dropped.
  EXPECT_CALL(visitor, OnMetadataForStream(3, "This is the re"))
      .WillOnce(testing::DoAll(testing::InvokeWithoutArgs([&adapter]() {
                                 adapter->SubmitRst(
                                     3, Http2ErrorCode::REFUSED_STREAM);
                               }),
                               testing::Return(true)));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(frames.size(), static_cast<size_t>(result));
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::REFUSED_STREAM)));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(
      visitor.data(),
      EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                    SpdyFrameType::RST_STREAM, SpdyFrameType::RST_STREAM}));
}
// Verifies escalation to a connection error: an uppercase header name
// ("Accept") is reported via OnInvalidFrame(kHttpHeader); the visitor returns
// false, turning it into kHeaderError. Processing stops (negative return) and
// the adapter writes RST_STREAM then GOAWAY, both with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ServerConnectionErrorWhileHandlingHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"Accept", "uppercase, oh boy!"}},
                                          /*fin=*/false)
                                 .WindowUpdate(1, 2000)
                                 .Data(1, "This is the request body.")
                                 .WindowUpdate(0, 2000)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // Returning false from OnInvalidFrame escalates to a connection error.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader))
      .WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kHeaderError));
  const int64_t result = adapter->ProcessBytes(frames);
  // Negative result signals that processing aborted on the connection error.
  EXPECT_LT(result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::RST_STREAM,
                            SpdyFrameType::GOAWAY}));
}
// The visitor returns false from OnEndHeadersForStream(), which the adapter
// treats as a fatal callback failure: it reports ConnectionError::kParseError
// and queues a GOAWAY with INTERNAL_ERROR (no RST_STREAM for the stream).
TEST(OgHttp2AdapterTest, ServerErrorAfterHandlingHeaders) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .WindowUpdate(1, 2000)
                                 .Data(1, "This is the request body.")
                                 .WindowUpdate(0, 2000)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // The callback rejection happens after all headers were accepted.
  EXPECT_CALL(visitor, OnEndHeadersForStream(1))
      .WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  const int64_t result = adapter->ProcessBytes(frames);
  // Processing aborts; the WINDOW_UPDATE and DATA frames are never delivered.
  EXPECT_LT(result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// The visitor rejects a frame at the earliest possible point by returning
// false from OnFrameHeader() (here, for a PING). The adapter must abort
// processing and queue a GOAWAY with INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ServerRejectsFrameHeader) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Ping(64)
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .WindowUpdate(1, 2000)
                                 .Data(1, "This is the request body.")
                                 .WindowUpdate(0, 2000)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  // Rejecting the PING's frame header aborts all further processing; the
  // HEADERS, WINDOW_UPDATE and DATA frames are never delivered.
  EXPECT_CALL(visitor, OnFrameHeader(0, 8, PING, 0))
      .WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_LT(result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// The visitor returns false from OnBeginDataForStream(), i.e. rejects a DATA
// frame before any payload is delivered. The adapter must abort processing
// (the later stream-3 HEADERS, RST_STREAM and PING are never seen) and queue
// a GOAWAY with INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ServerRejectsBeginningOfData) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Data(1, "This is the request body.")
                                 .Headers(3,
                                          {{":method", "GET"},
                                           {":scheme", "http"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/two"}},
                                          /*fin=*/true)
                                 .RstStream(3, Http2ErrorCode::CANCEL)
                                 .Ping(47)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, 25, DATA, 0));
  // Rejection before any DATA payload is delivered.
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 25))
      .WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_LT(result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// A single 80 KiB header value exceeds both the 64 KiB header-list and
// header-field limits configured below. The oversized stream (1) is reset
// with INTERNAL_ERROR, but the error stays stream-local: the connection
// survives and stream 3 is processed normally.
TEST(OgHttp2AdapterTest, ServerReceivesTooLargeHeader) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  // Both limits are set to 64 KiB so that the 80 KiB value trips them.
  options.max_header_list_bytes = 64 * 1024;
  options.max_header_field_size = 64 * 1024;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string too_large_value = std::string(80 * 1024, 'q');
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"},
                                           {"x-toobig", too_large_value}},
                                          /*fin=*/true)
                                 .Headers(3,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/two"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // The oversized header block spans several CONTINUATION frames; the
  // offending header itself is never delivered to the visitor.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, CONTINUATION, 0)).Times(3);
  EXPECT_CALL(visitor, OnFrameHeader(1, _, CONTINUATION, END_HEADERS_FLAG));
  // Stream 3 proceeds normally despite the stream-1 failure.
  EXPECT_CALL(visitor,
              OnFrameHeader(3, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnEndStream(3));
  const int64_t result = adapter->ProcessBytes(frames);
  // All input is consumed; the error is not a connection error.
  EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// An ":authority" value containing an invalid character ('|') triggers
// OnInvalidFrame() with kHttpHeader. Because the visitor does not reject it
// (default action), the error remains stream-local: the stream is reset with
// PROTOCOL_ERROR and the connection continues.
TEST(OgHttp2AdapterTest, ServerReceivesInvalidAuthority) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "ex|ample.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  // The invalid authority is reported but not delivered as a header.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t result = adapter->ProcessBytes(frames);
  // All input is consumed; only the stream fails.
  EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0x0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0x0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, 4, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, 4, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// A server that receives a client GOAWAY must still be able to respond on a
// stream that was opened before the GOAWAY arrived. (Suite name fixed from
// the typo "OgHttpAdapterTest" to match the rest of the file.)
TEST(OgHttp2AdapterTest, ServerReceivesGoAway) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true)
                                 .GoAway(0, Http2ErrorCode::HTTP2_NO_ERROR, "")
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(0, _, GOAWAY, 0x0));
  EXPECT_CALL(visitor, OnGoAway(0, Http2ErrorCode::HTTP2_NO_ERROR, ""));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
  // Responding on stream 1 after the GOAWAY must still succeed, since the
  // stream was already open when the GOAWAY was received.
  const int submit_result =
      adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                              nullptr, true);
  ASSERT_EQ(0, submit_result);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0x0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0x0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _,
                                         END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _,
                                   END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::HEADERS}));
}
// End-to-end server response: receive a complete GET request, then submit a
// 404 response with a body and verify HEADERS + DATA are written. Also
// exercises stream user data and the send-window / HPACK-table accessors.
// Fix: the ProcessBytes result comparison now casts frames.size() to int64_t,
// matching every other test in this file and avoiding a signed/unsigned
// comparison warning.
TEST_P(OgHttp2AdapterDataTest, ServerSubmitResponse) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  const char* kSentinel1 = "arbitrary pointer 1";
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // Attach user data to the stream from within the headers callback.
  EXPECT_CALL(visitor, OnEndHeadersForStream(1))
      .WillOnce(testing::InvokeWithoutArgs([&adapter, kSentinel1]() {
        adapter->SetStreamUserData(1, const_cast<char*>(kSentinel1));
        return true;
      }));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
  EXPECT_EQ(1, adapter->GetHighestReceivedStreamId());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // No response headers sent yet, so the HPACK encoder table is still empty.
  EXPECT_EQ(0, adapter->GetHpackEncoderDynamicTableSize());
  EXPECT_FALSE(adapter->want_write());
  const absl::string_view kBody = "This is an example response body.";
  visitor.AppendPayloadForStream(1, kBody);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1,
      ToHeaders({{":status", "404"},
                 {"x-comment", "I have no idea what you're talking about."}}),
      GetParam() ? nullptr : std::move(body1), false);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  // User data set in the callback is retrievable, and can be cleared.
  EXPECT_EQ(kSentinel1, adapter->GetStreamUserData(1));
  adapter->SetStreamUserData(1, nullptr);
  EXPECT_EQ(nullptr, adapter->GetStreamUserData(1));
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x4));
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, 0x4, 0));
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::HEADERS, SpdyFrameType::DATA}));
  EXPECT_THAT(visitor.data(), testing::HasSubstr(kBody));
  EXPECT_FALSE(adapter->want_write());
  // Sending the body consumed some (but not all) of stream 1's send window;
  // an unknown stream reports -1.
  EXPECT_LT(adapter->GetStreamSendWindowSize(1), kInitialFlowControlWindowSize);
  EXPECT_GT(adapter->GetStreamSendWindowSize(1), 0);
  EXPECT_EQ(adapter->GetStreamSendWindowSize(3), -1);
  EXPECT_GT(adapter->GetHpackEncoderDynamicTableSize(), 0);
}
// A response is submitted, but before it can be written the client resets the
// stream. The adapter must drop the queued response: no HEADERS or DATA are
// sent for the canceled stream.
// Fix: the ProcessBytes result comparison now casts frames.size() to int64_t,
// matching every other test in this file and avoiding a signed/unsigned
// comparison warning.
TEST_P(OgHttp2AdapterDataTest, ServerSubmitResponseWithResetFromClient) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
  EXPECT_EQ(1, adapter->GetHighestReceivedStreamId());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  EXPECT_FALSE(adapter->want_write());
  const absl::string_view kBody = "This is an example response body.";
  visitor.AppendPayloadForStream(1, kBody);
  auto body1 = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result = adapter->SubmitResponse(
      1,
      ToHeaders({{":status", "404"},
                 {"x-comment", "I have no idea what you're talking about."}}),
      GetParam() ? nullptr : std::move(body1), false);
  EXPECT_EQ(submit_result, 0);
  EXPECT_TRUE(adapter->want_write());
  // The client cancels the stream before the response is written.
  const std::string reset =
      TestFrameSequence().RstStream(1, Http2ErrorCode::CANCEL).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, RST_STREAM, 0));
  EXPECT_CALL(visitor, OnRstStream(1, Http2ErrorCode::CANCEL));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::CANCEL));
  const int64_t reset_result = adapter->ProcessBytes(reset);
  EXPECT_EQ(reset.size(), static_cast<size_t>(reset_result));
  // The queued response must never be sent for the canceled stream.
  EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, _)).Times(0);
  EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, _, _)).Times(0);
  EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, _, _)).Times(0);
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), testing::IsEmpty());
}
// The visitor rejects DATA payload mid-delivery (OnDataForStream() returns
// false). The adapter must abort processing — later frames for stream 3 are
// never delivered — and queue a GOAWAY with INTERNAL_ERROR.
TEST(OgHttp2AdapterTest, ServerRejectsStreamData) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Data(1, "This is the request body.")
                                 .Headers(3,
                                          {{":method", "GET"},
                                           {":scheme", "http"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/two"}},
                                          /*fin=*/true)
                                 .RstStream(3, Http2ErrorCode::CANCEL)
                                 .Ping(47)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, 25, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 25));
  // Unlike ServerRejectsBeginningOfData, the rejection happens once payload
  // is actually delivered.
  EXPECT_CALL(visitor, OnDataForStream(1, _)).WillOnce(testing::Return(false));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kParseError));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_LT(result, 0);
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// Alias of the bool-parameterized data-test fixture for the client/server
// interaction tests below; instantiated over both parameter values.
using OgHttp2AdapterInteractionDataTest = OgHttp2AdapterDataTest;
INSTANTIATE_TEST_SUITE_P(BothValues, OgHttp2AdapterInteractionDataTest,
                         testing::Bool());
// Full-duplex interaction between a client and a server adapter, with each
// side's OnReadyToSend() wired directly into the peer's ProcessBytes(). The
// client's OnEndHeadersForStream() callback keeps submitting new requests
// (stream ids 3, 5, 7, 9, 11) until the id reaches 10, so a single
// SubmitRequest()+Send() below drives the whole exchange recursively.
TEST_P(OgHttp2AdapterInteractionDataTest, ClientServerInteractionTest) {
  TestVisitor client_visitor;
  OgHttp2Adapter::Options client_options;
  client_options.perspective = Perspective::kClient;
  auto client_adapter = OgHttp2Adapter::Create(client_visitor, client_options);
  TestVisitor server_visitor;
  OgHttp2Adapter::Options server_options;
  server_options.perspective = Perspective::kServer;
  auto server_adapter = OgHttp2Adapter::Create(server_visitor, server_options);
  EXPECT_CALL(client_visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(client_visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0x0));
  EXPECT_CALL(client_visitor, OnBeforeFrameSent(HEADERS, 1, _, 0x5));
  EXPECT_CALL(client_visitor, OnFrameSent(HEADERS, 1, _, 0x5, 0x0));
  // Pipe each peer's output straight into the other peer's input.
  EXPECT_CALL(client_visitor, OnReadyToSend(_))
      .WillRepeatedly(
          testing::Invoke(server_adapter.get(), &OgHttp2Adapter::ProcessBytes));
  EXPECT_CALL(server_visitor, OnReadyToSend(_))
      .WillRepeatedly(
          testing::Invoke(client_adapter.get(), &OgHttp2Adapter::ProcessBytes));
  // The server answers every request with an empty 200 response.
  EXPECT_CALL(server_visitor, OnEndHeadersForStream(_))
      .WillRepeatedly([&server_adapter](Http2StreamId stream_id) {
        server_adapter->SubmitResponse(
            stream_id, ToHeaders({{":status", "200"}}), nullptr, true);
        server_adapter->Send();
        return true;
      });
  // Each completed response triggers the next request until stream id >= 10.
  EXPECT_CALL(client_visitor, OnEndHeadersForStream(_))
      .WillRepeatedly([&client_adapter,
                       &client_visitor](Http2StreamId stream_id) {
        if (stream_id < 10) {
          const Http2StreamId new_stream_id = stream_id + 2;
          client_visitor.AppendPayloadForStream(
              new_stream_id, "This is an example request body.");
          client_visitor.SetEndData(new_stream_id, true);
          auto body = std::make_unique<VisitorDataSource>(client_visitor,
                                                          new_stream_id);
          const int created_stream_id = client_adapter->SubmitRequest(
              ToHeaders({{":method", "GET"},
                         {":scheme", "http"},
                         {":authority", "example.com"},
                         {":path",
                          absl::StrCat("/this/is/request/", new_stream_id)}}),
              GetParam() ? nullptr : std::move(body), false, nullptr);
          EXPECT_EQ(new_stream_id, created_stream_id);
          client_adapter->Send();
        }
        return true;
      });
  // Kick off the exchange with the first request on stream 1.
  int stream_id = client_adapter->SubmitRequest(
      ToHeaders({{":method", "POST"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"}}),
      nullptr, true, nullptr);
  EXPECT_EQ(stream_id, 1);
  client_adapter->Send();
}
// A request with a repeated header name ("accept" appears twice) must be
// transported intact: the server visitor receives two separate
// OnHeaderForStream() calls, one per value, in order.
TEST(OgHttp2AdapterInteractionTest,
     ClientServerInteractionRepeatedHeaderNames) {
  TestVisitor client_visitor;
  OgHttp2Adapter::Options client_options;
  client_options.perspective = Perspective::kClient;
  auto client_adapter = OgHttp2Adapter::Create(client_visitor, client_options);
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"},
                 {"accept", "text/plain"},
                 {"accept", "text/html"}});
  const int32_t stream_id1 =
      client_adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(client_visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(client_visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(client_visitor,
              OnBeforeFrameSent(HEADERS, stream_id1, _,
                                END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(client_visitor,
              OnFrameSent(HEADERS, stream_id1, _,
                          END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int send_result = client_adapter->Send();
  EXPECT_EQ(0, send_result);
  // Feed the client's serialized output into a fresh server adapter.
  TestVisitor server_visitor;
  OgHttp2Adapter::Options server_options;
  server_options.perspective = Perspective::kServer;
  auto server_adapter = OgHttp2Adapter::Create(server_visitor, server_options);
  testing::InSequence s;
  EXPECT_CALL(server_visitor, OnFrameHeader(0, _, SETTINGS, 0));
  EXPECT_CALL(server_visitor, OnSettingsStart());
  EXPECT_CALL(server_visitor, OnSetting).Times(testing::AnyNumber());
  EXPECT_CALL(server_visitor, OnSettingsEnd());
  EXPECT_CALL(server_visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(server_visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(server_visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(server_visitor, OnHeaderForStream(1, ":scheme", "http"));
  EXPECT_CALL(server_visitor,
              OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(server_visitor,
              OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // Both "accept" values arrive as distinct headers, in order.
  EXPECT_CALL(server_visitor, OnHeaderForStream(1, "accept", "text/plain"));
  EXPECT_CALL(server_visitor, OnHeaderForStream(1, "accept", "text/html"));
  EXPECT_CALL(server_visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(server_visitor, OnEndStream(1));
  int64_t result = server_adapter->ProcessBytes(client_visitor.data());
  EXPECT_EQ(client_visitor.data().size(), static_cast<size_t>(result));
}
// Multiple "cookie" headers submitted by the client are delivered to the
// server visitor as a single header whose values are joined with "; "
// (cookie reassembly, see RFC 9113 Section 8.2.3).
TEST(OgHttp2AdapterInteractionTest, ClientServerInteractionWithCookies) {
  TestVisitor client_visitor;
  OgHttp2Adapter::Options client_options;
  client_options.perspective = Perspective::kClient;
  auto client_adapter = OgHttp2Adapter::Create(client_visitor, client_options);
  const std::vector<Header> headers1 =
      ToHeaders({{":method", "GET"},
                 {":scheme", "http"},
                 {":authority", "example.com"},
                 {":path", "/this/is/request/one"},
                 {"cookie", "a; b=2; c"},
                 {"cookie", "d=e, f, g; h"}});
  const int32_t stream_id1 =
      client_adapter->SubmitRequest(headers1, nullptr, true, nullptr);
  ASSERT_GT(stream_id1, 0);
  EXPECT_CALL(client_visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(client_visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(client_visitor,
              OnBeforeFrameSent(HEADERS, stream_id1, _,
                                END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(client_visitor,
              OnFrameSent(HEADERS, stream_id1, _,
                          END_STREAM_FLAG | END_HEADERS_FLAG, 0));
  int send_result = client_adapter->Send();
  EXPECT_EQ(0, send_result);
  // Feed the client's serialized output into a fresh server adapter.
  TestVisitor server_visitor;
  OgHttp2Adapter::Options server_options;
  server_options.perspective = Perspective::kServer;
  auto server_adapter = OgHttp2Adapter::Create(server_visitor, server_options);
  testing::InSequence s;
  EXPECT_CALL(server_visitor, OnFrameHeader(0, _, SETTINGS, 0));
  EXPECT_CALL(server_visitor, OnSettingsStart());
  EXPECT_CALL(server_visitor, OnSetting).Times(testing::AnyNumber());
  EXPECT_CALL(server_visitor, OnSettingsEnd());
  EXPECT_CALL(server_visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(server_visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(server_visitor, OnHeaderForStream(1, ":method", "GET"));
  EXPECT_CALL(server_visitor, OnHeaderForStream(1, ":scheme", "http"));
  EXPECT_CALL(server_visitor,
              OnHeaderForStream(1, ":authority", "example.com"));
  EXPECT_CALL(server_visitor,
              OnHeaderForStream(1, ":path", "/this/is/request/one"));
  // The two cookie values arrive re-joined into one header.
  EXPECT_CALL(server_visitor,
              OnHeaderForStream(1, "cookie", "a; b=2; c; d=e, f, g; h"));
  EXPECT_CALL(server_visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(server_visitor, OnEndStream(1));
  int64_t result = server_adapter->ProcessBytes(client_visitor.data());
  EXPECT_EQ(client_visitor.data().size(), static_cast<size_t>(result));
}
// A client must open streams with strictly increasing ids. After stream 3,
// a HEADERS frame for stream 1 is a connection error
// (kInvalidNewStreamId -> GOAWAY with PROTOCOL_ERROR).
TEST(OgHttp2AdapterTest, ServerForbidsNewStreamBelowWatermark) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_EQ(0, adapter->GetHighestReceivedStreamId());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(3,
                                          {{":method", "POST"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          /*fin=*/false)
                                 .Data(3, "This is the request body.")
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "http"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/two"}},
                                          /*fin=*/true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  // Stream 3 is processed normally and raises the high-water mark.
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":method", "POST"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":scheme", "https"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":authority", "example.com"));
  EXPECT_CALL(visitor, OnHeaderForStream(3, ":path", "/this/is/request/one"));
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnFrameHeader(3, 25, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(3, 25));
  EXPECT_CALL(visitor, OnDataForStream(3, "This is the request body."));
  // Stream 1 is below the watermark: connection error.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kInvalidNewStreamId));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  EXPECT_EQ(3, adapter->GetHighestReceivedStreamId());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// A WINDOW_UPDATE on an idle (never-opened) stream is a connection error
// (kWrongFrameSequence -> GOAWAY with PROTOCOL_ERROR).
TEST(OgHttp2AdapterTest, ServerForbidsWindowUpdateOnIdleStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_EQ(0, adapter->GetHighestReceivedStreamId());
  const std::string frames =
      TestFrameSequence().ClientPreface().WindowUpdate(1, 42).Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kWrongFrameSequence));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  // The offending frame still raises the highest-received-stream watermark.
  EXPECT_EQ(1, adapter->GetHighestReceivedStreamId());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// A DATA frame on an idle (never-opened) stream is a connection error
// (kWrongFrameSequence -> GOAWAY with PROTOCOL_ERROR).
TEST(OgHttp2AdapterTest, ServerForbidsDataOnIdleStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_EQ(0, adapter->GetHighestReceivedStreamId());
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Data(1, "Sorry, out of order")
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kWrongFrameSequence));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  // The offending frame still raises the highest-received-stream watermark.
  EXPECT_EQ(1, adapter->GetHighestReceivedStreamId());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// A RST_STREAM on an idle (never-opened) stream is a connection error
// (kWrongFrameSequence -> GOAWAY with PROTOCOL_ERROR).
TEST(OgHttp2AdapterTest, ServerForbidsRstStreamOnIdleStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_EQ(0, adapter->GetHighestReceivedStreamId());
  const std::string frames =
      TestFrameSequence()
          .ClientPreface()
          .RstStream(1, Http2ErrorCode::ENHANCE_YOUR_CALM)
          .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, RST_STREAM, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kWrongFrameSequence));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  // The offending frame still raises the highest-received-stream watermark.
  EXPECT_EQ(1, adapter->GetHighestReceivedStreamId());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// Verifies that after the client has ACKed the server's
// MAX_CONCURRENT_STREAMS=1 setting, opening a second stream is treated as a
// connection-level protocol error: OnInvalidFrame(kProtocol) for stream 3,
// OnConnectionError(kExceededMaxConcurrentStreams), and a GOAWAY with
// PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ServerForbidsNewStreamAboveStreamLimit) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  // Advertise a limit of a single concurrent stream.
  adapter->SubmitSettings({{MAX_CONCURRENT_STREAMS, 1}});
  const std::string initial_frames =
      TestFrameSequence().ClientPreface().Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(static_cast<size_t>(initial_result), initial_frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // The client ACKs the settings (so the limit is in force), then opens two
  // streams; the second exceeds the advertised limit.
  const std::string stream_frames =
      TestFrameSequence()
          .SettingsAck()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"}},
                   true)
          .Headers(3,
                   {{":method", "GET"},
                    {":scheme", "http"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/two"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor,
              OnFrameHeader(3, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  // Stream 3 exceeds the ACKed limit: protocol error, not a refused stream.
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(3, Http2VisitorInterface::InvalidFrameError::kProtocol));
  EXPECT_CALL(visitor, OnConnectionError(
                           ConnectionError::kExceededMaxConcurrentStreams));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(static_cast<size_t>(stream_result), stream_frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::GOAWAY}));
}
// Verifies the lenient path: when the client opens a stream above the
// server's MAX_CONCURRENT_STREAMS=1 limit *before* ACKing the server's
// settings, the excess stream is merely refused — OnInvalidFrame with
// kRefusedStream and a RST_STREAM(REFUSED_STREAM) — rather than a connection
// error (contrast with ServerForbidsNewStreamAboveStreamLimit).
TEST(OgHttp2AdapterTest, ServerRstStreamsNewStreamAboveStreamLimitBeforeAck) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  adapter->SubmitSettings({{MAX_CONCURRENT_STREAMS, 1}});
  const std::string initial_frames =
      TestFrameSequence().ClientPreface().Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(static_cast<size_t>(initial_result), initial_frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // No SettingsAck() here: two streams arrive before the client ACKs.
  const std::string stream_frames =
      TestFrameSequence()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"}},
                   true)
          .Headers(3,
                   {{":method", "GET"},
                    {":scheme", "http"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/two"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor,
              OnFrameHeader(3, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  3, Http2VisitorInterface::InvalidFrameError::kRefusedStream));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(static_cast<size_t>(stream_result), stream_frames.size());
  EXPECT_TRUE(adapter->want_write());
  // Only the excess stream is reset; the connection stays up.
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, _, 0x0,
                          static_cast<int>(Http2ErrorCode::REFUSED_STREAM)));
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), EqualsFrames({SpdyFrameType::RST_STREAM}));
}
// Verifies that with allow_extended_connect=false a request carrying the
// :protocol pseudo-header (extended CONNECT, RFC 8441) is rejected with
// OnInvalidFrame(kHttpMessaging) and RST_STREAM(PROTOCOL_ERROR). After the
// server submits ENABLE_CONNECT_PROTOCOL=1, a subsequent :protocol request
// (stream 3) is accepted — without waiting for the client's settings ACK.
TEST(OgHttp2AdapterTest, ServerForbidsProtocolPseudoheaderBeforeAck) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  options.allow_extended_connect = false;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string initial_frames =
      TestFrameSequence().ClientPreface().Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(static_cast<size_t>(initial_result), initial_frames.size());
  // Stream 1: extended CONNECT while the feature is disabled.
  const std::string stream1_frames =
      TestFrameSequence()
          .Headers(1,
                   {{":method", "CONNECT"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"},
                    {":protocol", "websocket"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(5);
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  1, Http2VisitorInterface::InvalidFrameError::kHttpMessaging));
  int64_t stream_result = adapter->ProcessBytes(stream1_frames);
  EXPECT_EQ(static_cast<size_t>(stream_result), stream1_frames.size());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  // Enable extended CONNECT for subsequent requests.
  adapter->SubmitSettings({{ENABLE_CONNECT_PROTOCOL, 1}});
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(
      visitor.data(),
      EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                    SpdyFrameType::RST_STREAM, SpdyFrameType::SETTINGS}));
  visitor.Clear();
  // Stream 3: identical request now succeeds.
  const std::string stream3_frames =
      TestFrameSequence()
          .Headers(3,
                   {{":method", "CONNECT"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/two"},
                    {":protocol", "websocket"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor,
              OnFrameHeader(3, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnEndStream(3));
  stream_result = adapter->ProcessBytes(stream3_frames);
  EXPECT_EQ(static_cast<size_t>(stream_result), stream3_frames.size());
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that once the server has submitted ENABLE_CONNECT_PROTOCOL=1 and
// the client has ACKed it, an extended-CONNECT request with the :protocol
// pseudo-header is fully accepted (headers delivered, end of stream, nothing
// queued to write).
TEST(OgHttp2AdapterTest, ServerAllowsProtocolPseudoheaderAfterAck) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  adapter->SubmitSettings({{ENABLE_CONNECT_PROTOCOL, 1}});
  const std::string initial_frames =
      TestFrameSequence().ClientPreface().Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(static_cast<size_t>(initial_result), initial_frames.size());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  visitor.Clear();
  // Client ACKs the setting, then issues an extended CONNECT.
  const std::string stream_frames =
      TestFrameSequence()
          .SettingsAck()
          .Headers(1,
                   {{":method", "CONNECT"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"},
                    {":protocol", "websocket"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, _, SETTINGS, ACK_FLAG));
  EXPECT_CALL(visitor, OnSettingsAck());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(static_cast<size_t>(stream_result), stream_frames.size());
  EXPECT_FALSE(adapter->want_write());
}
// Verifies that once the application resets a stream via SubmitRst, frames
// already queued for that stream (response headers/body, metadata, window
// update) are dropped: only SETTINGS, SETTINGS ack and the RST_STREAM are
// actually written. Parameterized on whether the response body is supplied
// via a DataFrameSource or via the visitor (GetParam()).
TEST_P(OgHttp2AdapterDataTest, SkipsSendingFramesForRejectedStream) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string initial_frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":method", "GET"},
                    {":scheme", "http"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/one"}},
                   true)
          .Serialize()
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t initial_result = adapter->ProcessBytes(initial_frames);
  EXPECT_EQ(static_cast<size_t>(initial_result), initial_frames.size());
  visitor.AppendPayloadForStream(
      1, "Here is some data, which will be completely ignored!");
  auto body = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result =
      adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                              GetParam() ? nullptr : std::move(body), false);
  ASSERT_EQ(0, submit_result);
  auto source = std::make_unique<TestMetadataSource>(ToHeaderBlock(ToHeaders(
      {{"query-cost", "is too darn high"}, {"secret-sauce", "hollandaise"}})));
  adapter->SubmitMetadata(1, 16384u, std::move(source));
  adapter->SubmitWindowUpdate(1, 1024);
  // Resetting the stream should invalidate everything queued above.
  adapter->SubmitRst(1, Http2ErrorCode::INTERNAL_ERROR);
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::INTERNAL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// Verifies that SubmitShutdownNotice() makes the server want to write and
// results in SETTINGS followed by a graceful GOAWAY (error code 0).
TEST(OgHttpAdapterServerTest, ServerStartsShutdown) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  adapter->SubmitShutdownNotice();
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(GOAWAY, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
}
// Verifies that a shutdown notice submitted after an explicit GOAWAY has
// already been sent is a no-op: want_write() stays false afterwards.
TEST(OgHttp2AdapterTest, ServerStartsShutdownAfterGoaway) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  EXPECT_FALSE(adapter->want_write());
  adapter->SubmitGoAway(1, Http2ErrorCode::HTTP2_NO_ERROR,
                        "and don't come back!");
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(GOAWAY, 0, _, 0x0, 0));
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
  // The earlier GOAWAY supersedes the shutdown notice: nothing to write.
  adapter->SubmitShutdownNotice();
  EXPECT_FALSE(adapter->want_write());
}
// Verifies blackhole_data_on_connection_error=true: after a connection error
// (WINDOW_UPDATE on an idle stream), ProcessBytes still reports all input as
// consumed, both for the erroring buffer and for subsequent input.
TEST(OgHttp2AdapterTest, ConnectionErrorWithBlackholingData) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  options.blackhole_data_on_connection_error = true;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence().ClientPreface().WindowUpdate(1, 42).Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kWrongFrameSequence));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(result), frames.size());
  // Later bytes are silently discarded (no visitor callbacks expected).
  const std::string next_frame = TestFrameSequence().Ping(42).Serialize();
  const int64_t next_result = adapter->ProcessBytes(next_frame);
  EXPECT_EQ(static_cast<size_t>(next_result), next_frame.size());
}
// Verifies blackhole_data_on_connection_error=false: after the same
// connection error, ProcessBytes returns a negative value, and so does every
// subsequent call.
TEST(OgHttp2AdapterTest, ConnectionErrorWithoutBlackholingData) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  options.blackhole_data_on_connection_error = false;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  const std::string frames =
      TestFrameSequence().ClientPreface().WindowUpdate(1, 42).Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kWrongFrameSequence));
  const int64_t result = adapter->ProcessBytes(frames);
  EXPECT_LT(result, 0);
  const std::string next_frame = TestFrameSequence().Ping(42).Serialize();
  const int64_t next_result = adapter->ProcessBytes(next_frame);
  EXPECT_LT(next_result, 0);
}
// Verifies that after a connection error triggers an immediate GOAWAY, the
// server writes only its queued SETTINGS and the GOAWAY — the pending
// response, window update and metadata are dropped, and a PING submitted
// afterwards produces no output at all.
TEST_P(OgHttp2AdapterDataTest, ServerDoesNotSendFramesAfterImmediateGoAway) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  adapter->SubmitSettings({{HEADER_TABLE_SIZE, 100u}});
  const std::string frames = TestFrameSequence()
                                 .ClientPreface()
                                 .Headers(1,
                                          {{":method", "GET"},
                                           {":scheme", "https"},
                                           {":authority", "example.com"},
                                           {":path", "/this/is/request/one"}},
                                          true)
                                 .Serialize();
  testing::InSequence s;
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor,
              OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t read_result = adapter->ProcessBytes(frames);
  EXPECT_EQ(static_cast<size_t>(read_result), frames.size())
  // Queue up a response, connection window update, settings and metadata.
  visitor.AppendPayloadForStream(1, "This data is doomed to never be written.");
  auto body = std::make_unique<VisitorDataSource>(visitor, 1);
  int submit_result =
      adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                              GetParam() ? nullptr : std::move(body), false);
  ASSERT_EQ(0, submit_result);
  adapter->SubmitWindowUpdate(kConnectionStreamId, 42);
  adapter->SubmitSettings({});
  auto source = std::make_unique<TestMetadataSource>(ToHeaderBlock(ToHeaders(
      {{"query-cost", "is too darn high"}, {"secret-sauce", "hollandaise"}})));
  adapter->SubmitMetadata(1, 16384u, std::move(source));
  EXPECT_TRUE(adapter->want_write());
  // WINDOW_UPDATE on idle stream 3 forces a connection error.
  const std::string connection_error_frames =
      TestFrameSequence().WindowUpdate(3, 42).Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(3, 4, WINDOW_UPDATE, 0));
  EXPECT_CALL(visitor, OnConnectionError(ConnectionError::kWrongFrameSequence));
  const int64_t result = adapter->ProcessBytes(connection_error_frames);
  EXPECT_EQ(static_cast<size_t>(result), connection_error_frames.size());
  EXPECT_TRUE(adapter->want_write());
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 6, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 6, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(GOAWAY, 0, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(GOAWAY, 0, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  int send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::GOAWAY}));
  visitor.Clear();
  // After GOAWAY, even a freshly submitted PING produces no bytes.
  adapter->SubmitPing(42);
  send_result = adapter->Send();
  EXPECT_EQ(0, send_result);
  EXPECT_THAT(visitor.data(), testing::IsEmpty());
}
// Verifies content-length handling: a request whose DATA exactly matches
// content-length (stream 1) is delivered normally, while a non-numeric
// content-length ("nan", stream 3) triggers OnInvalidFrame(kHttpHeader) and a
// RST_STREAM with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ServerHandlesContentLength) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::string stream_frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1, {{":method", "GET"},
                       {":scheme", "https"},
                       {":authority", "example.com"},
                       {":path", "/this/is/request/one"},
                       {"content-length", "2"}})
          .Data(1, "hi", true)
          .Headers(3,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/three"},
                    {"content-length", "nan"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 1));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 2));
  EXPECT_CALL(visitor, OnDataForStream(1, "hi"));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  // Only 4 headers delivered: the bogus content-length is rejected.
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(3, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// Verifies four content-length mismatch cases, each ending in RST_STREAM with
// PROTOCOL_ERROR:
//   stream 1: content-length 2, only 1 byte of DATA before END_STREAM;
//   stream 3: content-length 2, 5 bytes of DATA ("howdy");
//   stream 5: content-length 2 with END_STREAM on HEADERS (no body at all);
//   stream 7: content-length 2, 1 byte of DATA, then trailers end the stream.
TEST(OgHttp2AdapterTest, ServerHandlesContentLengthMismatch) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::string stream_frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1, {{":method", "GET"},
                       {":scheme", "https"},
                       {":authority", "example.com"},
                       {":path", "/this/is/request/two"},
                       {"content-length", "2"}})
          .Data(1, "h", true)
          .Headers(3, {{":method", "GET"},
                       {":scheme", "https"},
                       {":authority", "example.com"},
                       {":path", "/this/is/request/three"},
                       {"content-length", "2"}})
          .Data(3, "howdy", true)
          .Headers(5,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/four"},
                    {"content-length", "2"}},
                   true)
          .Headers(7,
                   {{":method", "GET"},
                    {":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "/this/is/request/four"},
                    {"content-length", "2"}},
                   false)
          .Data(7, "h", false)
          .Headers(7, {{"extra-info", "Trailers with content-length mismatch"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  // Stream 1: too little data.
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 1));
  EXPECT_CALL(visitor, OnBeginDataForStream(1, 1));
  EXPECT_CALL(visitor, OnDataForStream(1, "h"));
  // Stream 3: too much data; the payload is never delivered.
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(3));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, DATA, 1));
  EXPECT_CALL(visitor, OnBeginDataForStream(3, 5));
  // Stream 5: END_STREAM on HEADERS despite a nonzero content-length.
  EXPECT_CALL(visitor, OnFrameHeader(5, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(5));
  EXPECT_CALL(visitor, OnHeaderForStream(5, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(5));
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  5, Http2VisitorInterface::InvalidFrameError::kHttpMessaging));
  // Stream 7: trailers close the stream with the body still short.
  EXPECT_CALL(visitor, OnFrameHeader(7, _, HEADERS, 4));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(7));
  EXPECT_CALL(visitor, OnHeaderForStream(7, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(7));
  EXPECT_CALL(visitor, OnFrameHeader(7, _, DATA, 0));
  EXPECT_CALL(visitor, OnBeginDataForStream(7, 1));
  EXPECT_CALL(visitor, OnDataForStream(7, "h"));
  EXPECT_CALL(visitor, OnFrameHeader(7, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(7));
  EXPECT_CALL(visitor, OnHeaderForStream(7, _, _));
  EXPECT_CALL(visitor, OnEndHeadersForStream(7));
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  7, Http2VisitorInterface::InvalidFrameError::kHttpMessaging));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 5, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 5, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(5, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 7, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 7, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(7, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(
      visitor.data(),
      EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                    SpdyFrameType::RST_STREAM, SpdyFrameType::RST_STREAM,
                    SpdyFrameType::RST_STREAM, SpdyFrameType::RST_STREAM}));
}
// Verifies that the asterisk-form ":path *" is accepted when the method is
// OPTIONS: the request is delivered in full and no stream error is raised.
TEST(OgHttp2AdapterTest, ServerHandlesAsteriskPathForOptions) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::string stream_frames = TestFrameSequence()
                                        .ClientPreface()
                                        .Headers(1,
                                                 {{":scheme", "https"},
                                                  {":authority", "example.com"},
                                                  {":path", "*"},
                                                  {":method", "OPTIONS"}},
                                                 true)
                                        .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_TRUE(adapter->want_write());
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS}));
}
// Verifies rejection of invalid :path values, each reset with PROTOCOL_ERROR:
//   stream 1: "*" with method GET (asterisk-form only valid for OPTIONS) —
//             kHttpMessaging;
//   stream 3: path not starting with "/" — kHttpMessaging;
//   stream 5: empty path — kHttpHeader (rejected during header validation,
//             hence only 2 headers are delivered).
TEST(OgHttp2AdapterTest, ServerHandlesInvalidPath) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::string stream_frames =
      TestFrameSequence()
          .ClientPreface()
          .Headers(1,
                   {{":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "*"},
                    {":method", "GET"}},
                   true)
          .Headers(3,
                   {{":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", "other/non/slash/starter"},
                    {":method", "GET"}},
                   true)
          .Headers(5,
                   {{":scheme", "https"},
                    {":authority", "example.com"},
                    {":path", ""},
                    {":method", "GET"}},
                   true)
          .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  1, Http2VisitorInterface::InvalidFrameError::kHttpMessaging));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
  EXPECT_CALL(visitor,
              OnInvalidFrame(
                  3, Http2VisitorInterface::InvalidFrameError::kHttpMessaging));
  EXPECT_CALL(visitor, OnFrameHeader(5, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(5));
  EXPECT_CALL(visitor, OnHeaderForStream(5, _, _)).Times(2);
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(5, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 1, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 5, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 5, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(5, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(
      visitor.data(),
      EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                    SpdyFrameType::RST_STREAM, SpdyFrameType::RST_STREAM,
                    SpdyFrameType::RST_STREAM}));
}
// Verifies handling of the "te" request header: the only value allowed in
// HTTP/2 is "trailers" (stream 1, accepted); any other value — here
// "trailers, deflate" on stream 3 — yields OnInvalidFrame(kHttpHeader) and a
// RST_STREAM with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ServerHandlesTeHeader) {
  TestVisitor visitor;
  OgHttp2Adapter::Options options;
  options.perspective = Perspective::kServer;
  auto adapter = OgHttp2Adapter::Create(visitor, options);
  testing::InSequence s;
  const std::string stream_frames = TestFrameSequence()
                                        .ClientPreface()
                                        .Headers(1,
                                                 {{":scheme", "https"},
                                                  {":authority", "example.com"},
                                                  {":path", "/"},
                                                  {":method", "GET"},
                                                  {"te", "trailers"}},
                                                 true)
                                        .Headers(3,
                                                 {{":scheme", "https"},
                                                  {":authority", "example.com"},
                                                  {":path", "/"},
                                                  {":method", "GET"},
                                                  {"te", "trailers, deflate"}},
                                                 true)
                                        .Serialize();
  EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
  EXPECT_CALL(visitor, OnSettingsStart());
  EXPECT_CALL(visitor, OnSettingsEnd());
  EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
  EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(5);
  EXPECT_CALL(visitor, OnEndHeadersForStream(1));
  EXPECT_CALL(visitor, OnEndStream(1));
  EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
  EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
  // The invalid "te" value is rejected, so only 4 headers are delivered.
  EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
  EXPECT_CALL(
      visitor,
      OnInvalidFrame(3, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
  const int64_t stream_result = adapter->ProcessBytes(stream_frames);
  EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
  EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
  EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
  EXPECT_CALL(visitor,
              OnFrameSent(RST_STREAM, 3, _, 0x0,
                          static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
  EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
  EXPECT_TRUE(adapter->want_write());
  int result = adapter->Send();
  EXPECT_EQ(0, result);
  EXPECT_THAT(visitor.data(),
              EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                            SpdyFrameType::RST_STREAM}));
}
// Verifies that a server-side adapter rejects requests carrying
// connection-specific headers, which are forbidden in HTTP/2 (RFC 9113
// §8.2.2): "connection", "proxy-connection", "keep-alive",
// "transfer-encoding" and "upgrade". Each offending stream is reported via
// OnInvalidFrame(kHttpHeader) and reset with PROTOCOL_ERROR.
TEST(OgHttp2AdapterTest, ServerHandlesConnectionSpecificHeaders) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kServer;
auto adapter = OgHttp2Adapter::Create(visitor, options);
testing::InSequence s;
// Five requests, each carrying exactly one forbidden header.
const std::string stream_frames =
    TestFrameSequence()
        .ClientPreface()
        .Headers(1,
                 {{":scheme", "https"},
                  {":authority", "example.com"},
                  {":path", "/"},
                  {":method", "GET"},
                  {"connection", "keep-alive"}},
                 true)
        .Headers(3,
                 {{":scheme", "https"},
                  {":authority", "example.com"},
                  {":path", "/"},
                  {":method", "GET"},
                  {"proxy-connection", "keep-alive"}},
                 true)
        .Headers(5,
                 {{":scheme", "https"},
                  {":authority", "example.com"},
                  {":path", "/"},
                  {":method", "GET"},
                  {"keep-alive", "timeout=42"}},
                 true)
        .Headers(7,
                 {{":scheme", "https"},
                  {":authority", "example.com"},
                  {":path", "/"},
                  {":method", "GET"},
                  {"transfer-encoding", "chunked"}},
                 true)
        .Headers(9,
                 {{":scheme", "https"},
                  {":authority", "example.com"},
                  {":path", "/"},
                  {":method", "GET"},
                  {"upgrade", "h2c"}},
                 true)
        .Serialize();
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
// For each stream, the four pseudo-headers are delivered, then the
// forbidden header triggers an invalid-frame callback instead of
// OnEndHeadersForStream.
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
EXPECT_CALL(
    visitor,
    OnInvalidFrame(1, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
EXPECT_CALL(visitor, OnHeaderForStream(3, _, _)).Times(4);
EXPECT_CALL(
    visitor,
    OnInvalidFrame(3, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
EXPECT_CALL(visitor, OnFrameHeader(5, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(5));
EXPECT_CALL(visitor, OnHeaderForStream(5, _, _)).Times(4);
EXPECT_CALL(
    visitor,
    OnInvalidFrame(5, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
EXPECT_CALL(visitor, OnFrameHeader(7, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(7));
EXPECT_CALL(visitor, OnHeaderForStream(7, _, _)).Times(4);
EXPECT_CALL(
    visitor,
    OnInvalidFrame(7, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
EXPECT_CALL(visitor, OnFrameHeader(9, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(9));
EXPECT_CALL(visitor, OnHeaderForStream(9, _, _)).Times(4);
EXPECT_CALL(
    visitor,
    OnInvalidFrame(9, Http2VisitorInterface::InvalidFrameError::kHttpHeader));
const int64_t stream_result = adapter->ProcessBytes(stream_frames);
EXPECT_EQ(stream_frames.size(), static_cast<size_t>(stream_result));
// On Send(), the server writes its SETTINGS + SETTINGS ack, then one
// RST_STREAM (PROTOCOL_ERROR) per offending stream.
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 1, _, 0x0));
EXPECT_CALL(visitor,
            OnFrameSent(RST_STREAM, 1, _, 0x0,
                        static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
EXPECT_CALL(visitor, OnCloseStream(1, Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 3, _, 0x0));
EXPECT_CALL(visitor,
            OnFrameSent(RST_STREAM, 3, _, 0x0,
                        static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
EXPECT_CALL(visitor, OnCloseStream(3, Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 5, _, 0x0));
EXPECT_CALL(visitor,
            OnFrameSent(RST_STREAM, 5, _, 0x0,
                        static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
EXPECT_CALL(visitor, OnCloseStream(5, Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 7, _, 0x0));
EXPECT_CALL(visitor,
            OnFrameSent(RST_STREAM, 7, _, 0x0,
                        static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
EXPECT_CALL(visitor, OnCloseStream(7, Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_CALL(visitor, OnBeforeFrameSent(RST_STREAM, 9, _, 0x0));
EXPECT_CALL(visitor,
            OnFrameSent(RST_STREAM, 9, _, 0x0,
                        static_cast<int>(Http2ErrorCode::PROTOCOL_ERROR)));
EXPECT_CALL(visitor, OnCloseStream(9, Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_TRUE(adapter->want_write());
int result = adapter->Send();
EXPECT_EQ(0, result);
EXPECT_THAT(
    visitor.data(),
    EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                  SpdyFrameType::RST_STREAM, SpdyFrameType::RST_STREAM,
                  SpdyFrameType::RST_STREAM, SpdyFrameType::RST_STREAM,
                  SpdyFrameType::RST_STREAM}));
}
// Verifies that a user-supplied should_window_update_fn controls flow
// control updates: with a callback that always returns true, consuming only
// 5 bytes of a 25-byte body still produces both a stream-level and a
// connection-level WINDOW_UPDATE on the next Send().
TEST(OgHttp2AdapterTest, ServerUsesCustomWindowUpdateStrategy) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
// Always request a window update, regardless of limit/size/delta.
options.should_window_update_fn = [](int64_t , int64_t ,
                                     int64_t ) { return true; };
options.perspective = Perspective::kServer;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames = TestFrameSequence()
                               .ClientPreface()
                               .Headers(1,
                                        {{":method", "POST"},
                                         {":scheme", "https"},
                                         {":authority", "example.com"},
                                         {":path", "/this/is/request/one"}},
                                        false)
                               .Data(1, "This is the request body.",
                                     true)
                               .Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
EXPECT_CALL(visitor, OnEndHeadersForStream(1));
EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, END_STREAM_FLAG));
EXPECT_CALL(visitor, OnBeginDataForStream(1, _));
EXPECT_CALL(visitor, OnDataForStream(1, "This is the request body."));
EXPECT_CALL(visitor, OnEndStream(1));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(static_cast<int64_t>(frames.size()), result);
// Consume only part of the body; the custom strategy should still trigger
// WINDOW_UPDATEs for both the stream and the connection.
adapter->MarkDataConsumedForStream(1, 5);
EXPECT_TRUE(adapter->want_write());
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 1, 4, 0x0));
EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 1, 4, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 0, 4, 0x0));
EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 0, 4, 0x0, 0));
int send_result = adapter->Send();
EXPECT_EQ(0, send_result);
EXPECT_THAT(visitor.data(),
            EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                          SpdyFrameType::WINDOW_UPDATE,
                          SpdyFrameType::WINDOW_UPDATE}));
}
// Verifies that padding bytes in DATA frames count toward flow control
// consumption: many 1-byte payloads with 254 bytes of padding each (~62 KiB
// total) are processed without stalling, and WINDOW_UPDATEs are emitted for
// both the stream and the connection.
TEST(OgHttp2AdapterTest, ServerConsumesDataWithPadding) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kServer;
auto adapter = OgHttp2Adapter::Create(visitor, options);
TestFrameSequence seq = std::move(TestFrameSequence().ClientPreface().Headers(
    1,
    {{":method", "POST"},
     {":scheme", "https"},
     {":authority", "example.com"},
     {":path", "/this/is/request/one"}},
    false));
// Each frame is 1 payload byte + 254 padding bytes = 255 bytes of flow
// control debt; accumulate just under the default 64 KiB window.
size_t total_size = 0;
while (total_size < 62 * 1024) {
  seq.Data(1, "a", false, 254);
  total_size += 255;
}
const std::string frames = seq.Serialize();
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
EXPECT_CALL(visitor, OnEndHeadersForStream(1));
// 0x8 is the PADDED flag on the DATA frames.
EXPECT_CALL(visitor, OnFrameHeader(1, _, DATA, 0x8))
    .Times(testing::AtLeast(1));
EXPECT_CALL(visitor, OnBeginDataForStream(1, _)).Times(testing::AtLeast(1));
EXPECT_CALL(visitor, OnDataForStream(1, "a")).Times(testing::AtLeast(1));
EXPECT_CALL(visitor, OnDataPaddingLength(1, _)).Times(testing::AtLeast(1));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(result, frames.size());
EXPECT_TRUE(adapter->want_write());
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
// Padding is auto-consumed, so exactly one update per level is expected.
EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 1, _, 0x0)).Times(1);
EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 1, _, 0x0, 0)).Times(1);
EXPECT_CALL(visitor, OnBeforeFrameSent(WINDOW_UPDATE, 0, _, 0x0)).Times(1);
EXPECT_CALL(visitor, OnFrameSent(WINDOW_UPDATE, 0, _, 0x0, 0)).Times(1);
const int send_result = adapter->Send();
EXPECT_EQ(0, send_result);
EXPECT_THAT(visitor.data(),
            EqualsFrames({SpdyFrameType::SETTINGS, SpdyFrameType::SETTINGS,
                          SpdyFrameType::WINDOW_UPDATE,
                          SpdyFrameType::WINDOW_UPDATE}));
}
// Verifies that with validate_http_headers=false the adapter passes through
// headers that a validating server would reject: duplicate/conflicting
// content-length, authority/host mismatch, uppercase header names, invalid
// authority characters, and a non-numeric content-length. Every stream's
// headers are delivered verbatim with no OnInvalidFrame callbacks.
TEST(OgHttp2AdapterTest, NoopHeaderValidatorTest) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kServer;
// Disable header validation entirely.
options.validate_http_headers = false;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames = TestFrameSequence()
                               .ClientPreface()
                               .Headers(1,
                                        {{":method", "POST"},
                                         {":scheme", "https"},
                                         {":authority", "example.com"},
                                         {":path", "/1"},
                                         {"content-length", "7"},
                                         {"content-length", "7"}},
                                        false)
                               .Headers(3,
                                        {{":method", "POST"},
                                         {":scheme", "https"},
                                         {":authority", "example.com"},
                                         {":path", "/3"},
                                         {"content-length", "11"},
                                         {"content-length", "13"}},
                                        false)
                               .Headers(5,
                                        {{":method", "POST"},
                                         {":scheme", "https"},
                                         {":authority", "foo.com"},
                                         {":path", "/"},
                                         {"host", "bar.com"}},
                                        true)
                               .Headers(7,
                                        {{":method", "POST"},
                                         {":scheme", "https"},
                                         {":authority", "example.com"},
                                         {":path", "/"},
                                         {"Accept", "uppercase, oh boy!"}},
                                        false)
                               .Headers(9,
                                        {{":method", "POST"},
                                         {":scheme", "https"},
                                         {":authority", "ex|ample.com"},
                                         {":path", "/"}},
                                        false)
                               .Headers(11,
                                        {{":method", "GET"},
                                         {":scheme", "https"},
                                         {":authority", "example.com"},
                                         {":path", "/"},
                                         {"content-length", "nan"}},
                                        true)
                               .Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 0, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor, OnSettingsEnd());
// Stream 1: duplicate (but matching) content-length is accepted.
EXPECT_CALL(visitor, OnFrameHeader(1, _, HEADERS, 4));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":method", "POST"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(1, ":path", "/1"));
EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "7"));
EXPECT_CALL(visitor, OnHeaderForStream(1, "content-length", "7"));
EXPECT_CALL(visitor, OnEndHeadersForStream(1));
// Stream 3: conflicting content-length values are still delivered.
EXPECT_CALL(visitor, OnFrameHeader(3, _, HEADERS, 4));
EXPECT_CALL(visitor, OnBeginHeadersForStream(3));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":method", "POST"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(3, ":path", "/3"));
EXPECT_CALL(visitor, OnHeaderForStream(3, "content-length", "11"));
EXPECT_CALL(visitor, OnHeaderForStream(3, "content-length", "13"));
EXPECT_CALL(visitor, OnEndHeadersForStream(3));
// Stream 5: :authority and host disagree; no validation error.
EXPECT_CALL(visitor, OnFrameHeader(5, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(5));
EXPECT_CALL(visitor, OnHeaderForStream(5, ":method", "POST"));
EXPECT_CALL(visitor, OnHeaderForStream(5, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(5, ":authority", "foo.com"));
EXPECT_CALL(visitor, OnHeaderForStream(5, ":path", "/"));
EXPECT_CALL(visitor, OnHeaderForStream(5, "host", "bar.com"));
EXPECT_CALL(visitor, OnEndHeadersForStream(5));
EXPECT_CALL(visitor, OnEndStream(5));
// Stream 7: uppercase header name is passed through.
EXPECT_CALL(visitor, OnFrameHeader(7, _, HEADERS, 4));
EXPECT_CALL(visitor, OnBeginHeadersForStream(7));
EXPECT_CALL(visitor, OnHeaderForStream(7, ":method", "POST"));
EXPECT_CALL(visitor, OnHeaderForStream(7, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(7, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(7, ":path", "/"));
EXPECT_CALL(visitor, OnHeaderForStream(7, "Accept", "uppercase, oh boy!"));
EXPECT_CALL(visitor, OnEndHeadersForStream(7));
// Stream 9: invalid character in :authority is passed through.
EXPECT_CALL(visitor, OnFrameHeader(9, _, HEADERS, 4));
EXPECT_CALL(visitor, OnBeginHeadersForStream(9));
EXPECT_CALL(visitor, OnHeaderForStream(9, ":method", "POST"));
EXPECT_CALL(visitor, OnHeaderForStream(9, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(9, ":authority", "ex|ample.com"));
EXPECT_CALL(visitor, OnHeaderForStream(9, ":path", "/"));
EXPECT_CALL(visitor, OnEndHeadersForStream(9));
// Stream 11: non-numeric content-length is passed through.
EXPECT_CALL(visitor, OnFrameHeader(11, _, HEADERS, 5));
EXPECT_CALL(visitor, OnBeginHeadersForStream(11));
EXPECT_CALL(visitor, OnHeaderForStream(11, ":method", "GET"));
EXPECT_CALL(visitor, OnHeaderForStream(11, ":scheme", "https"));
EXPECT_CALL(visitor, OnHeaderForStream(11, ":authority", "example.com"));
EXPECT_CALL(visitor, OnHeaderForStream(11, ":path", "/"));
EXPECT_CALL(visitor, OnHeaderForStream(11, "content-length", "nan"));
EXPECT_CALL(visitor, OnEndHeadersForStream(11));
EXPECT_CALL(visitor, OnEndStream(11));
const int64_t result = adapter->ProcessBytes(frames);
EXPECT_EQ(frames.size(), static_cast<size_t>(result));
}
// Verifies stream resumption when the send window goes negative: the peer
// raises INITIAL_WINDOW_SIZE, the server writes a large response, then the
// peer shrinks the window (making the stream's send window negative). The
// stream stays blocked until a WINDOW_UPDATE makes the window positive
// again, after which queued data is sent. Parameterized on whether the
// response body comes from a DataFrameSource or from visitor callbacks.
TEST_P(OgHttp2AdapterDataTest, NegativeFlowControlStreamResumption) {
TestVisitor visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kServer;
auto adapter = OgHttp2Adapter::Create(visitor, options);
const std::string frames =
    TestFrameSequence()
        .ClientPreface({{INITIAL_WINDOW_SIZE, 128u * 1024u}})
        .WindowUpdate(0, 1 << 20)
        .Headers(1,
                 {{":method", "GET"},
                  {":scheme", "https"},
                  {":authority", "example.com"},
                  {":path", "/this/is/request/one"}},
                 true)
        .Serialize();
testing::InSequence s;
EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor,
            OnSetting(Http2Setting{Http2KnownSettingsId::INITIAL_WINDOW_SIZE,
                                   128u * 1024u}));
EXPECT_CALL(visitor, OnSettingsEnd());
EXPECT_CALL(visitor, OnFrameHeader(0, 4, WINDOW_UPDATE, 0));
EXPECT_CALL(visitor, OnWindowUpdate(0, 1 << 20));
EXPECT_CALL(visitor,
            OnFrameHeader(1, _, HEADERS, END_STREAM_FLAG | END_HEADERS_FLAG));
EXPECT_CALL(visitor, OnBeginHeadersForStream(1));
EXPECT_CALL(visitor, OnHeaderForStream(1, _, _)).Times(4);
EXPECT_CALL(visitor, OnEndHeadersForStream(1));
EXPECT_CALL(visitor, OnEndStream(1));
const int64_t read_result = adapter->ProcessBytes(frames);
EXPECT_EQ(static_cast<size_t>(read_result), frames.size());
// 70000-byte body: larger than the default 64 KiB window, smaller than the
// peer-advertised 128 KiB window, so it all fits initially.
visitor.AppendPayloadForStream(1, std::string(70000, 'a'));
auto body = std::make_unique<VisitorDataSource>(visitor, 1);
int submit_result =
    adapter->SubmitResponse(1, ToHeaders({{":status", "200"}}),
                            GetParam() ? nullptr : std::move(body), false);
ASSERT_EQ(0, submit_result);
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
EXPECT_CALL(visitor, OnBeforeFrameSent(HEADERS, 1, _, END_HEADERS_FLAG));
EXPECT_CALL(visitor, OnFrameSent(HEADERS, 1, _, END_HEADERS_FLAG, 0));
EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0)).Times(5);
adapter->Send();
EXPECT_FALSE(adapter->want_write());
// Peer shrinks the initial window to 64 KiB, driving the stream's send
// window negative (70000 bytes already sent).
EXPECT_CALL(visitor, OnFrameHeader(0, 6, SETTINGS, 0));
EXPECT_CALL(visitor, OnSettingsStart());
EXPECT_CALL(visitor,
            OnSetting(Http2Setting{Http2KnownSettingsId::INITIAL_WINDOW_SIZE,
                                   64u * 1024u}));
EXPECT_CALL(visitor, OnSettingsEnd());
adapter->ProcessBytes(TestFrameSequence()
                          .Settings({{INITIAL_WINDOW_SIZE, 64u * 1024u}})
                          .Serialize());
EXPECT_TRUE(adapter->want_write());
EXPECT_LT(adapter->GetStreamSendWindowSize(1), 0);
visitor.AppendPayloadForStream(1, "Stream should be resumed.");
adapter->ResumeStream(1);
// Only the SETTINGS ack goes out; the stream is still flow-control blocked.
EXPECT_CALL(visitor, OnBeforeFrameSent(SETTINGS, 0, _, ACK_FLAG));
EXPECT_CALL(visitor, OnFrameSent(SETTINGS, 0, _, ACK_FLAG, 0));
adapter->Send();
EXPECT_FALSE(adapter->want_write());
// A WINDOW_UPDATE makes the window positive again; data flows.
EXPECT_CALL(visitor, OnFrameHeader(1, 4, WINDOW_UPDATE, 0));
EXPECT_CALL(visitor, OnWindowUpdate(1, 10000));
adapter->ProcessBytes(TestFrameSequence().WindowUpdate(1, 10000).Serialize());
EXPECT_TRUE(adapter->want_write());
EXPECT_GT(adapter->GetStreamSendWindowSize(1), 0);
EXPECT_CALL(visitor, OnFrameSent(DATA, 1, _, 0x0, 0));
adapter->Send();
}
// End-to-end client/server roundtrip verifying that multiple "set-cookie"
// response headers survive HPACK encoding and decoding as separate header
// entries (they must not be coalesced, unlike "cookie").
TEST(OgHttp2AdapterTest, SetCookieRoundtrip) {
TestVisitor client_visitor;
OgHttp2Adapter::Options options;
options.perspective = Perspective::kClient;
auto client_adapter = OgHttp2Adapter::Create(client_visitor, options);
TestVisitor server_visitor;
options.perspective = Perspective::kServer;
auto server_adapter = OgHttp2Adapter::Create(server_visitor, options);
const std::vector<Header> request_headers =
    ToHeaders({{":method", "GET"},
               {":scheme", "http"},
               {":authority", "example.com"},
               {":path", "/this/is/request/one"}});
// Client sends the request.
const int32_t stream_id1 =
    client_adapter->SubmitRequest(request_headers, nullptr, true, nullptr);
ASSERT_GT(stream_id1, 0);
EXPECT_CALL(client_visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(client_visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(client_visitor,
            OnBeforeFrameSent(HEADERS, stream_id1, _,
                              END_STREAM_FLAG | END_HEADERS_FLAG));
EXPECT_CALL(client_visitor,
            OnFrameSent(HEADERS, stream_id1, _,
                        END_STREAM_FLAG | END_HEADERS_FLAG, 0));
EXPECT_EQ(0, client_adapter->Send());
// Server receives and parses the request.
EXPECT_CALL(server_visitor, OnFrameHeader(0, 6, SETTINGS, 0));
EXPECT_CALL(server_visitor, OnSettingsStart());
EXPECT_CALL(server_visitor,
            OnSetting(Http2Setting{Http2KnownSettingsId::ENABLE_PUSH, 0u}));
EXPECT_CALL(server_visitor, OnSettingsEnd());
EXPECT_CALL(server_visitor, OnFrameHeader(stream_id1, _, HEADERS, 5));
EXPECT_CALL(server_visitor, OnBeginHeadersForStream(stream_id1));
EXPECT_CALL(server_visitor, OnHeaderForStream).Times(4);
EXPECT_CALL(server_visitor, OnEndHeadersForStream(stream_id1));
EXPECT_CALL(server_visitor, OnEndStream(stream_id1));
ASSERT_EQ(client_visitor.data().size(),
          server_adapter->ProcessBytes(client_visitor.data()));
// Server responds with two distinct set-cookie headers.
const std::vector<Header> response_headers =
    ToHeaders({{":status", "200"},
               {"set-cookie", "chocolate_chip=yummy"},
               {"set-cookie", "macadamia_nut=okay"}});
EXPECT_EQ(0, server_adapter->SubmitResponse(stream_id1, response_headers,
                                            nullptr, true));
EXPECT_CALL(server_visitor, OnBeforeFrameSent(SETTINGS, 0, _, 0x0));
EXPECT_CALL(server_visitor, OnFrameSent(SETTINGS, 0, _, 0x0, 0));
EXPECT_CALL(server_visitor, OnBeforeFrameSent(SETTINGS, 0, 0, ACK_FLAG));
EXPECT_CALL(server_visitor, OnFrameSent(SETTINGS, 0, 0, ACK_FLAG, 0));
EXPECT_CALL(server_visitor,
            OnBeforeFrameSent(HEADERS, stream_id1, _,
                              END_STREAM_FLAG | END_HEADERS_FLAG));
EXPECT_CALL(server_visitor,
            OnFrameSent(HEADERS, stream_id1, _,
                        END_STREAM_FLAG | END_HEADERS_FLAG, 0));
EXPECT_CALL(server_visitor,
            OnCloseStream(stream_id1, Http2ErrorCode::HTTP2_NO_ERROR));
EXPECT_EQ(0, server_adapter->Send());
// Client must observe both set-cookie headers as separate entries.
EXPECT_CALL(client_visitor, OnFrameHeader(0, 6, SETTINGS, 0));
EXPECT_CALL(client_visitor, OnSettingsStart());
EXPECT_CALL(client_visitor,
            OnSetting(Http2Setting{
                Http2KnownSettingsId::ENABLE_CONNECT_PROTOCOL, 1u}));
EXPECT_CALL(client_visitor, OnSettingsEnd());
EXPECT_CALL(client_visitor, OnFrameHeader(0, 0, SETTINGS, ACK_FLAG));
EXPECT_CALL(client_visitor, OnSettingsAck());
EXPECT_CALL(client_visitor, OnFrameHeader(stream_id1, _, HEADERS, 5));
EXPECT_CALL(client_visitor, OnBeginHeadersForStream(stream_id1));
EXPECT_CALL(client_visitor, OnHeaderForStream(stream_id1, ":status", "200"));
EXPECT_CALL(client_visitor, OnHeaderForStream(stream_id1, "set-cookie",
                                              "chocolate_chip=yummy"));
EXPECT_CALL(client_visitor, OnHeaderForStream(stream_id1, "set-cookie",
                                              "macadamia_nut=okay"));
EXPECT_CALL(client_visitor, OnEndHeadersForStream(stream_id1));
EXPECT_CALL(client_visitor, OnEndStream(stream_id1));
EXPECT_CALL(client_visitor,
            OnCloseStream(stream_id1, Http2ErrorCode::HTTP2_NO_ERROR));
ASSERT_EQ(server_visitor.data().size(),
          client_adapter->ProcessBytes(server_visitor.data()));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/oghttp2_adapter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/oghttp2_adapter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
29a2a2be-c9b2-4451-bd94-deb8b9faf4a1 | cpp | tensorflow/tensorflow | primitive_util | third_party/xla/xla/primitive_util.cc | third_party/xla/xla/primitive_util_test.cc | #include "xla/primitive_util.h"
#include <cstdint>
#include <limits>
#include <string>
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace primitive_util {
// Returns the number of significand bits for `type` (including the implicit
// leading bit), as reported by std::numeric_limits<>::digits for the
// corresponding native type. `type` must be a floating-point type.
int SignificandWidth(PrimitiveType type) {
  return FloatingPointTypeSwitch<int>(
      [](auto type_tag) -> int {
        return std::numeric_limits<NativeTypeOf<type_tag>>::digits;
      },
      type);
}
// Returns the bit width of the exponent field for `type`: the total bit
// width minus the sign bit and the explicitly stored (trailing) significand
// bits. `type` must be a floating-point type.
int ExponentWidth(PrimitiveType type) {
  // SignificandWidth() counts the implicit leading bit, which is not stored.
  const int stored_significand_bits = SignificandWidth(type) - 1;
  const int sign_bits = 1;
  return BitWidth(type) - stored_significand_bits - sign_bits;
}
// Returns the smallest exponent such that the type can represent a
// normalized value (std::numeric_limits<>::min_exponent of the native
// type). `type` must be a floating-point type.
int UnderflowExponent(PrimitiveType type) {
  return FloatingPointTypeSwitch<int>(
      [](auto type_tag) -> int {
        return std::numeric_limits<NativeTypeOf<type_tag>>::min_exponent;
      },
      type);
}
// Returns one more than the largest representable exponent
// (std::numeric_limits<>::max_exponent of the native type). `type` must be
// a floating-point type.
int OverflowExponent(PrimitiveType type) {
  return FloatingPointTypeSwitch<int>(
      [](auto type_tag) -> int {
        return std::numeric_limits<NativeTypeOf<type_tag>>::max_exponent;
      },
      type);
}
// Returns the exponent bias of `type`: the stored exponent equals the true
// exponent plus this bias. Derived from the minimum normalized exponent as
// bias = (1 - min_exponent) + 1.
int ExponentBias(PrimitiveType type) {
  return 2 - UnderflowExponent(type);
}
// Returns true if `type` is a floating-point type whose native
// representation can encode infinity; false for all non-floating-point
// types.
bool HasInfinity(PrimitiveType type) {
  if (ABSL_PREDICT_FALSE(!IsFloatingPointType(type))) {
    return false;
  }
  return FloatingPointTypeSwitch<bool>(
      [](auto type_tag) -> bool {
        return std::numeric_limits<NativeTypeOf<type_tag>>::has_infinity;
      },
      type);
}
// Returns true if `type` is a floating-point type whose native
// representation has a distinct negative zero; false for all
// non-floating-point types.
bool HasNegativeZero(PrimitiveType type) {
  if (ABSL_PREDICT_FALSE(!IsFloatingPointType(type))) {
    return false;
  }
  return FloatingPointTypeSwitch<bool>(
      [](auto type_tag) -> bool {
        return has_negative_zero_v<NativeTypeOf<type_tag>>;
      },
      type);
}
// Maps a bit width to the corresponding signed integral PrimitiveType.
// Returns PRIMITIVE_TYPE_INVALID for widths without a matching type.
xla::PrimitiveType SignedIntegralTypeForBitWidth(int64_t src_bitwidth) {
  if (src_bitwidth == 2) {
    return xla::S2;
  }
  if (src_bitwidth == 4) {
    return xla::S4;
  }
  if (src_bitwidth == 8) {
    return xla::S8;
  }
  if (src_bitwidth == 16) {
    return xla::S16;
  }
  if (src_bitwidth == 32) {
    return xla::S32;
  }
  if (src_bitwidth == 64) {
    return xla::S64;
  }
  return xla::PRIMITIVE_TYPE_INVALID;
}
// Precomputes the lowercase name of every valid PrimitiveType so lookups
// are a constant-time array index. OPAQUE_TYPE is special-cased as
// "opaque" (its proto enum name would otherwise lowercase to
// "opaque_type"). Entries for invalid enum values stay empty.
class PrimitiveTypeNameGenerator {
 public:
  PrimitiveTypeNameGenerator() {
    for (int i = 0; i < PrimitiveType_ARRAYSIZE; i++) {
      if (i == static_cast<int>(OPAQUE_TYPE)) {
        lowercase_name_[i] = "opaque";
        continue;
      }
      if (!PrimitiveType_IsValid(i)) {
        continue;
      }
      lowercase_name_[i] = absl::AsciiStrToLower(
          PrimitiveType_Name(static_cast<PrimitiveType>(i)));
    }
  }

  // Returns the cached lowercase name; crashes on out-of-range input.
  const std::string& LowercaseName(PrimitiveType t) {
    CHECK_LT(t, PrimitiveType_ARRAYSIZE);
    return lowercase_name_[static_cast<int>(t)];
  }

 private:
  std::string lowercase_name_[PrimitiveType_ARRAYSIZE];
};
// Returns the lowercase string name for `s`, e.g. F32 -> "f32". The
// generator is leaked intentionally to avoid destruction-order issues at
// shutdown.
const std::string& LowercasePrimitiveTypeName(PrimitiveType s) {
  static PrimitiveTypeNameGenerator* const generator =
      new PrimitiveTypeNameGenerator();
  return generator->LowercaseName(s);
}
namespace {
// Returns a map from lowercase type name to PrimitiveType, built once on
// first use and leaked intentionally. "opaque" maps to OPAQUE_TYPE,
// overriding the enum-derived "opaque_type" spelling.
//
// Fix: the initializer lambda runs exactly once (it seeds a function-local
// static), so the `static` on the inner map pointer was redundant and
// misleading; it is now a plain local. The cached pointer is also
// pointer-to-const to match the function's const-ref return.
const absl::flat_hash_map<std::string, PrimitiveType>&
GetPrimitiveTypeStringMap() {
  static const absl::flat_hash_map<std::string, PrimitiveType>* name_to_type =
      [] {
        auto* map = new absl::flat_hash_map<std::string, PrimitiveType>;
        for (int i = 0; i < PrimitiveType_ARRAYSIZE; i++) {
          if (PrimitiveType_IsValid(i) && i != PRIMITIVE_TYPE_INVALID) {
            auto value = static_cast<PrimitiveType>(i);
            (*map)[LowercasePrimitiveTypeName(value)] = value;
          }
        }
        (*map)["opaque"] = OPAQUE_TYPE;
        return map;
      }();
  return *name_to_type;
}
}
// Parses a lowercase type name (e.g. "f32", "pred", "opaque") into the
// corresponding PrimitiveType; returns InvalidArgument for unknown names.
absl::StatusOr<PrimitiveType> StringToPrimitiveType(absl::string_view name) {
  const auto& name_map = GetPrimitiveTypeStringMap();
  auto it = name_map.find(name);
  if (it != name_map.end()) {
    return it->second;
  }
  return InvalidArgument("Invalid element type string: \"%s\".", name);
}
// Returns true if `name` is the lowercase name of some PrimitiveType
// (including "opaque").
//
// Fix: use flat_hash_map::contains() instead of find()/end() comparison —
// same heterogeneous string_view lookup, clearer intent.
bool IsPrimitiveTypeName(absl::string_view name) {
  return GetPrimitiveTypeStringMap().contains(name);
}
}
} | #include "xla/primitive_util.h"
#include <numeric>
#include <string>
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
// Verifies StringToPrimitiveType: lowercase canonical names parse to the
// expected enum values, and non-lowercase spellings are rejected (parsing
// is case-sensitive).
TEST(PrimitiveUtilTest, StringToPrimitiveType) {
auto expect_ok_and_equal = [](const std::string& str,
                              PrimitiveType expected) {
  TF_ASSERT_OK_AND_ASSIGN(PrimitiveType actual,
                          primitive_util::StringToPrimitiveType(str));
  EXPECT_EQ(expected, actual);
};
expect_ok_and_equal("f32", F32);
expect_ok_and_equal("tuple", TUPLE);
expect_ok_and_equal("pred", PRED);
expect_ok_and_equal("s32", S32);
// Any uppercase character makes the name invalid.
EXPECT_IS_NOT_OK(primitive_util::StringToPrimitiveType("F32").status());
EXPECT_IS_NOT_OK(primitive_util::StringToPrimitiveType("Pred").status());
EXPECT_IS_NOT_OK(primitive_util::StringToPrimitiveType("preD").status());
}
// Spot-checks the floating-point property accessors for F32 and BF16:
// BF16 shares F32's 8-bit exponent (hence identical under/overflow
// exponents) but has a much smaller significand.
TEST(PrimitiveUtilTest, FloatTypes) {
EXPECT_EQ(primitive_util::SignificandWidth(F32), 24);
EXPECT_EQ(primitive_util::SignificandWidth(BF16), 8);
EXPECT_EQ(primitive_util::ExponentWidth(F32), 8);
EXPECT_EQ(primitive_util::ExponentWidth(BF16), 8);
EXPECT_EQ(primitive_util::UnderflowExponent(F32), -125);
EXPECT_EQ(primitive_util::UnderflowExponent(BF16), -125);
EXPECT_EQ(primitive_util::OverflowExponent(F32), 128);
EXPECT_EQ(primitive_util::OverflowExponent(BF16), 128);
}
TEST(PrimitiveUtilTest, CastPreservesValues) {
bool expecteds[PrimitiveType_ARRAYSIZE][PrimitiveType_ARRAYSIZE];
expecteds[PRED][PRED] = true;
expecteds[PRED][S2] = true;
expecteds[PRED][S4] = true;
expecteds[PRED][S8] = true;
expecteds[PRED][S16] = true;
expecteds[PRED][S32] = true;
expecteds[PRED][S64] = true;
expecteds[PRED][U2] = true;
expecteds[PRED][U4] = true;
expecteds[PRED][U8] = true;
expecteds[PRED][U16] = true;
expecteds[PRED][U32] = true;
expecteds[PRED][U64] = true;
expecteds[PRED][F16] = true;
expecteds[PRED][F32] = true;
expecteds[PRED][F64] = true;
expecteds[PRED][C64] = true;
expecteds[PRED][BF16] = true;
expecteds[PRED][C128] = true;
expecteds[PRED][F8E5M2] = true;
expecteds[PRED][F8E4M3] = true;
expecteds[PRED][F8E4M3FN] = true;
expecteds[PRED][F8E4M3B11FNUZ] = true;
expecteds[PRED][F8E5M2FNUZ] = true;
expecteds[PRED][F8E4M3FNUZ] = true;
expecteds[PRED][F8E3M4] = true;
expecteds[S2][PRED] = false;
expecteds[S2][S2] = true;
expecteds[S2][S4] = true;
expecteds[S2][S8] = true;
expecteds[S2][S16] = true;
expecteds[S2][S32] = true;
expecteds[S2][S64] = true;
expecteds[S2][U2] = false;
expecteds[S2][U4] = false;
expecteds[S2][U8] = false;
expecteds[S2][U16] = false;
expecteds[S2][U32] = false;
expecteds[S2][U64] = false;
expecteds[S2][F16] = true;
expecteds[S2][F32] = true;
expecteds[S2][F64] = true;
expecteds[S2][C64] = true;
expecteds[S2][BF16] = true;
expecteds[S2][C128] = true;
expecteds[S2][F8E5M2] = true;
expecteds[S2][F8E4M3] = true;
expecteds[S2][F8E4M3FN] = true;
expecteds[S2][F8E4M3B11FNUZ] = true;
expecteds[S2][F8E5M2FNUZ] = true;
expecteds[S2][F8E4M3FNUZ] = true;
expecteds[S2][F8E3M4] = true;
expecteds[S4][PRED] = false;
expecteds[S4][S2] = false;
expecteds[S4][S4] = true;
expecteds[S4][S8] = true;
expecteds[S4][S16] = true;
expecteds[S4][S32] = true;
expecteds[S4][S64] = true;
expecteds[S4][U2] = false;
expecteds[S4][U4] = false;
expecteds[S4][U8] = false;
expecteds[S4][U16] = false;
expecteds[S4][U32] = false;
expecteds[S4][U64] = false;
expecteds[S4][F16] = true;
expecteds[S4][F32] = true;
expecteds[S4][F64] = true;
expecteds[S4][C64] = true;
expecteds[S4][BF16] = true;
expecteds[S4][C128] = true;
expecteds[S4][F8E5M2] = true;
expecteds[S4][F8E4M3] = true;
expecteds[S4][F8E4M3FN] = true;
expecteds[S4][F8E4M3B11FNUZ] = true;
expecteds[S4][F8E5M2FNUZ] = true;
expecteds[S4][F8E4M3FNUZ] = true;
expecteds[S4][F8E3M4] = true;
expecteds[S8][PRED] = false;
expecteds[S8][S2] = false;
expecteds[S8][S4] = false;
expecteds[S8][S8] = true;
expecteds[S8][S16] = true;
expecteds[S8][S32] = true;
expecteds[S8][S64] = true;
expecteds[S8][U2] = false;
expecteds[S8][U4] = false;
expecteds[S8][U8] = false;
expecteds[S8][U16] = false;
expecteds[S8][U32] = false;
expecteds[S8][U64] = false;
expecteds[S8][F16] = true;
expecteds[S8][F32] = true;
expecteds[S8][F64] = true;
expecteds[S8][C64] = true;
expecteds[S8][BF16] = true;
expecteds[S8][C128] = true;
expecteds[S8][F8E5M2] = false;
expecteds[S8][F8E4M3] = false;
expecteds[S8][F8E4M3FN] = false;
expecteds[S8][F8E4M3B11FNUZ] = false;
expecteds[S8][F8E5M2FNUZ] = false;
expecteds[S8][F8E4M3FNUZ] = false;
expecteds[S8][F8E3M4] = false;
expecteds[S16][PRED] = false;
expecteds[S16][S2] = false;
expecteds[S16][S4] = false;
expecteds[S16][S8] = false;
expecteds[S16][S16] = true;
expecteds[S16][S32] = true;
expecteds[S16][S64] = true;
expecteds[S16][U2] = false;
expecteds[S16][U4] = false;
expecteds[S16][U8] = false;
expecteds[S16][U16] = false;
expecteds[S16][U32] = false;
expecteds[S16][U64] = false;
expecteds[S16][F16] = false;
expecteds[S16][F32] = true;
expecteds[S16][F64] = true;
expecteds[S16][C64] = true;
expecteds[S16][BF16] = false;
expecteds[S16][C128] = true;
expecteds[S16][F8E5M2] = false;
expecteds[S16][F8E4M3] = false;
expecteds[S16][F8E4M3FN] = false;
expecteds[S16][F8E4M3B11FNUZ] = false;
expecteds[S16][F8E5M2FNUZ] = false;
expecteds[S16][F8E4M3FNUZ] = false;
expecteds[S16][F8E3M4] = false;
expecteds[S32][PRED] = false;
expecteds[S32][S2] = false;
expecteds[S32][S4] = false;
expecteds[S32][S8] = false;
expecteds[S32][S16] = false;
expecteds[S32][S32] = true;
expecteds[S32][S64] = true;
expecteds[S32][U2] = false;
expecteds[S32][U4] = false;
expecteds[S32][U8] = false;
expecteds[S32][U16] = false;
expecteds[S32][U32] = false;
expecteds[S32][U64] = false;
expecteds[S32][F16] = false;
expecteds[S32][F32] = false;
expecteds[S32][F64] = true;
expecteds[S32][C64] = false;
expecteds[S32][BF16] = false;
expecteds[S32][C128] = true;
expecteds[S32][F8E5M2] = false;
expecteds[S32][F8E4M3] = false;
expecteds[S32][F8E4M3FN] = false;
expecteds[S32][F8E4M3B11FNUZ] = false;
expecteds[S32][F8E5M2FNUZ] = false;
expecteds[S32][F8E4M3FNUZ] = false;
expecteds[S32][F8E3M4] = false;
expecteds[S64][PRED] = false;
expecteds[S64][S2] = false;
expecteds[S64][S4] = false;
expecteds[S64][S8] = false;
expecteds[S64][S16] = false;
expecteds[S64][S32] = false;
expecteds[S64][S64] = true;
expecteds[S64][U2] = false;
expecteds[S64][U4] = false;
expecteds[S64][U8] = false;
expecteds[S64][U16] = false;
expecteds[S64][U32] = false;
expecteds[S64][U64] = false;
expecteds[S64][F16] = false;
expecteds[S64][F32] = false;
expecteds[S64][F64] = false;
expecteds[S64][C64] = false;
expecteds[S64][BF16] = false;
expecteds[S64][C128] = false;
expecteds[S64][F8E5M2] = false;
expecteds[S64][F8E4M3] = false;
expecteds[S64][F8E4M3FN] = false;
expecteds[S64][F8E4M3B11FNUZ] = false;
expecteds[S64][F8E5M2FNUZ] = false;
expecteds[S64][F8E4M3FNUZ] = false;
expecteds[S64][F8E3M4] = false;
expecteds[U2][PRED] = false;
expecteds[U2][S2] = false;
expecteds[U2][S4] = true;
expecteds[U2][S8] = true;
expecteds[U2][S16] = true;
expecteds[U2][S32] = true;
expecteds[U2][S64] = true;
expecteds[U2][U2] = true;
expecteds[U2][U4] = true;
expecteds[U2][U8] = true;
expecteds[U2][U16] = true;
expecteds[U2][U32] = true;
expecteds[U2][U64] = true;
expecteds[U2][F16] = true;
expecteds[U2][F32] = true;
expecteds[U2][F64] = true;
expecteds[U2][C64] = true;
expecteds[U2][BF16] = true;
expecteds[U2][C128] = true;
expecteds[U2][BF16] = true;
expecteds[U2][C128] = true;
expecteds[U2][F8E5M2] = true;
expecteds[U2][F8E4M3] = true;
expecteds[U2][F8E4M3FN] = true;
expecteds[U2][F8E4M3B11FNUZ] = true;
expecteds[U2][F8E5M2FNUZ] = true;
expecteds[U2][F8E4M3FNUZ] = true;
expecteds[U2][F8E3M4] = true;
expecteds[U4][PRED] = false;
expecteds[U4][S2] = false;
expecteds[U4][S4] = false;
expecteds[U4][S8] = true;
expecteds[U4][S16] = true;
expecteds[U4][S32] = true;
expecteds[U4][S64] = true;
expecteds[U4][U2] = false;
expecteds[U4][U4] = true;
expecteds[U4][U8] = true;
expecteds[U4][U16] = true;
expecteds[U4][U32] = true;
expecteds[U4][U64] = true;
expecteds[U4][F16] = true;
expecteds[U4][F32] = true;
expecteds[U4][F64] = true;
expecteds[U4][C64] = true;
expecteds[U4][BF16] = true;
expecteds[U4][C128] = true;
expecteds[U4][BF16] = true;
expecteds[U4][C128] = true;
expecteds[U4][F8E5M2] = false;
expecteds[U4][F8E4M3] = true;
expecteds[U4][F8E4M3FN] = true;
expecteds[U4][F8E4M3B11FNUZ] = true;
expecteds[U4][F8E5M2FNUZ] = false;
expecteds[U4][F8E4M3FNUZ] = true;
expecteds[U4][F8E3M4] = true;
expecteds[U8][PRED] = false;
expecteds[U8][S2] = false;
expecteds[U8][S4] = false;
expecteds[U8][S8] = false;
expecteds[U8][S16] = true;
expecteds[U8][S32] = true;
expecteds[U8][S64] = true;
expecteds[U8][U2] = false;
expecteds[U8][U4] = false;
expecteds[U8][U8] = true;
expecteds[U8][U16] = true;
expecteds[U8][U32] = true;
expecteds[U8][U64] = true;
expecteds[U8][F16] = true;
expecteds[U8][F32] = true;
expecteds[U8][F64] = true;
expecteds[U8][C64] = true;
expecteds[U8][BF16] = true;
expecteds[U8][C128] = true;
expecteds[U8][BF16] = true;
expecteds[U8][C128] = true;
expecteds[U8][F8E5M2] = false;
expecteds[U8][F8E4M3] = false;
expecteds[U8][F8E4M3FN] = false;
expecteds[U8][F8E4M3B11FNUZ] = false;
expecteds[U8][F8E5M2FNUZ] = false;
expecteds[U8][F8E4M3FNUZ] = false;
expecteds[U8][F8E3M4] = false;
expecteds[U16][PRED] = false;
expecteds[U16][S2] = false;
expecteds[U16][S4] = false;
expecteds[U16][S8] = false;
expecteds[U16][S16] = false;
expecteds[U16][S32] = true;
expecteds[U16][S64] = true;
expecteds[U16][U2] = false;
expecteds[U16][U4] = false;
expecteds[U16][U8] = false;
expecteds[U16][U16] = true;
expecteds[U16][U32] = true;
expecteds[U16][U64] = true;
expecteds[U16][F16] = false;
expecteds[U16][F32] = true;
expecteds[U16][F64] = true;
expecteds[U16][C64] = true;
expecteds[U16][BF16] = false;
expecteds[U16][C128] = true;
expecteds[U16][F8E5M2] = false;
expecteds[U16][F8E4M3] = false;
expecteds[U16][F8E4M3FN] = false;
expecteds[U16][F8E4M3B11FNUZ] = false;
expecteds[U16][F8E5M2FNUZ] = false;
expecteds[U16][F8E4M3FNUZ] = false;
expecteds[U16][F8E3M4] = false;
expecteds[U32][PRED] = false;
expecteds[U32][S2] = false;
expecteds[U32][S4] = false;
expecteds[U32][S8] = false;
expecteds[U32][S16] = false;
expecteds[U32][S32] = false;
expecteds[U32][S64] = true;
expecteds[U32][U2] = false;
expecteds[U32][U4] = false;
expecteds[U32][U8] = false;
expecteds[U32][U16] = false;
expecteds[U32][U32] = true;
expecteds[U32][U64] = true;
expecteds[U32][F16] = false;
expecteds[U32][F32] = false;
expecteds[U32][F64] = true;
expecteds[U32][C64] = false;
expecteds[U32][BF16] = false;
expecteds[U32][C128] = true;
expecteds[U32][F8E5M2] = false;
expecteds[U32][F8E4M3] = false;
expecteds[U32][F8E4M3FN] = false;
expecteds[U32][F8E4M3B11FNUZ] = false;
expecteds[U32][F8E5M2FNUZ] = false;
expecteds[U32][F8E4M3FNUZ] = false;
expecteds[U32][F8E3M4] = false;
expecteds[U64][PRED] = false;
expecteds[U64][S2] = false;
expecteds[U64][S4] = false;
expecteds[U64][S8] = false;
expecteds[U64][S16] = false;
expecteds[U64][S32] = false;
expecteds[U64][S64] = false;
expecteds[U64][U2] = false;
expecteds[U64][U4] = false;
expecteds[U64][U8] = false;
expecteds[U64][U16] = false;
expecteds[U64][U32] = false;
expecteds[U64][U64] = true;
expecteds[U64][F16] = false;
expecteds[U64][F32] = false;
expecteds[U64][F64] = false;
expecteds[U64][C64] = false;
expecteds[U64][BF16] = false;
expecteds[U64][C128] = false;
expecteds[U64][F8E5M2] = false;
expecteds[U64][F8E4M3] = false;
expecteds[U64][F8E4M3FN] = false;
expecteds[U64][F8E4M3B11FNUZ] = false;
expecteds[U64][F8E5M2FNUZ] = false;
expecteds[U64][F8E4M3FNUZ] = false;
expecteds[U64][F8E3M4] = false;
expecteds[F16][PRED] = false;
expecteds[F16][S2] = false;
expecteds[F16][S4] = false;
expecteds[F16][S8] = false;
expecteds[F16][S16] = false;
expecteds[F16][S32] = false;
expecteds[F16][S64] = false;
expecteds[F16][U2] = false;
expecteds[F16][U4] = false;
expecteds[F16][U8] = false;
expecteds[F16][U16] = false;
expecteds[F16][U32] = false;
expecteds[F16][U64] = false;
expecteds[F16][F16] = true;
expecteds[F16][F32] = true;
expecteds[F16][F64] = true;
expecteds[F16][C64] = true;
expecteds[F16][BF16] = false;
expecteds[F16][C128] = true;
expecteds[F16][F8E5M2] = false;
expecteds[F16][F8E4M3] = false;
expecteds[F16][F8E4M3FN] = false;
expecteds[F16][F8E4M3B11FNUZ] = false;
expecteds[F16][F8E5M2FNUZ] = false;
expecteds[F16][F8E4M3FNUZ] = false;
expecteds[F16][F8E3M4] = false;
expecteds[F32][PRED] = false;
expecteds[F32][S2] = false;
expecteds[F32][S4] = false;
expecteds[F32][S8] = false;
expecteds[F32][S16] = false;
expecteds[F32][S32] = false;
expecteds[F32][S64] = false;
expecteds[F32][U2] = false;
expecteds[F32][U4] = false;
expecteds[F32][U8] = false;
expecteds[F32][U16] = false;
expecteds[F32][U32] = false;
expecteds[F32][U64] = false;
expecteds[F32][F16] = false;
expecteds[F32][F32] = true;
expecteds[F32][F64] = true;
expecteds[F32][C64] = true;
expecteds[F32][BF16] = false;
expecteds[F32][C128] = true;
expecteds[F32][F8E5M2] = false;
expecteds[F32][F8E4M3] = false;
expecteds[F32][F8E4M3FN] = false;
expecteds[F32][F8E4M3B11FNUZ] = false;
expecteds[F32][F8E5M2FNUZ] = false;
expecteds[F32][F8E4M3FNUZ] = false;
expecteds[F32][F8E3M4] = false;
expecteds[F64][PRED] = false;
expecteds[F64][S2] = false;
expecteds[F64][S4] = false;
expecteds[F64][S8] = false;
expecteds[F64][S16] = false;
expecteds[F64][S32] = false;
expecteds[F64][S64] = false;
expecteds[F64][U2] = false;
expecteds[F64][U4] = false;
expecteds[F64][U8] = false;
expecteds[F64][U16] = false;
expecteds[F64][U32] = false;
expecteds[F64][U64] = false;
expecteds[F64][F16] = false;
expecteds[F64][F32] = false;
expecteds[F64][F64] = true;
expecteds[F64][C64] = false;
expecteds[F64][BF16] = false;
expecteds[F64][C128] = true;
expecteds[F64][F8E5M2] = false;
expecteds[F64][F8E4M3] = false;
expecteds[F64][F8E4M3FN] = false;
expecteds[F64][F8E4M3B11FNUZ] = false;
expecteds[F64][F8E5M2FNUZ] = false;
expecteds[F64][F8E4M3FNUZ] = false;
expecteds[F64][F8E3M4] = false;
expecteds[C64][PRED] = false;
expecteds[C64][S2] = false;
expecteds[C64][S4] = false;
expecteds[C64][S8] = false;
expecteds[C64][S16] = false;
expecteds[C64][S32] = false;
expecteds[C64][S64] = false;
expecteds[C64][U2] = false;
expecteds[C64][U4] = false;
expecteds[C64][U8] = false;
expecteds[C64][U16] = false;
expecteds[C64][U32] = false;
expecteds[C64][U64] = false;
expecteds[C64][F16] = false;
expecteds[C64][F32] = false;
expecteds[C64][F64] = false;
expecteds[C64][C64] = true;
expecteds[C64][BF16] = false;
expecteds[C64][C128] = true;
expecteds[C64][F8E5M2] = false;
expecteds[C64][F8E4M3] = false;
expecteds[C64][F8E4M3FN] = false;
expecteds[C64][F8E4M3B11FNUZ] = false;
expecteds[C64][F8E5M2FNUZ] = false;
expecteds[C64][F8E4M3FNUZ] = false;
expecteds[C64][F8E3M4] = false;
expecteds[BF16][PRED] = false;
expecteds[BF16][S2] = false;
expecteds[BF16][S4] = false;
expecteds[BF16][S8] = false;
expecteds[BF16][S16] = false;
expecteds[BF16][S32] = false;
expecteds[BF16][S64] = false;
expecteds[BF16][U2] = false;
expecteds[BF16][U4] = false;
expecteds[BF16][U8] = false;
expecteds[BF16][U16] = false;
expecteds[BF16][U32] = false;
expecteds[BF16][U64] = false;
expecteds[BF16][F16] = false;
expecteds[BF16][F32] = true;
expecteds[BF16][F64] = true;
expecteds[BF16][C64] = true;
expecteds[BF16][BF16] = true;
expecteds[BF16][C128] = true;
expecteds[BF16][F8E5M2] = false;
expecteds[BF16][F8E4M3] = false;
expecteds[BF16][F8E4M3FN] = false;
expecteds[BF16][F8E4M3B11FNUZ] = false;
expecteds[BF16][F8E5M2FNUZ] = false;
expecteds[BF16][F8E4M3FNUZ] = false;
expecteds[BF16][F8E3M4] = false;
expecteds[C128][PRED] = false;
expecteds[C128][S2] = false;
expecteds[C128][S4] = false;
expecteds[C128][S8] = false;
expecteds[C128][S16] = false;
expecteds[C128][S32] = false;
expecteds[C128][S64] = false;
expecteds[C128][U2] = false;
expecteds[C128][U4] = false;
expecteds[C128][U8] = false;
expecteds[C128][U16] = false;
expecteds[C128][U32] = false;
expecteds[C128][U64] = false;
expecteds[C128][F16] = false;
expecteds[C128][F32] = false;
expecteds[C128][F64] = false;
expecteds[C128][C64] = false;
expecteds[C128][BF16] = false;
expecteds[C128][C128] = true;
expecteds[C128][F8E5M2] = false;
expecteds[C128][F8E4M3] = false;
expecteds[C128][F8E4M3FN] = false;
expecteds[C128][F8E4M3B11FNUZ] = false;
expecteds[C128][F8E5M2FNUZ] = false;
expecteds[C128][F8E4M3FNUZ] = false;
expecteds[C128][F8E3M4] = false;
expecteds[F8E5M2][PRED] = false;
expecteds[F8E5M2][S2] = false;
expecteds[F8E5M2][S4] = false;
expecteds[F8E5M2][S8] = false;
expecteds[F8E5M2][S16] = false;
expecteds[F8E5M2][S32] = false;
expecteds[F8E5M2][S64] = false;
expecteds[F8E5M2][U2] = false;
expecteds[F8E5M2][U4] = false;
expecteds[F8E5M2][U8] = false;
expecteds[F8E5M2][U16] = false;
expecteds[F8E5M2][U32] = false;
expecteds[F8E5M2][U64] = false;
expecteds[F8E5M2][F16] = true;
expecteds[F8E5M2][F32] = true;
expecteds[F8E5M2][F64] = true;
expecteds[F8E5M2][C64] = true;
expecteds[F8E5M2][BF16] = true;
expecteds[F8E5M2][C128] = true;
expecteds[F8E5M2][F8E5M2] = true;
expecteds[F8E5M2][F8E4M3] = false;
expecteds[F8E5M2][F8E4M3FN] = false;
expecteds[F8E5M2][F8E4M3B11FNUZ] = false;
expecteds[F8E5M2][F8E5M2FNUZ] = false;
expecteds[F8E5M2][F8E4M3FNUZ] = false;
expecteds[F8E5M2][F8E3M4] = false;
expecteds[F8E4M3][PRED] = false;
expecteds[F8E4M3][S2] = false;
expecteds[F8E4M3][S4] = false;
expecteds[F8E4M3][S8] = false;
expecteds[F8E4M3][S16] = false;
expecteds[F8E4M3][S32] = false;
expecteds[F8E4M3][S64] = false;
expecteds[F8E4M3][U2] = false;
expecteds[F8E4M3][U4] = false;
expecteds[F8E4M3][U8] = false;
expecteds[F8E4M3][U16] = false;
expecteds[F8E4M3][U32] = false;
expecteds[F8E4M3][U64] = false;
expecteds[F8E4M3][F16] = true;
expecteds[F8E4M3][F32] = true;
expecteds[F8E4M3][F64] = true;
expecteds[F8E4M3][C64] = true;
expecteds[F8E4M3][BF16] = true;
expecteds[F8E4M3][C128] = true;
expecteds[F8E4M3][F8E5M2] = false;
expecteds[F8E4M3][F8E5M2FNUZ] = false;
expecteds[F8E4M3][F8E4M3] = true;
expecteds[F8E4M3][F8E4M3FN] = false;
expecteds[F8E4M3][F8E4M3FNUZ] = false;
expecteds[F8E4M3][F8E4M3B11FNUZ] = false;
expecteds[F8E4M3][F8E3M4] = false;
expecteds[F8E4M3FN][PRED] = false;
expecteds[F8E4M3FN][S2] = false;
expecteds[F8E4M3FN][S4] = false;
expecteds[F8E4M3FN][S8] = false;
expecteds[F8E4M3FN][S16] = false;
expecteds[F8E4M3FN][S32] = false;
expecteds[F8E4M3FN][S64] = false;
expecteds[F8E4M3FN][U2] = false;
expecteds[F8E4M3FN][U4] = false;
expecteds[F8E4M3FN][U8] = false;
expecteds[F8E4M3FN][U16] = false;
expecteds[F8E4M3FN][U32] = false;
expecteds[F8E4M3FN][U64] = false;
expecteds[F8E4M3FN][F16] = true;
expecteds[F8E4M3FN][F32] = true;
expecteds[F8E4M3FN][F64] = true;
expecteds[F8E4M3FN][C64] = true;
expecteds[F8E4M3FN][BF16] = true;
expecteds[F8E4M3FN][C128] = true;
expecteds[F8E4M3FN][F8E5M2] = false;
expecteds[F8E4M3FN][F8E5M2FNUZ] = false;
expecteds[F8E4M3FN][F8E4M3] = false;
expecteds[F8E4M3FN][F8E4M3FN] = true;
expecteds[F8E4M3FN][F8E4M3FNUZ] = false;
expecteds[F8E4M3FN][F8E4M3B11FNUZ] = false;
expecteds[F8E4M3FN][F8E3M4] = false;
expecteds[F8E4M3B11FNUZ][PRED] = false;
expecteds[F8E4M3B11FNUZ][S2] = false;
expecteds[F8E4M3B11FNUZ][S4] = false;
expecteds[F8E4M3B11FNUZ][S8] = false;
expecteds[F8E4M3B11FNUZ][S16] = false;
expecteds[F8E4M3B11FNUZ][S32] = false;
expecteds[F8E4M3B11FNUZ][S64] = false;
expecteds[F8E4M3B11FNUZ][U2] = false;
expecteds[F8E4M3B11FNUZ][U4] = false;
expecteds[F8E4M3B11FNUZ][U8] = false;
expecteds[F8E4M3B11FNUZ][U16] = false;
expecteds[F8E4M3B11FNUZ][U32] = false;
expecteds[F8E4M3B11FNUZ][U64] = false;
expecteds[F8E4M3B11FNUZ][F16] = true;
expecteds[F8E4M3B11FNUZ][F32] = true;
expecteds[F8E4M3B11FNUZ][F64] = true;
expecteds[F8E4M3B11FNUZ][C64] = true;
expecteds[F8E4M3B11FNUZ][BF16] = true;
expecteds[F8E4M3B11FNUZ][C128] = true;
expecteds[F8E4M3B11FNUZ][F8E5M2] = false;
expecteds[F8E4M3B11FNUZ][F8E4M3] = false;
expecteds[F8E4M3B11FNUZ][F8E4M3FN] = false;
expecteds[F8E4M3B11FNUZ][F8E4M3B11FNUZ] = true;
expecteds[F8E4M3B11FNUZ][F8E4M3FNUZ] = false;
expecteds[F8E4M3B11FNUZ][F8E5M2FNUZ] = false;
expecteds[F8E4M3B11FNUZ][F8E3M4] = false;
expecteds[F8E5M2FNUZ][PRED] = false;
expecteds[F8E5M2FNUZ][S2] = false;
expecteds[F8E5M2FNUZ][S4] = false;
expecteds[F8E5M2FNUZ][S8] = false;
expecteds[F8E5M2FNUZ][S16] = false;
expecteds[F8E5M2FNUZ][S32] = false;
expecteds[F8E5M2FNUZ][S64] = false;
expecteds[F8E5M2FNUZ][U2] = false;
expecteds[F8E5M2FNUZ][U4] = false;
expecteds[F8E5M2FNUZ][U8] = false;
expecteds[F8E5M2FNUZ][U16] = false;
expecteds[F8E5M2FNUZ][U32] = false;
expecteds[F8E5M2FNUZ][U64] = false;
expecteds[F8E5M2FNUZ][F16] = true;
expecteds[F8E5M2FNUZ][F32] = true;
expecteds[F8E5M2FNUZ][F64] = true;
expecteds[F8E5M2FNUZ][C64] = true;
expecteds[F8E5M2FNUZ][BF16] = true;
expecteds[F8E5M2FNUZ][C128] = true;
expecteds[F8E5M2FNUZ][F8E5M2] = false;
expecteds[F8E5M2FNUZ][F8E4M3] = false;
expecteds[F8E5M2FNUZ][F8E4M3FN] = false;
expecteds[F8E5M2FNUZ][F8E4M3B11FNUZ] = false;
expecteds[F8E5M2FNUZ][F8E5M2FNUZ] = true;
expecteds[F8E5M2FNUZ][F8E4M3FNUZ] = false;
expecteds[F8E5M2FNUZ][F8E3M4] = false;
expecteds[F8E4M3FNUZ][PRED] = false;
expecteds[F8E4M3FNUZ][S2] = false;
expecteds[F8E4M3FNUZ][S4] = false;
expecteds[F8E4M3FNUZ][S8] = false;
expecteds[F8E4M3FNUZ][S16] = false;
expecteds[F8E4M3FNUZ][S32] = false;
expecteds[F8E4M3FNUZ][S64] = false;
expecteds[F8E4M3FNUZ][U2] = false;
expecteds[F8E4M3FNUZ][U4] = false;
expecteds[F8E4M3FNUZ][U8] = false;
expecteds[F8E4M3FNUZ][U16] = false;
expecteds[F8E4M3FNUZ][U32] = false;
expecteds[F8E4M3FNUZ][U64] = false;
expecteds[F8E4M3FNUZ][F16] = true;
expecteds[F8E4M3FNUZ][F32] = true;
expecteds[F8E4M3FNUZ][F64] = true;
expecteds[F8E4M3FNUZ][C64] = true;
expecteds[F8E4M3FNUZ][BF16] = true;
expecteds[F8E4M3FNUZ][C128] = true;
expecteds[F8E4M3FNUZ][F8E5M2] = false;
expecteds[F8E4M3FNUZ][F8E4M3] = false;
expecteds[F8E4M3FNUZ][F8E4M3FN] = false;
expecteds[F8E4M3FNUZ][F8E4M3B11FNUZ] = false;
expecteds[F8E4M3FNUZ][F8E5M2FNUZ] = false;
expecteds[F8E4M3FNUZ][F8E4M3FNUZ] = true;
expecteds[F8E4M3FNUZ][F8E3M4] = false;
expecteds[F8E3M4][PRED] = false;
expecteds[F8E3M4][S2] = false;
expecteds[F8E3M4][S4] = false;
expecteds[F8E3M4][S8] = false;
expecteds[F8E3M4][S16] = false;
expecteds[F8E3M4][S32] = false;
expecteds[F8E3M4][S64] = false;
expecteds[F8E3M4][U2] = false;
expecteds[F8E3M4][U4] = false;
expecteds[F8E3M4][U8] = false;
expecteds[F8E3M4][U16] = false;
expecteds[F8E3M4][U32] = false;
expecteds[F8E3M4][U64] = false;
expecteds[F8E3M4][F16] = true;
expecteds[F8E3M4][F32] = true;
expecteds[F8E3M4][F64] = true;
expecteds[F8E3M4][C64] = true;
expecteds[F8E3M4][BF16] = true;
expecteds[F8E3M4][C128] = true;
expecteds[F8E3M4][F8E5M2] = false;
expecteds[F8E3M4][F8E5M2FNUZ] = false;
expecteds[F8E3M4][F8E4M3] = false;
expecteds[F8E3M4][F8E4M3FN] = false;
expecteds[F8E3M4][F8E4M3FNUZ] = false;
expecteds[F8E3M4][F8E4M3B11FNUZ] = false;
expecteds[F8E3M4][F8E3M4] = true;
for (int from_type_int = PrimitiveType_MIN;
from_type_int < PrimitiveType_ARRAYSIZE; ++from_type_int) {
auto from_type = static_cast<PrimitiveType>(from_type_int);
if (!primitive_util::IsArrayType(from_type)) {
continue;
}
for (int to_type_int = PrimitiveType_MIN;
to_type_int < PrimitiveType_ARRAYSIZE; ++to_type_int) {
auto to_type = static_cast<PrimitiveType>(to_type_int);
if (!primitive_util::IsArrayType(to_type)) {
continue;
}
bool expected = expecteds[from_type][to_type];
bool actual = primitive_util::CastPreservesValues(from_type, to_type);
EXPECT_EQ(expected, actual)
<< primitive_util::LowercasePrimitiveTypeName(from_type) << " -> "
<< primitive_util::LowercasePrimitiveTypeName(to_type);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/primitive_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/primitive_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49a4e579-c145-4cc7-ada9-64353d57ef67 | cpp | tensorflow/tensorflow | xprof_gpu_cost_analysis | tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.cc | tensorflow/core/profiler/utils/xprof_gpu_cost_analysis_test.cc | #include "tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
namespace tensorflow {
namespace profiler {
namespace {
// Collects the element bit-widths of all array-typed operands of `hlo`.
// Operands whose element type has no bit-width (tuples, tokens, opaque,
// invalid) are skipped, so the result may be shorter than hlo.operands().
std::vector<uint32_t> GetInputBitwidths(const xla::HloInstruction& hlo) {
  std::vector<uint32_t> bitwidths;
  for (const auto& operand : hlo.operands()) {
    const auto element_type = operand->shape().element_type();
    switch (element_type) {
      // Non-array element types carry no bit-width; ignore them.
      case xla::PRIMITIVE_TYPE_INVALID:
      case xla::TUPLE:
      case xla::OPAQUE_TYPE:
      case xla::TOKEN:
        break;
      default:
        bitwidths.push_back(xla::primitive_util::BitWidth(element_type));
    }
  }
  return bitwidths;
}
}
// Runs after the base cost analysis has filled current_properties_ for `hlo`.
// Records a per-instruction "device flops adjustment": the difference between
// the model flop count and the flops after scaling the effective rate for
// narrow (8-bit / 4-bit) inputs. With no adjustment the recorded value is 0.
absl::Status XProfGpuCostAnalysis::Postprocess(const xla::HloInstruction* hlo) {
  if (hlo == nullptr) {
    return absl::OkStatus();
  }
  uint32_t flop_rate_adjustment = 1;
  // Flops estimated by the base model for this instruction.
  float model_flops = current_properties_[kFlopsKey];
  std::vector<uint32_t> input_bitwidths = GetInputBitwidths(*hlo);
  if (!input_bitwidths.empty()) {
    // The widest input dominates the achievable rate.
    int max_input_bitwidth =
        *std::max_element(input_bitwidths.begin(), input_bitwidths.end());
    if (model_flops) {
      // 8-bit inputs are assumed to run at 2x the nominal flop rate and
      // 4-bit inputs at 4x; other widths get no adjustment.
      // NOTE(review): the 2x/4x factors are hardware-model assumptions —
      // confirm against the target GPU's throughput tables.
      switch (max_input_bitwidth) {
        case 8:
          flop_rate_adjustment = 2;
          break;
        case 4:
          flop_rate_adjustment = 4;
          break;
      }
    }
  }
  // Store the flops "saved" by the rate adjustment so it can be queried later
  // via GetDeviceFlopsAdjustment(); 0 when flop_rate_adjustment == 1.
  current_properties_[kDeviceFlopsAdjustment] =
      model_flops - model_flops / flop_rate_adjustment;
  return xla::gpu::GpuHloCostAnalysis::Postprocess(hlo);
}
// Nested computations (e.g. fusion bodies) are analyzed with the same
// adjusted-flops logic, using this analysis' options.
std::unique_ptr<xla::HloCostAnalysis>
XProfGpuCostAnalysis::CreateNestedCostAnalysis() {
  auto nested = std::make_unique<XProfGpuCostAnalysis>(options_);
  return nested;
}
// Returns the flops adjustment recorded for `hlo` during Postprocess()
// (0 when no narrow-input rate adjustment applied).
int64_t XProfGpuCostAnalysis::GetDeviceFlopsAdjustment(
    const xla::HloInstruction& hlo) {
  return GetPropertyForHlo(hlo, kDeviceFlopsAdjustment, hlo_properties_);
}
}
} | #include "tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace profiler {
// Test fixture wiring an XProfGpuCostAnalysis with a fixed shape-size
// function (8-byte pointers) so flop/adjustment counts are deterministic.
class XprofGpuHloCostAnalysisTest : public xla::HloTestBase {
  // Shape-size callback used by the cost analysis; sizes shapes assuming
  // 8-byte pointers for opaque/tuple elements.
  xla::HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
    return [&](const xla::Shape& shape) {
      constexpr int64_t kPointerSize = 8;
      return xla::ShapeUtil::ByteSizeOf(shape, kPointerSize);
    };
  }
 public:
  // Options for the analysis; the trailing `true` flag and the two empty
  // initializers take HloCostAnalysis::Options' positional fields —
  // NOTE(review): confirm field meanings against the Options declaration.
  xla::HloCostAnalysis::Options options_{
      ShapeSizeBytesFunction(),
      {},
      {},
      true};
  // Analysis under test, shared by all test cases in the fixture.
  XProfGpuCostAnalysis analysis_{options_};
  XprofGpuHloCostAnalysisTest() : xla::HloTestBase() {}
};
// An fp16 cuBLAS GEMM (16-bit inputs) should get the plain 2*M*N*K flop
// count and no device-flops adjustment.
TEST_F(XprofGpuHloCostAnalysisTest, Fp16GemmNoAdjustment) {
  absl::string_view hlo_string = R"(
HloModule r

ENTRY e {
  arg0 = f16[65536,32800] parameter(0)
  arg1 = f16[32800,32] parameter(1)
  gemm = (f16[65536,32], s8[0]) custom-call(arg0, arg1),
    custom_call_target="__cublas$gemm",
    backend_config="{
        \"gemm_backend_config\": {
           \"alpha_real\":1,
           \"beta\":0,
           \"dot_dimension_numbers\":{
              \"lhs_contracting_dimensions\":[\"1\"],
              \"rhs_contracting_dimensions\":[\"0\"],
              \"lhs_batch_dimensions\":[],
              \"rhs_batch_dimensions\":[]
           },
           \"alpha_imag\":0,
           \"precision_config\":{
              \"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]
           },
           \"epilogue\":\"DEFAULT\"
        }
     }"
  ROOT get-tuple-element = f16[65536,32]
    get-tuple-element((f16[65536,32], s8[0]) gemm), index=0
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  xla::HloComputation* comp = module->entry_computation();
  const xla::HloInstruction* fp16gemm = comp->GetInstructionWithName("gemm");
  // 2 flops (multiply + add) per M*K*N MAC.
  int64_t gold_flops = 65536LL * 32800 * 32 * 2;
  EXPECT_EQ(analysis_.flop_count(*fp16gemm), gold_flops);
  // 16-bit inputs trigger no rate adjustment.
  EXPECT_EQ(analysis_.GetDeviceFlopsAdjustment(*fp16gemm), 0);
}
// An s8 cuBLAS GEMM (8-bit inputs) keeps the same model flop count but
// records half of it as the device-flops adjustment (2x rate assumption).
TEST_F(XprofGpuHloCostAnalysisTest, S8GemmAdjustment) {
  absl::string_view hlo_string = R"(
HloModule r

ENTRY e {
  arg0 = s8[65536,32800] parameter(0)
  arg1 = s8[32800,32] parameter(1)
  gemm = (s32[65536,32], s8[0]) custom-call(arg0, arg1),
    custom_call_target="__cublas$gemm",
    backend_config="{
        \"gemm_backend_config\": {
           \"alpha_real\":1,
           \"beta\":0,
           \"dot_dimension_numbers\":{
              \"lhs_contracting_dimensions\":[\"1\"],
              \"rhs_contracting_dimensions\":[\"0\"],
              \"lhs_batch_dimensions\":[],
              \"rhs_batch_dimensions\":[]
           },
           \"alpha_imag\":0,
           \"precision_config\":{
              \"operand_precision\":[\"DEFAULT\",\"DEFAULT\"]
           },
           \"epilogue\":\"DEFAULT\"
        }
     }"
  ROOT get-tuple-element = s32[65536,32]
    get-tuple-element((s32[65536,32], s8[0]) gemm), index=0
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  xla::HloComputation* comp = module->entry_computation();
  const xla::HloInstruction* s8gemm = comp->GetInstructionWithName("gemm");
  int64_t gold_flops = 65536LL * 32800 * 32 * 2;
  EXPECT_EQ(analysis_.flop_count(*s8gemm), gold_flops);
  // Half of the model flops are "saved" by the 2x 8-bit rate adjustment.
  EXPECT_EQ(analysis_.GetDeviceFlopsAdjustment(*s8gemm), gold_flops / 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/xprof_gpu_cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/xprof_gpu_cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e3b1365-8dff-460a-9cc1-436efbb0f2a9 | cpp | tensorflow/tensorflow | path | third_party/xla/third_party/tsl/tsl/platform/path.cc | third_party/xla/third_party/tsl/tsl/platform/path_test.cc | #include "tsl/platform/path.h"
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#if defined(PLATFORM_WINDOWS)
#include <windows.h>
#else
#include <unistd.h>
#endif
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace io {
namespace internal {
namespace {
const char kPathSep[] = "/";
}
// Joins path fragments with exactly one '/' between consecutive non-empty
// fragments. Empty fragments are skipped; a leading '/' on a later fragment
// is dropped so it does not double the separator.
string JoinPathImpl(std::initializer_list<absl::string_view> paths) {
  string joined;
  for (absl::string_view part : paths) {
    if (part.empty()) {
      continue;
    }
    if (joined.empty()) {
      // First non-empty fragment seeds the result verbatim.
      joined = string(part);
      continue;
    }
    if (IsAbsolutePath(part)) {
      part = part.substr(1);
    }
    const bool has_trailing_sep = joined.back() == kPathSep[0];
    if (has_trailing_sep) {
      strings::StrAppend(&joined, part);
    } else {
      strings::StrAppend(&joined, kPathSep, part);
    }
  }
  return joined;
}
// Splits `uri` into (dirname, basename) views into the original string.
// The dirname keeps the scheme and host of a URI; the basename is everything
// after the final separator. Both returned views alias `uri`'s storage.
std::pair<absl::string_view, absl::string_view> SplitPath(
    absl::string_view uri) {
  absl::string_view scheme, host, path;
  ParseURI(uri, &scheme, &host, &path);
  auto pos = path.rfind('/');
#ifdef PLATFORM_WINDOWS
  // On Windows also accept '\\' as a separator.
  if (pos == StringPiece::npos) pos = path.rfind('\\');
#endif
  // No separator: dirname is just scheme+host, basename is the whole path.
  if (pos == absl::string_view::npos)
    return std::make_pair(
        absl::string_view(uri.data(), host.end() - uri.begin()), path);
  // Separator at the start of the path: keep the leading '/' in the dirname.
  if (pos == 0)
    return std::make_pair(
        absl::string_view(uri.data(), path.begin() + 1 - uri.begin()),
        absl::string_view(path.data() + 1, path.size() - 1));
  // General case: dirname ends just before the separator, basename follows it.
  return std::make_pair(
      absl::string_view(uri.data(), path.begin() + pos - uri.begin()),
      absl::string_view(path.data() + pos + 1, path.size() - (pos + 1)));
}
// Splits the basename of `path` into (stem, extension) around the final '.'.
// When there is no '.', the extension is an empty view anchored at the end
// of the basename. Both views alias `path`'s storage.
std::pair<absl::string_view, absl::string_view> SplitBasename(
    absl::string_view path) {
  path = Basename(path);
  const auto dot = path.rfind('.');
  if (dot == absl::string_view::npos) {
    return {path, absl::string_view(path.data() + path.size(), 0)};
  }
  return {absl::string_view(path.data(), dot),
          absl::string_view(path.data() + dot + 1, path.size() - (dot + 1))};
}
}
// A path is absolute iff it is non-empty and starts with '/'.
bool IsAbsolutePath(absl::string_view path) {
  if (path.empty()) {
    return false;
  }
  return path.front() == '/';
}
absl::string_view Dirname(absl::string_view path) {
return internal::SplitPath(path).first;
}
absl::string_view Basename(absl::string_view path) {
return internal::SplitPath(path).second;
}
// Returns the part of the basename after the final '.' (empty when none).
absl::string_view Extension(absl::string_view path) {
  return internal::SplitBasename(path).second;
}
// Returns the basename with its extension (and the final '.') removed.
absl::string_view BasenamePrefix(absl::string_view path) {
  return internal::SplitBasename(path).first;
}
// Lexically normalizes `unclean_path` in place: collapses runs of '/',
// removes "." components, and resolves ".." against preceding components
// (leading ".." is kept for relative paths). Returns "." for an empty
// result. Uses a two-pointer compaction (`src` reads, `dst` writes) over
// the same buffer, so `dst` never overtakes `src`.
string CleanPath(absl::string_view unclean_path) {
  string path(unclean_path);
  const char* src = path.c_str();
  string::iterator dst = path.begin();
  // Keep a single leading '/' for absolute paths, swallowing duplicates.
  const bool is_absolute_path = *src == '/';
  if (is_absolute_path) {
    *dst++ = *src++;
    while (*src == '/') ++src;
  }
  // ".." may not backtrack past this point (start, or the kept leading '/').
  string::const_iterator backtrack_limit = dst;
  while (*src) {
    bool parsed = false;
    if (src[0] == '.') {
      if (src[1] == '/' || !src[1]) {
        // "./" or trailing "." — skip it entirely.
        if (*++src) {
          ++src;
        }
        parsed = true;
      } else if (src[1] == '.' && (src[2] == '/' || !src[2])) {
        // "../" or trailing ".."
        src += 2;
        if (dst != backtrack_limit) {
          // Pop the previously written component.
          for (--dst; dst != backtrack_limit && dst[-1] != '/'; --dst) {
          }
        } else if (!is_absolute_path) {
          // Nothing to pop in a relative path: keep the ".." literally and
          // advance the backtrack limit past it.
          src -= 2;
          *dst++ = *src++;
          *dst++ = *src++;
          if (*src) {
            *dst++ = *src;
          }
          backtrack_limit = dst;
        }
        if (*src) {
          ++src;
        }
        parsed = true;
      }
    }
    if (!parsed) {
      // Copy an ordinary component plus one following separator.
      while (*src && *src != '/') {
        *dst++ = *src++;
      }
      if (*src) {
        *dst++ = *src++;
      }
    }
    // Collapse any run of separators.
    while (*src == '/') {
      ++src;
    }
  }
  // Trim to the written length; drop a trailing '/' unless it is the root.
  string::difference_type path_length = dst - path.begin();
  if (path_length != 0) {
    if (path_length > 1 && path[path_length - 1] == '/') {
      --path_length;
    }
    path.resize(path_length);
  } else {
    // Everything canceled out: the cleaned path is ".".
    path.assign(1, '.');
  }
  return path;
}
// Splits `uri` into scheme, host, and path views (all aliasing `uri`).
// A scheme must match [a-zA-Z][a-zA-Z0-9.]* followed by "://"; otherwise
// the whole input is treated as a path with empty scheme and host.
//
// Fix: the "://" literal had been truncated to an unterminated `":`
// (breaking the scheme match and compilation); restored the full literal.
void ParseURI(absl::string_view uri, absl::string_view* scheme,
              absl::string_view* host, absl::string_view* path) {
  // 0. Parse the scheme: [a-zA-Z][a-zA-Z0-9.]* followed by "://".
  if (!strings::Scanner(uri)
           .One(strings::Scanner::LETTER)
           .Many(strings::Scanner::LETTER_DIGIT_DOT)
           .StopCapture()
           .OneLiteral("://")
           .GetResult(&uri, scheme)) {
    // No scheme: the entire input is the path.
    *scheme = absl::string_view(uri.data(), 0);
    *host = absl::string_view(uri.data(), 0);
    *path = uri;
    return;
  }
  // 1. Parse the host: everything up to the next '/'.
  if (!strings::Scanner(uri).ScanUntil('/').GetResult(&uri, host)) {
    // No '/': the remainder is all host, with an empty path.
    *host = uri;
    *path = absl::string_view();
    return;
  }
  // 2. The rest (starting at '/') is the path.
  *path = uri;
}
// Reassembles a URI from its parts; with an empty scheme the result is just
// the path (the inverse of ParseURI for scheme-less inputs).
//
// Fix: the "://" literal had been truncated to an unterminated `":`
// (breaking compilation); restored `StrCat(scheme, "://", host, path)`.
string CreateURI(absl::string_view scheme, absl::string_view host,
                 absl::string_view path) {
  if (scheme.empty()) {
    return string(path);
  }
  return strings::StrCat(scheme, "://", host, path);
}
// Returns a process-wide unique, monotonically increasing id (first call
// returns 1). Thread-safe via a statically initialized mutex.
int64_t UniqueId() {
  static mutex id_mutex(LINKER_INITIALIZED);
  static int64_t next_id = 0;
  mutex_lock lock(id_mutex);
  return ++next_id;
}
// Returns the longest common directory prefix of `paths`, including the
// trailing separator (e.g. {"/a/b/c", "/a/b/d"} -> "/a/b/"). Returns ""
// when there are no paths, any path is empty, or the common character
// prefix contains no separator.
string CommonPathPrefix(absl::Span<const string> paths) {
  if (paths.empty()) return "";
  // Character-wise comparison can go at most as far as the shortest path.
  size_t min_filename_size =
      absl::c_min_element(paths, [](const string& a, const string& b) {
        return a.size() < b.size();
      })->size();
  if (min_filename_size == 0) return "";
  // Length of the longest common character prefix across all paths.
  size_t common_prefix_size = [&] {
    for (size_t prefix_size = 0; prefix_size < min_filename_size;
         prefix_size++) {
      char c = paths[0][prefix_size];
      for (int f = 1; f < paths.size(); f++) {
        if (paths[f][prefix_size] != c) {
          return prefix_size;
        }
      }
    }
    return min_filename_size;
  }();
  // Trim the character prefix back to the last separator so the result is a
  // whole directory prefix, not a partial component.
  size_t rpos = absl::string_view(paths[0])
                    .substr(0, common_prefix_size)
                    .rfind(internal::kPathSep);
  return rpos == std::string::npos
             ? ""
             : std::string(absl::string_view(paths[0]).substr(0, rpos + 1));
}
// Creates an empty temporary file and returns its path. `extension` (without
// a leading '.') is appended to the name when non-empty. Platform behavior:
// Android: unimplemented (fatal). Windows: uses GetTempPath/GetTempFileName.
// Elsewhere: tries TEST_TMPDIR, TMPDIR, TMP, then /tmp, creating the file
// with mkstemp(s). Fatal if no usable directory or the file can't be made.
string GetTempFilename(const string& extension) {
#if defined(__ANDROID__)
  LOG(FATAL) << "GetTempFilename is not implemented in this platform.";
#elif defined(PLATFORM_WINDOWS)
  char temp_dir[_MAX_PATH];
  DWORD retval;
  retval = GetTempPath(_MAX_PATH, temp_dir);
  if (retval > _MAX_PATH || retval == 0) {
    LOG(FATAL) << "Cannot get the directory for temporary files.";
  }
  char temp_file_name[_MAX_PATH];
  retval = GetTempFileName(temp_dir, "", UniqueId(), temp_file_name);
  if (retval > _MAX_PATH || retval == 0) {
    LOG(FATAL) << "Cannot get a temporary file in: " << temp_dir;
  }
  string full_tmp_file_name(temp_file_name);
  full_tmp_file_name.append(extension);
  return full_tmp_file_name;
#else
  // Candidate directories in priority order; null/empty env vars are skipped.
  for (const char* dir : std::vector<const char*>(
           {getenv("TEST_TMPDIR"), getenv("TMPDIR"), getenv("TMP"), "/tmp"})) {
    if (!dir || !dir[0]) {
      continue;
    }
    struct stat statbuf;
    // Only use candidates that exist and are directories.
    if (!stat(dir, &statbuf) && S_ISDIR(statbuf.st_mode)) {
      string tmp_filepath;
      int fd;
      if (extension.length()) {
        // mkstemps keeps the suffix (".<extension>") after the XXXXXX part.
        tmp_filepath = io::JoinPath(
            dir, strings::StrCat("tmp_file_tensorflow_", UniqueId(), "_XXXXXX.",
                                 extension));
        fd = mkstemps(&tmp_filepath[0], extension.length() + 1);
      } else {
        tmp_filepath = io::JoinPath(
            dir,
            strings::StrCat("tmp_file_tensorflow_", UniqueId(), "_XXXXXX"));
        fd = mkstemp(&tmp_filepath[0]);
      }
      if (fd < 0) {
        LOG(FATAL) << "Failed to create temp file.";
      } else {
        // The caller only needs the path; close the descriptor now.
        if (close(fd) < 0) {
          LOG(ERROR) << "close() failed: " << strerror(errno);
        }
        return tmp_filepath;
      }
    }
  }
  LOG(FATAL) << "No temp directory found.";
  // Unreachable: LOG(FATAL) aborts, but keep the compiler satisfied.
  std::abort();
#endif
}
namespace {
// True when `path` begins with the whole path segment `segment`, i.e. the
// match ends exactly at a '/' or at the end of `path` (so "FOO" does not
// match "FOOBAR/x").
bool StartsWithSegment(absl::string_view path, absl::string_view segment) {
  if (!absl::StartsWith(path, segment)) {
    return false;
  }
  if (path.size() == segment.size()) {
    return true;
  }
  return path.at(segment.size()) == internal::kPathSep[0];
}
}
// Resolves the Bazel test workspace directory from TEST_SRCDIR and
// TEST_WORKSPACE. Returns false when either variable is unset; on success
// writes the joined path into *dir when `dir` is non-null.
bool GetTestWorkspaceDir(string* dir) {
  const char* test_srcdir = getenv("TEST_SRCDIR");
  const char* test_workspace = getenv("TEST_WORKSPACE");
  if (test_srcdir == nullptr || test_workspace == nullptr) {
    return false;
  }
  if (dir != nullptr) {
    *dir = tsl::io::JoinPath(test_srcdir, test_workspace);
  }
  return true;
}
// Reads the Bazel TEST_UNDECLARED_OUTPUTS_DIR directory. Returns false when
// unset; on success writes it into *dir when `dir` is non-null.
bool GetTestUndeclaredOutputsDir(string* dir) {
  const char* outputs = getenv("TEST_UNDECLARED_OUTPUTS_DIR");
  if (outputs == nullptr) {
    return false;
  }
  if (dir != nullptr) {
    *dir = outputs;
  }
  return true;
}
// Rewrites a leading TEST_WORKSPACE or TEST_UNDECLARED_OUTPUTS_DIR segment
// of `path` to the corresponding directory; other paths pass through
// unchanged. Returns false when a recognized prefix cannot be resolved
// from the environment.
bool ResolveTestPrefixes(absl::string_view path, string& resolved_path) {
  constexpr absl::string_view kTestWorkspaceSegment = "TEST_WORKSPACE";
  constexpr absl::string_view kOutputDirSegment = "TEST_UNDECLARED_OUTPUTS_DIR";
  if (StartsWithSegment(path, kTestWorkspaceSegment)) {
    if (!GetTestWorkspaceDir(&resolved_path)) {
      return false;
    }
    resolved_path += path.substr(kTestWorkspaceSegment.size());
    return true;
  }
  if (StartsWithSegment(path, kOutputDirSegment)) {
    if (!GetTestUndeclaredOutputsDir(&resolved_path)) {
      return false;
    }
    resolved_path += path.substr(kOutputDirSegment.size());
    return true;
  }
  // No recognized prefix: pass the path through unchanged.
  resolved_path = string(path);
  return true;
}
// Appends ".exe" to `path` on Windows builds; a no-op elsewhere. Returns
// the same string so calls can be chained.
[[maybe_unused]] std::string& AppendDotExeIfWindows(std::string& path) {
#ifdef PLATFORM_WINDOWS
  path += ".exe";
#endif
  return path;
}
}
} | #include "tsl/platform/path.h"
#include <string>
#include "tsl/platform/env.h"
#include "tsl/platform/stringpiece.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
TEST(PathTest, JoinPath) {
EXPECT_EQ("/foo/bar", JoinPath("/foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "bar"));
EXPECT_EQ("foo/bar", JoinPath("foo", "/bar"));
EXPECT_EQ("/foo/bar", JoinPath("/foo", "/bar"));
EXPECT_EQ("/bar", JoinPath("", "/bar"));
EXPECT_EQ("bar", JoinPath("", "bar"));
EXPECT_EQ("/foo", JoinPath("/foo", ""));
EXPECT_EQ("/foo/bar/baz/blah/blink/biz",
JoinPath("/foo/bar/baz/", "/blah/blink/biz"));
EXPECT_EQ("/foo/bar/baz/blah", JoinPath("/foo", "bar", "baz", "blah"));
}
TEST(PathTest, IsAbsolutePath) {
EXPECT_FALSE(IsAbsolutePath(""));
EXPECT_FALSE(IsAbsolutePath("../foo"));
EXPECT_FALSE(IsAbsolutePath("foo"));
EXPECT_FALSE(IsAbsolutePath("./foo"));
EXPECT_FALSE(IsAbsolutePath("foo/bar/baz/"));
EXPECT_TRUE(IsAbsolutePath("/foo"));
EXPECT_TRUE(IsAbsolutePath("/foo/bar/../baz"));
}
TEST(PathTest, Dirname) {
EXPECT_EQ("hdfs:
Dirname("hdfs:
EXPECT_EQ("/hello", Dirname("/hello/"));
EXPECT_EQ("/", Dirname("/hello"));
EXPECT_EQ("hello", Dirname("hello/world"));
EXPECT_EQ("hello", Dirname("hello/"));
EXPECT_EQ("", Dirname("world"));
EXPECT_EQ("/", Dirname("/"));
EXPECT_EQ("", Dirname(""));
}
TEST(PathTest, Basename) {
EXPECT_EQ("", Basename("/hello/"));
EXPECT_EQ("hello", Basename("/hello"));
EXPECT_EQ("world", Basename("hello/world"));
EXPECT_EQ("", Basename("hello/"));
EXPECT_EQ("world", Basename("world"));
EXPECT_EQ("", Basename("/"));
EXPECT_EQ("", Basename(""));
}
TEST(PathTest, Extension) {
EXPECT_EQ("gif", Extension("foo.gif"));
EXPECT_EQ("", Extension("foo."));
EXPECT_EQ("", Extension(""));
EXPECT_EQ("", Extension("/"));
EXPECT_EQ("", Extension("foo"));
EXPECT_EQ("", Extension("foo/"));
EXPECT_EQ("gif", Extension("/a/path/to/foo.gif"));
EXPECT_EQ("html", Extension("/a/path.bar/to/foo.html"));
EXPECT_EQ("", Extension("/a/path.bar/to/foo"));
EXPECT_EQ("baz", Extension("/a/path.bar/to/foo.bar.baz"));
}
TEST(PathTest, CleanPath) {
EXPECT_EQ(".", CleanPath(""));
EXPECT_EQ("x", CleanPath("x"));
EXPECT_EQ("/a/b/c/d", CleanPath("/a/b/c/d"));
EXPECT_EQ("/a/b/c/dtrue);
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
EXPECT_TRUE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, "/repo/src/my/workspace");
EXPECT_TRUE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_SRCDIR");
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::setenv("TEST_SRCDIR", "/repo/src", true);
tsl::unsetenv("TEST_WORKSPACE");
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_SRCDIR");
tsl::unsetenv("TEST_WORKSPACE");
EXPECT_FALSE(GetTestWorkspaceDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestWorkspaceDir(nullptr));
}
TEST(PathTest, GetTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string dir;
dir = kOriginalValue;
tsl::setenv("TEST_UNDECLARED_OUTPUTS_DIR", "/test/outputs",
true);
EXPECT_TRUE(GetTestUndeclaredOutputsDir(&dir));
EXPECT_EQ(dir, "/test/outputs");
EXPECT_TRUE(GetTestUndeclaredOutputsDir(nullptr));
dir = kOriginalValue;
tsl::unsetenv("TEST_UNDECLARED_OUTPUTS_DIR");
EXPECT_FALSE(GetTestUndeclaredOutputsDir(&dir));
EXPECT_EQ(dir, kOriginalValue);
EXPECT_FALSE(GetTestUndeclaredOutputsDir(nullptr));
}
TEST(PathTest, ResolveTestPrefixesKeepsThePathUnchanged) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("", resolved_path));
EXPECT_EQ(resolved_path, "");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/", resolved_path));
EXPECT_EQ(resolved_path, "/");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("alpha/beta", resolved_path));
EXPECT_EQ(resolved_path, "alpha/beta");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/alpha/beta", resolved_path));
EXPECT_EQ(resolved_path, "/alpha/beta");
}
TEST(PathTest, ResolveTestPrefixesCanResolveTestWorkspace) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::setenv("TEST_SRCDIR", "/repo/src", true);
tsl::setenv("TEST_WORKSPACE", "my/workspace", true);
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE/", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace/");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACE/a/b", resolved_path));
EXPECT_EQ(resolved_path, "/repo/src/my/workspace/a/b");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("TEST_WORKSPACEE", resolved_path));
EXPECT_EQ(resolved_path, "TEST_WORKSPACEE");
resolved_path = kOriginalValue;
EXPECT_TRUE(ResolveTestPrefixes("/TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, "/TEST_WORKSPACE");
}
TEST(PathTest, ResolveTestPrefixesCannotResolveTestWorkspace) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::unsetenv("TEST_SRCDIR");
tsl::unsetenv("TEST_WORKSPACE");
resolved_path = kOriginalValue;
EXPECT_FALSE(ResolveTestPrefixes("TEST_WORKSPACE", resolved_path));
EXPECT_EQ(resolved_path, kOriginalValue);
}
TEST(PathTest, ResolveTestPrefixesCanResolveTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::setenv("TEST_UNDECLARED_OUTPUTS_DIR", "/test/outputs",
true);
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR/", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs/");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR/a/b", resolved_path));
EXPECT_EQ(resolved_path, "/test/outputs/a/b");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIRR", resolved_path));
EXPECT_EQ(resolved_path, "TEST_UNDECLARED_OUTPUTS_DIRR");
resolved_path = kOriginalValue;
EXPECT_TRUE(
ResolveTestPrefixes("/TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, "/TEST_UNDECLARED_OUTPUTS_DIR");
}
TEST(PathTest, ResolveTestPrefixesCannotResolveTestUndeclaredOutputsDir) {
constexpr absl::string_view kOriginalValue = "original value";
std::string resolved_path;
tsl::unsetenv("TEST_UNDECLARED_OUTPUTS_DIR");
resolved_path = kOriginalValue;
EXPECT_FALSE(
ResolveTestPrefixes("TEST_UNDECLARED_OUTPUTS_DIR", resolved_path));
EXPECT_EQ(resolved_path, kOriginalValue);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/path.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/path_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9906f79c-113e-4101-9d0a-a3ebc20508da | cpp | tensorflow/tensorflow | sparse_to_dense_op | tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc | tensorflow/core/kernels/sparse_to_dense_op_test.cc | #include <vector>
#include "tensorflow/compiler/tf2xla/lib/scatter.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
// XLA kernel for tf.sparse_to_dense: builds a dense array of shape
// `output_shape` filled with `default_value`, then scatters
// `sparse_values` into it at `sparse_indices`.
class SparseToDenseOp : public XlaOpKernel {
 public:
  explicit SparseToDenseOp(OpKernelConstruction* context)
      : XlaOpKernel(context) {}

  void Compile(XlaOpKernelContext* context) override {
    // Input 0: sparse_indices — a scalar, [num_elems], or
    // [num_elems, num_dims].
    const TensorShape indices_shape = context->InputShape(0);
    OP_REQUIRES(context, indices_shape.dims() <= 2,
                errors::InvalidArgument(
                    "sparse_indices should be a scalar, vector, or matrix, "
                    "got shape ",
                    indices_shape.DebugString()));
    // A scalar index means one element; a vector of indices means a
    // one-dimensional output.
    const int64_t num_elems =
        indices_shape.dims() > 0 ? indices_shape.dim_size(0) : 1;
    const int64_t num_dims =
        indices_shape.dims() > 1 ? indices_shape.dim_size(1) : 1;
    // Input 1: output_shape must be a compile-time constant; the upper
    // bound is used so dynamically-sized dimensions can be refined below.
    TensorShape output_shape;
    OP_REQUIRES_OK(context,
                   context->ConstantInputAsShape(
                       1, &output_shape, xla::ValueInferenceMode::kUpperBound));
    OP_REQUIRES(context, output_shape.dims() == num_dims,
                errors::InvalidArgument(
                    "output_shape has incorrect number of elements: ",
                    output_shape.num_elements(), " should be: ", num_dims));
    // Input 2: sparse_values is either a scalar (applied at every index) or
    // a vector with exactly one value per index.
    const TensorShape sparse_values_shape = context->InputShape(2);
    const int64_t num_values = sparse_values_shape.num_elements();
    OP_REQUIRES(
        context,
        sparse_values_shape.dims() == 0 ||
            (sparse_values_shape.dims() == 1 && num_values == num_elems),
        errors::InvalidArgument("sparse_values has incorrect shape ",
                                sparse_values_shape.DebugString(),
                                ", should be [] or [", num_elems, "]"));
    // Input 3: default_value fills every position not named by the indices.
    const TensorShape default_value_shape = context->InputShape(3);
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(default_value_shape),
                errors::InvalidArgument("default_value should be a scalar."));
    xla::XlaOp indices = context->Input(0);
    xla::XlaOp sparse_values = context->Input(2);
    xla::XlaOp default_value = context->Input(3);
    // Expand a scalar value to one value per index so the scatter has
    // matching updates.
    if (sparse_values_shape.dims() == 0 && num_elems != 1) {
      sparse_values = Broadcast(sparse_values, {num_elems});
    }
    xla::XlaBuilder* builder = context->builder();
    // Dense output initialized entirely to the default value.
    auto buffer = Broadcast(default_value, output_shape.dim_sizes());
    // For dimensions whose size is only known at run time, attach the
    // actual size (sliced from the output_shape operand) to the buffer.
    std::vector<bool> dynamic_dims;
    OP_REQUIRES_OK(
        context, context->ResolveInputDynamismIntoPredVector(1, &dynamic_dims));
    for (int64_t i = 0; i < dynamic_dims.size(); ++i) {
      if (dynamic_dims[i]) {
        auto dynamic_dim_size =
            xla::Slice(context->Input(1), {i}, {i + 1}, {1});
        dynamic_dim_size = xla::Reshape(dynamic_dim_size, {});
        dynamic_dim_size = xla::ConvertElementType(dynamic_dim_size, xla::S32);
        buffer = xla::SetDimensionSize(buffer, dynamic_dim_size, i);
      }
    }
    // A matrix of indices (dims() > 1) means each row is a multi-dim index
    // vector; an empty combiner means later writes simply overwrite.
    auto result = XlaScatter(buffer, sparse_values, indices,
                             indices_shape.dims() > 1,
                             false,
                             {}, builder);
    context->SetOutput(0, builder->ReportErrorOrReturn(result));
  }
};
REGISTER_XLA_OP(Name("SparseToDense").CompileTimeConstantInput("output_shape"),
SparseToDenseOp);
}
} | #include <functional>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
// Fixture that instantiates the SparseToDense kernel via OpsTestBase.
class SparseToDenseTest : public OpsTestBase {
 protected:
  // Builds and initializes a SparseToDense node with the given index and
  // value dtypes. `dim` is not read by this helper; callers pass the index
  // rank for readability only.
  void MakeOp(int dim, DataType index_type, DataType value_type) {
    TF_ASSERT_OK(NodeDefBuilder("sparsetodense", "SparseToDense")
                     .Input(FakeInput(index_type))
                     .Input(FakeInput(index_type))
                     .Input(FakeInput(value_type))
                     .Input(FakeInput(value_type))
                     .Finalize(node_def()));
    TF_ASSERT_OK(InitOp());
  }
};
TEST_F(SparseToDenseTest, OneD_OneValue) {
MakeOp(1, DT_INT32, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3}), {1, 3, 4});
AddInputFromArray<int32>(TensorShape({1}), {5});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {5});
test::FillValues<float>(&expected, {-2, 2, -2, 2, 2});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseToDenseTest, OneD_OneValue_int64_double) {
MakeOp(1, DT_INT64, DT_DOUBLE);
AddInputFromArray<int64_t>(TensorShape({3}), {1, 3, 4});
AddInputFromArray<int64_t>(TensorShape({1}), {5});
AddInputFromArray<double>(TensorShape({}), {2});
AddInputFromArray<double>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_DOUBLE, {5});
test::FillValues<double>(&expected, {-2, 2, -2, 2, 2});
test::ExpectTensorEqual<double>(expected, *GetOutput(0));
}
TEST_F(SparseToDenseTest, OneD_MultValues) {
MakeOp(1, DT_INT32, DT_FLOAT);
AddInputFromArray<int32>({3}, {1, 3, 4});
AddInputFromArray<int32>({1}, {5});
AddInputFromArray<float>({3}, {3, 4, 5});
AddInputFromArray<float>({}, {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {5});
test::FillValues<float>(&expected, {-2, 3, -2, 4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseToDenseTest, TwoD_OneValue) {
MakeOp(2, DT_INT32, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3, 2}), {0, 1, 0, 2, 2, 3});
AddInputFromArray<int32>(TensorShape({2}), {3, 4});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4});
expected.flat<float>().setConstant(-2);
expected.tensor<float, 2>()(0, 1) = 2;
expected.tensor<float, 2>()(0, 2) = 2;
expected.tensor<float, 2>()(2, 3) = 2;
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseToDenseTest, TwoD_MultValues) {
MakeOp(2, DT_INT32, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3, 2}), {0, 1, 0, 2, 2, 3});
AddInputFromArray<int32>(TensorShape({2}), {3, 4});
AddInputFromArray<float>(TensorShape({3}), {3, 4, 5});
AddInputFromArray<float>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4});
expected.flat<float>().setConstant(-2);
expected.tensor<float, 2>()(0, 1) = 3;
expected.tensor<float, 2>()(0, 2) = 4;
expected.tensor<float, 2>()(2, 3) = 5;
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseToDenseTest, ThreeD_OneValue) {
MakeOp(3, DT_INT32, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3, 3}), {0, 1, 1, 0, 2, 0, 2, 3, 1});
AddInputFromArray<int32>(TensorShape({3}), {3, 4, 2});
AddInputFromArray<float>(TensorShape({}), {2});
AddInputFromArray<float>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4, 2});
expected.flat<float>().setConstant(-2);
expected.tensor<float, 3>()(0, 1, 1) = 2;
expected.tensor<float, 3>()(0, 2, 0) = 2;
expected.tensor<float, 3>()(2, 3, 1) = 2;
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(SparseToDenseTest, ThreeD_MultValues) {
MakeOp(3, DT_INT32, DT_FLOAT);
AddInputFromArray<int32>(TensorShape({3, 3}), {0, 1, 1, 0, 2, 0, 2, 3, 1});
AddInputFromArray<int32>(TensorShape({3}), {3, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {3, 4, 5});
AddInputFromArray<float>(TensorShape({}), {-2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, {3, 4, 2});
expected.flat<float>().setConstant(-2);
expected.tensor<float, 3>()(0, 1, 1) = 3;
expected.tensor<float, 3>()(0, 2, 0) = 4;
expected.tensor<float, 3>()(2, 3, 1) = 5;
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
}
static void BM_SparseToDense(::testing::benchmark::State& state) {
const int NDIM = state.range(0);
const int N = state.range(1);
const int IndexDim = (NDIM == 1) ? 0 : 1;
std::unique_ptr<Device> device(
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0"));
absl::InlinedVector<TensorValue, 4UL> inputs;
Tensor output_shape(DT_INT32, TensorShape({NDIM}));
Tensor sparse_indices(DT_INT64, TensorShape({N, NDIM}));
Tensor sparse_values(DT_FLOAT, TensorShape({N}));
Tensor default_value(DT_FLOAT, TensorShape({}));
auto output_shape_t = output_shape.vec<int32>();
for (int d = 0; d < NDIM; ++d) {
output_shape_t(d) = (d == IndexDim) ? N : 3;
}
auto sparse_indices_t = sparse_indices.matrix<int64_t>();
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d)
sparse_indices_t(n, d) = (d == IndexDim) ? n : 0;
}
for (auto* ptr :
{&sparse_indices, &output_shape, &sparse_values, &default_value}) {
inputs.push_back({nullptr, ptr});
}
NodeDef sparse_node_def;
TF_CHECK_OK(NodeDefBuilder("sparsetodense", "SparseToDense")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(&sparse_node_def));
Status status;
std::unique_ptr<OpKernel> op(CreateOpKernel(DEVICE_CPU, device.get(),
cpu_allocator(), sparse_node_def,
TF_GRAPH_DEF_VERSION, &status));
OpKernelContext::Params params;
params.device = device.get();
params.frame_iter = FrameAndIter(0, 0);
params.inputs = inputs;
params.op_kernel = op.get();
std::vector<AllocatorAttributes> attrs;
test::SetOutputAttrs(¶ms, &attrs);
std::unique_ptr<OpKernelContext> sparse_context(new OpKernelContext(¶ms));
op->Compute(sparse_context.get());
for (auto s : state) {
delete sparse_context->release_output(0).tensor;
op->Compute(sparse_context.get());
TF_ASSERT_OK(sparse_context->status());
}
int64_t bytes_per_iter = static_cast<int64_t>((N + N * NDIM) * sizeof(float));
state.SetBytesProcessed(bytes_per_iter * state.iterations());
}
BENCHMARK(BM_SparseToDense)
->ArgPair(1, 10)
->ArgPair(1, 100)
->ArgPair(1, 1000)
->ArgPair(1, 10000)
->ArgPair(2, 10)
->ArgPair(2, 100)
->ArgPair(2, 1000)
->ArgPair(2, 10000)
->ArgPair(3, 10)
->ArgPair(3, 100)
->ArgPair(3, 1000)
->ArgPair(3, 10000)
->ArgPair(5, 10)
->ArgPair(5, 100)
->ArgPair(5, 1000)
->ArgPair(5, 10000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/sparse_to_dense_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_to_dense_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe2c227f-65c5-4a70-b35a-da2c942eea18 | cpp | tensorflow/tensorflow | colocation | tensorflow/core/grappler/utils/colocation.cc | tensorflow/core/grappler/utils/colocation_test.cc | #include "tensorflow/core/grappler/utils/colocation.h"
#include <cstring>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
// Union-find "find" with path compression. `map` stores node -> parent;
// a node mapped to itself is a group root. Unknown nodes are inserted as
// singleton roots. After walking to the root, every visited node except
// the root's direct child (which already points at the root) is re-linked
// directly to the root to flatten future lookups.
string GetColocationGroupRoot(std::unordered_map<string, string>* map,
                              const string& node_name) {
  if (map->find(node_name) == map->end()) {
    // First sighting: the node forms its own group.
    map->emplace(node_name, node_name);
    return node_name;
  }
  // Follow parent links to the root, remembering the path taken.
  std::list<string> path;
  string current = node_name;
  for (string parent = (*map)[current]; parent != current;
       parent = (*map)[current]) {
    path.push_back(current);
    current = parent;
  }
  // Path compression; the last node on the path already points at the root.
  if (!path.empty()) {
    path.pop_back();
    for (const string& node : path) {
      (*map)[node] = current;
    }
  }
  return current;
}
void MergeColocationGroup(std::unordered_map<string, string>* map,
const string& left, const string& right) {
if (map->find(left) == map->end() || map->find(right) == map->end()) {
return;
}
if (left != right) {
map->at(right) = left;
}
}
}
// Rewrites every node's colocation attribute ("_class" list entries of the
// form "loc:@<node>") so that each connected colocation group references a
// single representative node, computed with a union-find over node names.
// A node that ends up as its own group root has the attribute removed
// entirely (it is the anchor other nodes point at).
void ReassignColocation(GraphDef* graph) {
  constexpr char kClassAttr[] = "_class";
  constexpr char kColocPrefix[] = "loc:@";
  // Union-find parent map: node name -> parent node name.
  std::unordered_map<string, string> coloc_groups;
  NodeMap node_map(graph);
  // Pass 1: union each node with every node it declares colocation with.
  for (const auto& node : graph->node()) {
    auto iter = node.attr().find(kClassAttr);
    if (iter != node.attr().end() && iter->second.has_list()) {
      for (const auto& str : iter->second.list().s()) {
        size_t pos = str.find(kColocPrefix);
        if (pos == 0) {
          // Entry is "loc:@<colocate_node>".
          string colocate_node = str.substr(pos + strlen(kColocPrefix));
          MergeColocationGroup(
              &coloc_groups, GetColocationGroupRoot(&coloc_groups, node.name()),
              GetColocationGroupRoot(&coloc_groups, colocate_node));
        }
      }
    }
  }
  // Pass 2: rewrite attributes. Non-root members get a single
  // "loc:@<root>" entry; roots (pair.first == pair.second) lose the
  // attribute.
  for (const auto& pair : coloc_groups) {
    if (pair.first != pair.second) {
      NodeDef* node = node_map.GetNode(pair.first);
      if (node) {
        // pair.second is only the immediate parent, so resolve the true
        // root before writing the attribute.
        AttrValue new_value;
        new_value.mutable_list()->add_s(
            kColocPrefix + GetColocationGroupRoot(&coloc_groups, pair.first));
        node->mutable_attr()->erase(kClassAttr);
        node->mutable_attr()->insert({kClassAttr, new_value});
      }
    } else {
      NodeDef* node = node_map.GetNode(pair.first);
      if (node) {
        node->mutable_attr()->erase(kClassAttr);
      }
    }
  }
}
}
} | #include "tensorflow/core/grappler/utils/colocation.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
class ColocationTest : public ::testing::Test {};
// Returns true iff `ndef` carries a "_class" attribute whose first list
// entry equals `coloc`.
bool VerifyNodeHasColocation(const NodeDef& ndef, const string& coloc) {
  const auto& attrs = ndef.attr();
  if (attrs.empty()) {
    return false;
  }
  auto it = attrs.find("_class");
  if (it == attrs.end()) {
    return false;
  }
  return it->second.list().s(0) == coloc;
}
TEST(ColocationTest, ReassignColocation_SingleNode) {
NodeDef ndef;
const Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@B"}).Finalize(&ndef);
TF_EXPECT_OK(status);
GraphDef gdef = test::function::GDef({ndef});
EXPECT_EQ(1, gdef.node_size());
EXPECT_EQ(1, gdef.node(0).attr_size());
ReassignColocation(&gdef);
EXPECT_EQ(1, gdef.node_size());
EXPECT_EQ(0, gdef.node(0).attr_size());
}
TEST(ColocationTest, ReassignColocation_MultiNode_SingleGroup) {
NodeDef ndef_a, ndef_b, ndef_c, ndef_d, ndef_e;
Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_a);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("B", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_b);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("C", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_c);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("D", "Const").Attr("_class", {"loc:@C"}).Finalize(&ndef_d);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("E", "Const").Attr("_class", {"loc:@D"}).Finalize(&ndef_e);
TF_EXPECT_OK(status);
GraphDef gdef =
test::function::GDef({ndef_a, ndef_b, ndef_c, ndef_d, ndef_e});
EXPECT_EQ(5, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@C"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(4), "loc:@D"));
ReassignColocation(&gdef);
EXPECT_EQ(5, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@E"));
EXPECT_EQ(0, gdef.node(4).attr_size());
}
TEST(ColocationTest, ReassignColocation_MultiNode_MultiGroup) {
NodeDef ndef_a, ndef_b, ndef_c, ndef_d, ndef_e, ndef_u, ndef_v;
Status status =
NodeDefBuilder("A", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_a);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("B", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_b);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("C", "Const").Attr("_class", {"loc:@X"}).Finalize(&ndef_c);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("D", "Const").Attr("_class", {"loc:@C"}).Finalize(&ndef_d);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("E", "Const").Attr("_class", {"loc:@D"}).Finalize(&ndef_e);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("U", "Const").Attr("_class", {"loc:@W"}).Finalize(&ndef_u);
TF_EXPECT_OK(status);
status =
NodeDefBuilder("V", "Const").Attr("_class", {"loc:@W"}).Finalize(&ndef_v);
TF_EXPECT_OK(status);
GraphDef gdef = test::function::GDef(
{ndef_a, ndef_b, ndef_c, ndef_d, ndef_e, ndef_u, ndef_v});
EXPECT_EQ(7, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@X"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@C"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(4), "loc:@D"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(5), "loc:@W"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(6), "loc:@W"));
ReassignColocation(&gdef);
EXPECT_EQ(7, gdef.node_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(0), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(1), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(2), "loc:@E"));
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(3), "loc:@E"));
EXPECT_EQ(0, gdef.node(4).attr_size());
EXPECT_TRUE(VerifyNodeHasColocation(gdef.node(5), "loc:@V"));
EXPECT_EQ(0, gdef.node(6).attr_size());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/colocation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/colocation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce08a832-4904-4f05-8fe8-fe63853de4ff | cpp | google/tensorstore | elementwise_function | tensorstore/internal/elementwise_function.h | tensorstore/internal/elementwise_function_test.cc | #ifndef TENSORSTORE_UTIL_ELEMENTWISE_FUNCTION_H_
#define TENSORSTORE_UTIL_ELEMENTWISE_FUNCTION_H_
#include <array>
#include <cstddef>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/internal/void_wrapper.h"
#include "tensorstore/util/byte_strided_pointer.h"
namespace tensorstore {
namespace internal {
enum class IterationBufferKind {
kContiguous,
kStrided,
kIndexed,
};
constexpr size_t kNumIterationBufferKinds = 3;
// Pointer plus layout information for one array operand of an elementwise
// loop over a 2-d (outer, inner) iteration domain.
//
// The two anonymous unions overlay the strided and indexed
// representations: with kContiguous/kStrided buffers the active members
// are `outer_byte_stride`/`inner_byte_stride`; with kIndexed buffers they
// are `byte_offsets_outer_stride`/`byte_offsets`. The
// `IterationBufferKind` in effect must be tracked externally.
struct IterationBufferPointer {
  IterationBufferPointer() = default;

  // Strided form: the element at (outer, inner) lives at
  // `pointer + outer * outer_byte_stride + inner * inner_byte_stride`.
  explicit IterationBufferPointer(ByteStridedPointer<void> pointer,
                                  Index outer_byte_stride,
                                  Index inner_byte_stride)
      : pointer(pointer),
        outer_byte_stride(outer_byte_stride),
        inner_byte_stride(inner_byte_stride) {}

  // Indexed form: the element at (outer, inner) lives at
  // `pointer + byte_offsets[outer * byte_offsets_outer_stride + inner]`.
  explicit IterationBufferPointer(ByteStridedPointer<void> pointer,
                                  Index byte_offsets_outer_stride,
                                  const Index* byte_offsets)
      : pointer(pointer),
        byte_offsets_outer_stride(byte_offsets_outer_stride),
        byte_offsets(byte_offsets) {}

  // Base pointer from which all element addresses are computed.
  ByteStridedPointer<void> pointer;
  union {
    // Byte stride between consecutive outer positions
    // (kContiguous/kStrided).
    Index outer_byte_stride;
    // Row stride, in elements, of the `byte_offsets` array (kIndexed).
    Index byte_offsets_outer_stride;
  };
  union {
    // Byte stride between consecutive inner positions (kStrided; the
    // kContiguous accessor ignores this and uses sizeof(Element)).
    Index inner_byte_stride;
    // Per-position byte offsets from `pointer` (kIndexed).
    const Index* byte_offsets;
  };

  // Advances this pointer by (outer_offset, inner_offset) positions,
  // interpreting the union members according to `kind`. All index
  // arithmetic wraps on overflow.
  void AddElementOffset(IterationBufferKind kind, Index outer_offset,
                        Index inner_offset) {
    if (kind == IterationBufferKind::kIndexed) {
      byte_offsets += inner_offset;
      byte_offsets +=
          wrap_on_overflow::Multiply(byte_offsets_outer_stride, outer_offset);
    } else {
      pointer += wrap_on_overflow::Multiply(inner_byte_stride, inner_offset);
      pointer += wrap_on_overflow::Multiply(outer_byte_stride, outer_offset);
    }
  }
};
// Computes element addresses for an IterationBufferPointer according to a
// particular IterationBufferKind; specialized below for each kind.
template <IterationBufferKind BufferKind>
struct IterationBufferAccessor;

// kStrided: address = pointer + outer * outer_byte_stride
//                             + inner * inner_byte_stride.
template <>
struct IterationBufferAccessor<IterationBufferKind::kStrided> {
  constexpr static IterationBufferKind buffer_kind =
      IterationBufferKind::kStrided;
  template <typename Element>
  static Element* GetPointerAtPosition(IterationBufferPointer ptr, Index outer,
                                       Index inner) {
    // Stride multiplications wrap on overflow rather than trapping.
    return static_cast<Element*>(
        ptr.pointer +
        internal::wrap_on_overflow::Multiply(ptr.outer_byte_stride, outer) +
        internal::wrap_on_overflow::Multiply(ptr.inner_byte_stride, inner));
  }
};

// kContiguous: like kStrided, but the inner stride is implicitly
// sizeof(Element); `ptr.inner_byte_stride` is not consulted.
template <>
struct IterationBufferAccessor<IterationBufferKind::kContiguous> {
  constexpr static IterationBufferKind buffer_kind =
      IterationBufferKind::kContiguous;
  template <typename Element>
  static Element* GetPointerAtPosition(IterationBufferPointer ptr, Index outer,
                                       Index inner) {
    return static_cast<Element*>(
        ptr.pointer +
        internal::wrap_on_overflow::Multiply(ptr.outer_byte_stride, outer) +
        internal::wrap_on_overflow::Multiply(
            static_cast<Index>(sizeof(Element)), inner));
  }
};

// kIndexed: address = pointer +
//     byte_offsets[outer * byte_offsets_outer_stride + inner].
template <>
struct IterationBufferAccessor<IterationBufferKind::kIndexed> {
  constexpr static IterationBufferKind buffer_kind =
      IterationBufferKind::kIndexed;
  template <typename Element>
  static Element* GetPointerAtPosition(IterationBufferPointer ptr, Index outer,
                                       Index inner) {
    return static_cast<Element*>(
        ptr.pointer +
        ptr.byte_offsets[internal::wrap_on_overflow::Multiply(
                             ptr.byte_offsets_outer_stride, outer) +
                         inner]);
  }
};
template <size_t Arity, typename... ExtraArg>
class ElementwiseFunction;
using IterationBufferShape = std::array<Index, 2>;
}
namespace internal_elementwise_function {
template <typename SequenceType, typename... ExtraArg>
struct ElementwiseFunctionPointerHelper;
template <size_t I>
using IterationBufferPointerHelper = internal::IterationBufferPointer;
template <size_t... Is, typename... ExtraArg>
struct ElementwiseFunctionPointerHelper<std::index_sequence<Is...>,
ExtraArg...> {
using type = bool (*)(void*, internal::IterationBufferShape,
IterationBufferPointerHelper<Is>..., ExtraArg...);
};
// Trait: true iff `Func` provides an
// `ApplyContiguous(Index count, Element*..., ExtraArg...)` member, i.e. it
// can process a contiguous run of elements in a single call. The second
// (SFINAE) parameter is always instantiated as `void`.
template <typename, typename SFINAE, typename...>
constexpr inline bool HasApplyContiguous = false;
template <typename Func, typename... Element, typename... ExtraArg>
constexpr inline bool HasApplyContiguous<
    Func(Element...),
    std::void_t<decltype(std::declval<Func>().ApplyContiguous(
        std::declval<Index>(), std::declval<Element*>()...,
        std::declval<ExtraArg>()...))>,
    ExtraArg...> = true;

// Defined below; declared here so the traits can reference the primary
// template.
template <typename, typename...>
struct SimpleLoopTemplate;

// Marks an empty (stateless) function object `Func` whose invocations
// receive a leading `T&` context argument reconstructed from the
// type-erased `void*` context pointer.
template <typename T, typename Func>
struct Stateless {
  static_assert(std::is_empty_v<Func>);
  using type = Func;
  using ContextType = T;
};

// Default traits: `T` itself is the function object and receives no
// separate context argument.
template <typename T>
struct StatelessTraits {
  constexpr static bool is_stateless = false;
  using type = T;
};

// Traits specialization for the `Stateless` wrapper above.
template <typename T, typename Func>
struct StatelessTraits<Stateless<T, Func>> {
  constexpr static bool is_stateless = true;
  using type = Func;
};
// Loop implementation that invokes `Func` over a 2-d (outer, inner)
// iteration domain, once per element — or once per contiguous inner run
// when the buffers are contiguous and `Func` exposes `ApplyContiguous`.
// `Func` may be a `Stateless<T, F>` wrapper, in which case the type-erased
// `context` is reinterpreted as `T*` and passed as a leading argument;
// otherwise `context` points at the (possibly empty) function object.
template <typename Func, typename... Element, typename... ExtraArg>
struct SimpleLoopTemplate<Func(Element...), ExtraArg...> {
  using ElementwiseFunctionType =
      internal::ElementwiseFunction<sizeof...(Element), ExtraArg...>;

  // Selects the loop specialization for `ArrayAccessor` at compile time:
  // the contiguous fast path when available, otherwise the generic
  // per-element loop.
  template <typename ArrayAccessor>
  static constexpr auto GetLoopFn() {
    if constexpr (ArrayAccessor::buffer_kind ==
                      internal::IterationBufferKind::kContiguous &&
                  HasApplyContiguous<Func(Element...), void,
                                     ExtraArg...>) {
      return &FastLoop<ArrayAccessor>;
    } else {
      return &Loop<ArrayAccessor>;
    }
  }

  // Fast path: processes each outer row as one contiguous run of shape[1]
  // elements via `Func::ApplyContiguous`. Returns false (aborting early)
  // as soon as the function does.
  template <typename ArrayAccessor>
  static bool FastLoop(
      void* context, internal::IterationBufferShape shape,
      internal::FirstType<internal::IterationBufferPointer, Element>... pointer,
      ExtraArg... extra_arg) {
    using Traits = StatelessTraits<Func>;
    using FuncType = typename Traits::type;
    static_assert(ArrayAccessor::buffer_kind ==
                  internal::IterationBufferKind::kContiguous);
    static_assert(
        HasApplyContiguous<Func(Element...), void, ExtraArg...>);
    // For an empty FuncType, `get` yields a value-initialized instance and
    // `context` is free to carry the Stateless context object instead.
    internal::PossiblyEmptyObjectGetter<FuncType> func_helper;
    FuncType& func = func_helper.get(static_cast<FuncType*>(context));
    for (Index outer = 0; outer < shape[0]; ++outer) {
      if constexpr (StatelessTraits<Func>::is_stateless) {
        // Stateless wrapper: recover the context object and pass it first.
        if (!func.ApplyContiguous(
                *static_cast<typename Func::ContextType*>(context), shape[1],
                ArrayAccessor::template GetPointerAtPosition<Element>(
                    pointer, outer, 0)...,
                extra_arg...)) {
          return false;
        }
      } else {
        if (!func.ApplyContiguous(
                shape[1],
                ArrayAccessor::template GetPointerAtPosition<Element>(
                    pointer, outer, 0)...,
                extra_arg...)) {
          return false;
        }
      }
    }
    return true;
  }

  // Generic path: invokes `func` once per element. A `void` return from
  // `func` is wrapped as success by `Void::CallAndWrap`; a false-convertible
  // return aborts iteration early.
  template <typename ArrayAccessor>
  static bool Loop(
      void* context, internal::IterationBufferShape shape,
      internal::FirstType<internal::IterationBufferPointer, Element>... pointer,
      ExtraArg... extra_arg) {
    // This overload must not shadow the fast path when it is applicable.
    static_assert(
        !(ArrayAccessor::buffer_kind ==
              internal::IterationBufferKind::kContiguous &&
          HasApplyContiguous<Func(Element...), void, ExtraArg...>));
    using Traits = StatelessTraits<Func>;
    using FuncType = typename Traits::type;
    internal::PossiblyEmptyObjectGetter<FuncType> func_helper;
    FuncType& func = func_helper.get(static_cast<FuncType*>(context));
    for (Index outer = 0; outer < shape[0]; ++outer) {
      for (Index inner = 0; inner < shape[1]; ++inner) {
        if constexpr (StatelessTraits<Func>::is_stateless) {
          if (!static_cast<bool>(internal::Void::CallAndWrap(
                  func, *static_cast<typename Func::ContextType*>(context),
                  ArrayAccessor::template GetPointerAtPosition<Element>(
                      pointer, outer, inner)...,
                  extra_arg...))) {
            return false;
          }
        } else {
          if (!static_cast<bool>(internal::Void::CallAndWrap(
                  func,
                  ArrayAccessor::template GetPointerAtPosition<Element>(
                      pointer, outer, inner)...,
                  extra_arg...))) {
            return false;
          }
        }
      }
    }
    return true;
  }
};
}
namespace internal {
// Function-pointer type of a loop specialized for a particular buffer kind,
// taking `Arity` buffer pointers plus `ExtraArg...`.
template <size_t Arity, typename... ExtraArg>
using SpecializedElementwiseFunctionPointer =
    typename internal_elementwise_function::ElementwiseFunctionPointerHelper<
        std::make_index_sequence<Arity>, ExtraArg...>::type;
// Pairs an `ElementwiseFunction` with the type-erased `context` pointer to
// pass as its first argument.  Non-owning: both pointers must outlive the
// closure.
template <size_t Arity, typename... ExtraArg>
struct ElementwiseClosure {
  using Function = ElementwiseFunction<Arity, ExtraArg...>;
  constexpr static size_t arity = Arity;
  const Function* function;
  void* context;
};
// Table of loop-function pointers, one per `IterationBufferKind`
// (contiguous, strided, indexed), for an elementwise operation over `Arity`
// arrays.
template <size_t Arity, typename... ExtraArg>
class ElementwiseFunction {
 public:
  constexpr static size_t arity = Arity;
  using Closure = ElementwiseClosure<Arity, ExtraArg...>;
  using SpecializedFunctionPointer =
      SpecializedElementwiseFunctionPointer<Arity, ExtraArg...>;
  constexpr ElementwiseFunction() = default;
  // Fills the table from `LoopTemplate::GetLoopFn<Accessor>()`; SFINAE
  // (the defaulted second parameter) restricts this constructor to types
  // that actually provide `GetLoopFn`.
  template <typename LoopTemplate,
            typename = decltype(LoopTemplate::template GetLoopFn<
                                IterationBufferAccessor<
                                    IterationBufferKind::kContiguous>>())>
  constexpr explicit ElementwiseFunction(LoopTemplate)
      : functions_{
            LoopTemplate::template GetLoopFn<
                IterationBufferAccessor<IterationBufferKind::kContiguous>>(),
            LoopTemplate::template GetLoopFn<
                IterationBufferAccessor<IterationBufferKind::kStrided>>(),
            LoopTemplate::template GetLoopFn<
                IterationBufferAccessor<IterationBufferKind::kIndexed>>()} {}
  // Looks up the loop specialized for `buffer_kind`.
  constexpr SpecializedFunctionPointer operator[](
      IterationBufferKind buffer_kind) const {
    return functions_[static_cast<size_t>(buffer_kind)];
  }
  constexpr SpecializedFunctionPointer& operator[](
      IterationBufferKind buffer_kind) {
    return functions_[static_cast<size_t>(buffer_kind)];
  }
 private:
  // Indexed by `static_cast<size_t>(IterationBufferKind)`.
  SpecializedFunctionPointer functions_[kNumIterationBufferKinds];
};
// Holds a static constexpr `ElementwiseFunction` instantiated from
// `LoopTemplate`, convertible both to a pointer to it and to a copy of it.
template <typename LoopTemplate>
struct GetElementwiseFunction {
  using ElementwiseFunctionType =
      typename LoopTemplate::ElementwiseFunctionType;
  constexpr static ElementwiseFunctionType function{LoopTemplate{}};
  constexpr operator const ElementwiseFunctionType*() const {
    return &function;
  }
  constexpr operator ElementwiseFunctionType() const { return function; }
};
// Out-of-class definition of `function`: required prior to C++17 inline
// variables so that ODR-uses (taking its address above) link correctly.
template <typename LoopTemplate>
constexpr typename LoopTemplate::ElementwiseFunctionType
    GetElementwiseFunction<LoopTemplate>::function;
// Convenience wrapper: builds the `ElementwiseFunction` table for a simple
// function object `Func` via `SimpleLoopTemplate`, and provides `Closure`
// helpers that bind a `Func*` as the type-erased context.
template <typename, typename...>
struct SimpleElementwiseFunction;
template <typename Func, typename... Element, typename... ExtraArg>
struct SimpleElementwiseFunction<Func(Element...), ExtraArg...>
    : public GetElementwiseFunction<
          internal_elementwise_function::SimpleLoopTemplate<
              std::remove_reference_t<Func>(Element...), ExtraArg...>> {
  using ElementwiseFunctionType =
      internal::ElementwiseFunction<sizeof...(Element), ExtraArg...>;
  using ClosureType =
      internal::ElementwiseClosure<sizeof...(Element), ExtraArg...>;
  // Binds `func` (non-owning) as the closure context.  The `const_cast`
  // only strips a const qualifier carried by `Func` itself.
  constexpr static ClosureType Closure(std::remove_reference_t<Func>* func) {
    return ClosureType{SimpleElementwiseFunction{},
                       const_cast<absl::remove_cvref_t<Func>*>(func)};
  }
  // For empty (stateless) `Func`, a closure needs no context; allow implicit
  // conversion to a closure with a null context pointer.
  template <int&... ExplicitArgumentBarrier,
            std::enable_if_t<
                (sizeof...(ExplicitArgumentBarrier) == 0 &&
                 std::is_empty<absl::remove_cvref_t<Func>>::value)>* = nullptr>
  constexpr operator ClosureType() const {
    return {SimpleElementwiseFunction{}, nullptr};
  }
};
}
namespace internal_elementwise_function {
// Expands the `Pointers` tuple-like object into `Arity` separate
// `IterationBufferPointer` arguments and calls `function`.
template <size_t Arity, typename... ExtraArg, typename Pointers, size_t... Is>
inline bool InvokeElementwiseFunctionImpl(
    std::index_sequence<Is...>,
    internal::SpecializedElementwiseFunctionPointer<Arity, ExtraArg...>
        function,
    void* context, internal::IterationBufferShape shape,
    const Pointers& pointers, ExtraArg... extra_arg) {
  // Unqualified `get` enables ADL so both std:: and user tuple-likes work.
  using std::get;
  return function(context, shape, get<Is>(pointers)...,
                  std::forward<ExtraArg>(extra_arg)...);
}
}
namespace internal {
// Invokes `closure` on buffers of kind `buffer_kind`, expanding `pointers`
// into individual arguments.  Returns the loop's success flag.
template <size_t Arity, typename... ExtraArg, typename Pointers>
inline bool InvokeElementwiseClosure(
    ElementwiseClosure<Arity, ExtraArg...> closure,
    IterationBufferKind buffer_kind, internal::IterationBufferShape shape,
    const Pointers& pointers,
    internal::type_identity_t<ExtraArg>... extra_arg) {
  return internal_elementwise_function::InvokeElementwiseFunctionImpl<
      Arity, ExtraArg...>(
      std::make_index_sequence<Arity>{}, (*closure.function)[buffer_kind],
      closure.context, shape, pointers, std::forward<ExtraArg>(extra_arg)...);
}
// As above, but with an already-specialized function pointer and an explicit
// `context` instead of a closure.
template <size_t Arity, typename... ExtraArg, typename Pointers>
inline bool InvokeElementwiseFunction(
    SpecializedElementwiseFunctionPointer<Arity, ExtraArg...> function,
    void* context, internal::IterationBufferShape shape,
    const Pointers& pointers, ExtraArg... extra_arg) {
  return internal_elementwise_function::InvokeElementwiseFunctionImpl<
      Arity, ExtraArg...>(std::make_index_sequence<Arity>{}, function, context,
                          shape, pointers,
                          std::forward<ExtraArg>(extra_arg)...);
}
}
}
#endif | #include "tensorstore/internal/elementwise_function.h"
#include <functional>
#include <limits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/attributes.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::internal::ElementwiseClosure;
using ::tensorstore::internal::ElementwiseFunction;
using ::tensorstore::internal::IterationBufferAccessor;
using ::tensorstore::internal::IterationBufferKind;
using ::tensorstore::internal::IterationBufferPointer;
using ::tensorstore::internal::SimpleElementwiseFunction;
using ContiguousAccessor =
IterationBufferAccessor<IterationBufferKind::kContiguous>;
using StridedAccessor = IterationBufferAccessor<IterationBufferKind::kStrided>;
using OffsetArrayAccessor =
IterationBufferAccessor<IterationBufferKind::kIndexed>;
// Contiguous buffers: element i is at base + i * sizeof(T).
TEST(ContiguousAccessorTest, Basic) {
  int arr[3] = {1, 2, 3};
  IterationBufferPointer ptr{&arr[0], Index(0), Index(0)};
  EXPECT_EQ(&arr[0], ContiguousAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
  EXPECT_EQ(&arr[1], ContiguousAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
// Pointer arithmetic must use wrap-on-overflow so that a pre-biased base
// pointer plus a huge index lands back on the real element.
TEST(ContiguousAccessorTest, WrapOnOverflow) {
  int arr[3] = {1, 2, 3};
  IterationBufferPointer ptr{&arr[0], Index(0), Index(0)};
  const Index base_index = std::numeric_limits<Index>::max() - 3;
  ptr.pointer -= tensorstore::internal::wrap_on_overflow::Multiply(
      base_index, static_cast<Index>(sizeof(int)));
  EXPECT_EQ(&arr[0], ContiguousAccessor::GetPointerAtPosition<int>(
                         ptr, 0, base_index + 0));
  EXPECT_EQ(&arr[1], ContiguousAccessor::GetPointerAtPosition<int>(
                         ptr, 0, base_index + 1));
}
// Strided buffers: element i is at base + i * inner_byte_stride.
TEST(StridedAccessorTest, Basic) {
  int arr[3] = {1, 2, 3};
  IterationBufferPointer ptr{&arr[0], Index(0), sizeof(int) * 2};
  EXPECT_EQ(&arr[0], StridedAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
  EXPECT_EQ(&arr[2], StridedAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
// Same wrap-on-overflow guarantee for the strided case.
TEST(StridedAccessorTest, WrapOnOverflow) {
  int arr[3] = {1, 2, 3};
  IterationBufferPointer ptr{&arr[0], Index(0), sizeof(int) * 2};
  const Index base_index = std::numeric_limits<Index>::max() - 3;
  ptr.pointer -= tensorstore::internal::wrap_on_overflow::Multiply(
      base_index, ptr.inner_byte_stride);
  EXPECT_EQ(&arr[0],
            StridedAccessor::GetPointerAtPosition<int>(ptr, 0, base_index + 0));
  EXPECT_EQ(&arr[2],
            StridedAccessor::GetPointerAtPosition<int>(ptr, 0, base_index + 1));
}
// Indexed buffers: element i is at base + byte_offsets[i].
TEST(OffsetArrayAccessorTest, Basic) {
  int arr[3] = {1, 2, 3};
  Index offsets[] = {0, sizeof(int) * 2};
  IterationBufferPointer ptr{&arr[0], Index(0), &offsets[0]};
  EXPECT_EQ(&arr[0], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
  EXPECT_EQ(&arr[2], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
// Indexed case: base pointer biased down, offsets biased up — the sums must
// wrap back to the real addresses.
TEST(OffsetArrayAccessorTest, WrapOnOverflow) {
  int arr[3] = {1, 2, 3};
  const Index base_index = std::numeric_limits<Index>::max() - 100;
  Index offsets[] = {base_index + 0, base_index + sizeof(int) * 2};
  IterationBufferPointer ptr{&arr[0], Index(0), &offsets[0]};
  ptr.pointer -= base_index;
  EXPECT_EQ(&arr[0], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 0));
  EXPECT_EQ(&arr[2], OffsetArrayAccessor::GetPointerAtPosition<int>(ptr, 0, 1));
}
// Empty (stateless) unary function object: all three buffer kinds work with
// a null context, and the loop stops at the first element returning false.
// The deleted default constructor checks that the stateless path never needs
// to construct an `AddOneB`.
TEST(SimpleElementwiseFunctionTest, ArityOne) {
  struct AddOneB {
    AddOneB() = delete;
    bool operator()(int* x) const {
      if (*x > 0) return false;
      *x += 1;
      return true;
    }
  };
  ElementwiseFunction<1> function = SimpleElementwiseFunction<AddOneB(int)>();
  std::vector<int> arr{-5, -6, 1, 2};
  EXPECT_TRUE(function[IterationBufferKind::kContiguous](
      nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
  // Stride of 2 ints visits arr[0] then arr[2]; arr[2] > 0 so the loop
  // aborts (returns false) after incrementing only arr[0].
  EXPECT_FALSE(function[IterationBufferKind::kStrided](
      nullptr, {1, 2},
      IterationBufferPointer{&arr[0], Index(0), sizeof(int) * 2}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-3, -5, 1, 2));
  // Both offsets point at arr[1], so it is incremented twice.
  Index offsets[] = {sizeof(int), sizeof(int)};
  EXPECT_TRUE(function[IterationBufferKind::kIndexed](
      nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), &offsets[0]}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-3, -3, 1, 2));
}
// A capture-less lambda type is empty, so it also works with a null context.
TEST(SimpleElementwiseFunctionTest, ArityOneCaptureLessLambda) {
  [[maybe_unused]] const auto add_one = [](int* x) {
    if (*x > 0) return false;
    *x += 1;
    return true;
  };
  ElementwiseFunction<1> function =
      SimpleElementwiseFunction<decltype(add_one)(int)>();
  std::vector<int> arr{-5, -6, 1, 2};
  EXPECT_TRUE(function[IterationBufferKind::kContiguous](
      nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
}
// Stateful function object: `Closure` binds the object pointer as context,
// and mutations to its state are observable after the loop.
TEST(SimpleElementwiseFunctionTest, NonEmptyArityOne) {
  struct AddOneC {
    int value = 0;
    bool operator()(int* x) {
      ++value;
      if (*x > 0) return false;
      *x += 1;
      return true;
    }
  };
  AddOneC add_one;
  ElementwiseClosure<1> closure =
      SimpleElementwiseFunction<AddOneC(int)>::Closure(&add_one);
  EXPECT_EQ(&add_one, closure.context);
  std::vector<int> arr{-5, -6, 1, 2};
  EXPECT_TRUE((*closure.function)[IterationBufferKind::kContiguous](
      closure.context, {1, 2},
      IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
  EXPECT_EQ(2, add_one.value);
}
// State held via std::bind: the bind expression itself is the (non-empty)
// function object.  `&add_one` is passed directly as the context, which is
// the same pointer `Closure` would have stored.
TEST(SimpleElementwiseFunctionTest, NonEmptyArityOneBind) {
  struct AddOneD {
    bool operator()(int* x, int* counter) {
      ++*counter;
      if (*x > 0) return false;
      *x += 1;
      return true;
    }
  };
  int counter = 0;
  auto add_one = std::bind(AddOneD{}, std::placeholders::_1, &counter);
  ElementwiseClosure<1> closure =
      SimpleElementwiseFunction<decltype(add_one)(int)>::Closure(&add_one);
  std::vector<int> arr{-5, -6, 1, 2};
  EXPECT_TRUE((*closure.function)[IterationBufferKind::kContiguous](
      &add_one, {1, 2},
      IterationBufferPointer{&arr[0], Index(0), sizeof(int)}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
  EXPECT_EQ(2, counter);
}
// Binary elementwise function over two buffers with independent layouts
// (contiguous, strided, and indexed with distinct offset arrays).
TEST(SimpleElementwiseFunctionTest, ArityTwo) {
  struct Convert {
    bool operator()(int* x, double* y) const {
      *x = static_cast<int>(*y);
      return (*x < 0);
    }
  };
  ElementwiseFunction<2> function =
      SimpleElementwiseFunction<Convert(int, double)>();
  std::vector<int> arr{0, 0, 0, 0};
  std::vector<double> arr2{-3.5, -2.5, -1.5, 2.5};
  EXPECT_TRUE(function[IterationBufferKind::kContiguous](
      nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)},
      IterationBufferPointer{&arr2[0], Index(0), sizeof(double)}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-3, -2, 0, 0));
  EXPECT_TRUE(function[IterationBufferKind::kStrided](
      nullptr, {1, 2},
      IterationBufferPointer{&arr[0], Index(0), sizeof(int) * 2},
      IterationBufferPointer{&arr2[0], Index(0), sizeof(double)}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-3, -2, -2, 0));
  // Third pair would write arr[2] = (int)2.5 = 2 (>= 0), but the loop stops
  // after the second pair converts arr2[3] = 2.5 into arr[1] and returns
  // false.
  Index offsets[] = {0, sizeof(int), 2 * sizeof(int)};
  Index offsets2[] = {sizeof(double), sizeof(double) * 3, 0};
  EXPECT_FALSE(function[IterationBufferKind::kIndexed](
      nullptr, {1, 3}, IterationBufferPointer{&arr[0], Index(0), &offsets[0]},
      IterationBufferPointer{&arr2[0], Index(0), &offsets2[0]}));
  EXPECT_THAT(arr, ::testing::ElementsAre(-2, 2, -2, 0));
}
// Extra argument (`int* sum`) is forwarded through the loop to every call.
TEST(SimpleElementwiseFunctionTest, ArityOneExtraArgsIndexReturn) {
  struct AddOneA {
    bool operator()(int* x, int* sum) const {
      if (*x > 0) return false;
      *sum += *x;
      *x += 1;
      return true;
    }
  };
  ElementwiseFunction<1, int*> function =
      SimpleElementwiseFunction<AddOneA(int), int*>();
  std::vector<int> arr{-5, -6, 1, 2};
  {
    int sum = 0;
    EXPECT_TRUE(function[IterationBufferKind::kContiguous](
        nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), sizeof(int)},
        &sum));
    EXPECT_EQ(-11, sum);
    EXPECT_THAT(arr, ::testing::ElementsAre(-4, -5, 1, 2));
  }
  {
    int sum = 0;
    EXPECT_FALSE(function[IterationBufferKind::kStrided](
        nullptr, {1, 2},
        IterationBufferPointer{&arr[0], Index(0), sizeof(int) * 2}, &sum));
    EXPECT_THAT(arr, ::testing::ElementsAre(-3, -5, 1, 2));
    EXPECT_EQ(-4, sum);
  }
  {
    int sum = 0;
    Index offsets[] = {sizeof(int), sizeof(int)};
    EXPECT_TRUE(function[IterationBufferKind::kIndexed](
        nullptr, {1, 2}, IterationBufferPointer{&arr[0], Index(0), &offsets[0]},
        &sum));
    EXPECT_THAT(arr, ::testing::ElementsAre(-3, -3, 1, 2));
    EXPECT_EQ(-9, sum);
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/elementwise_function.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/elementwise_function_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
de686d71-386c-4413-83f6-6378d7e0f65c | cpp | tensorflow/tensorflow | cpu_client | third_party/xla/xla/pjrt/cpu/cpu_client.cc | third_party/xla/xla/pjrt/cpu/cpu_client_test.cc | #include "xla/pjrt/cpu/cpu_client.h"
#define EIGEN_USE_THREADS
#include <algorithm>
#include <cfenv>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "mlir/IR/BuiltinOps.h"
#include "xla/array.h"
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/backends/cpu/runtime/thunk_executor.h"
#include "xla/client/executable_build_options.h"
#include "xla/debug_options_flags.h"
#include "xla/executable_run_options.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/compile_options.pb.h"
#include "xla/pjrt/cpu/abstract_tfrt_cpu_buffer.h"
#include "xla/pjrt/cpu/cpu_topology.h"
#include "xla/pjrt/cpu/tracked_tfrt_cpu_device_buffer.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/mlir_to_hlo.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_compiler.h"
#include "xla/pjrt/pjrt_device_description.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/pjrt/semaphore.h"
#include "xla/pjrt/transpose.h"
#include "xla/pjrt/utils.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/service/cpu/collectives_interface.h"
#include "xla/service/cpu/cpu_compiler.h"
#include "xla/service/cpu/cpu_event.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/cpu/cpu_executable_run_options.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/cpu_xfeed.h"
#include "xla/service/cpu/simple_orc_jit.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_status_internal.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_module_util.h"
#include "xla/service/hlo_value.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/setround.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/connected_traceme.h"
#include "tsl/profiler/lib/context_types.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
namespace {
// Allocates an uninitialized TfrtCpuBuffer of `on_device_shape` on `device`,
// whose readiness is gated by `definition_events`.
absl::StatusOr<std::unique_ptr<TfrtCpuBuffer>> AllocateDestinationBuffer(
    const Shape& on_device_shape,
    absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events,
    TfrtCpuDevice* device, TfrtCpuClient* client) {
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
      AbstractTfrtCpuBuffer::AllocateTrackedDeviceBuffer(
          on_device_shape, std::move(definition_events)));
  return std::make_unique<TfrtCpuBuffer>(
      on_device_shape, std::move(tracked_device_buffer), client, device,
      *device->default_memory_space());
}
// As above, but also creates the definition events internally and returns
// the corresponding async values in `*avs` so the caller can mark them
// available (or set an error) once the data is produced.
absl::StatusOr<std::unique_ptr<TfrtCpuBuffer>> AllocateDestinationBufferAndAvs(
    const Shape& shape,
    absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4>* avs,
    TfrtCpuDevice* device, TfrtCpuClient* client) {
  absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events;
  AbstractTfrtCpuBuffer::AllocateAvsAndEvents(shape, avs, &definition_events);
  return AllocateDestinationBuffer(
      shape, std::move(definition_events),
      tensorflow::down_cast<TfrtCpuDevice*>(device), client);
}
// Platform name reported by device descriptions.
const char kCpuPlatformName[] = "cpu";
// Schedules `callee` on `pool`.  `absl::AnyInvocable` is move-only while
// `ThreadPool::Schedule` requires a copyable callable, so the invocable is
// heap-allocated and the wrapper lambda (which only carries the raw pointer)
// deletes it after running.
void EnqueueWork(tsl::thread::ThreadPool* pool,
                 absl::AnyInvocable<void()> callee) {
  pool->Schedule([ptr = new absl::AnyInvocable<void()>(std::move(callee))]() {
    (*ptr)();
    delete ptr;
  });
}
// Enqueues `callee` onto `pool` once all `values` become available.  The
// continuation runs on the thread pool rather than inline on the thread that
// makes the last value available.
void EnqueueWorkWhenReady(
    tsl::thread::ThreadPool* pool,
    absl::Span<const tsl::RCReference<tsl::AsyncValue>> values,
    absl::AnyInvocable<void()> callee) {
  RunWhenReady(values, [pool, callee = std::move(callee)]() mutable {
    EnqueueWork(pool, std::move(callee));
  });
}
// AsyncWorkRunner backed by a (non-owned) tsl::thread::ThreadPool; the pool
// must outlive the runner.
class ThreadPoolAsyncWorkRunner : public AsyncWorkRunner {
 public:
  explicit ThreadPoolAsyncWorkRunner(tsl::thread::ThreadPool* pool)
      : pool_(pool) {}
  void Schedule(absl::AnyInvocable<void()> work) override {
    EnqueueWork(pool_, std::move(work));
  }
  void ScheduleWhenReady(
      absl::Span<const tsl::RCReference<tsl::AsyncValue>> values,
      absl::AnyInvocable<void()> work) override {
    EnqueueWorkWhenReady(pool_, values, std::move(work));
  }
 private:
  tsl::thread::ThreadPool* pool_;
};
// CPU implementation of the async host-to-device transfer manager: allocates
// one destination buffer per shape up front and lets callers stream data in.
class TfrtCpuAsyncHostToDeviceTransferManager
    : public AbstractAsyncHostToHostMemoryTransferManager {
 public:
  // Allocates destination buffers (and their definition async values) for
  // `shapes` on `device`.  Tuple shapes are rejected.
  static absl::StatusOr<
      std::unique_ptr<TfrtCpuAsyncHostToDeviceTransferManager>>
  Create(absl::Span<const Shape> shapes, TfrtCpuDevice* device,
         TfrtCpuClient* client) {
    absl::InlinedVector<std::unique_ptr<AbstractTfrtCpuBuffer>, 4> buffers;
    buffers.reserve(shapes.size());
    absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> avs;
    avs.reserve(shapes.size());
    for (const auto& shape : shapes) {
      if (shape.IsTuple()) {
        return Unimplemented(
            "Tuples are not supported by "
            "TfrtCpuAsyncHostToDeviceTransferManager");
      }
      // A non-tuple shape yields exactly one definition async value.
      absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> local_avs;
      TF_ASSIGN_OR_RETURN(auto buffer, AllocateDestinationBufferAndAvs(
                                           shape, &local_avs, device, client));
      CHECK_EQ(local_avs.size(), 1);
      avs.push_back(std::move(local_avs[0]));
      buffers.push_back(std::move(buffer));
    }
    absl::InlinedVector<TrackedTfrtCpuDeviceBuffer*, 4> device_buffers;
    absl::InlinedVector<size_t, 4> buffer_sizes;
    absl::InlinedVector<int64_t, 4> buffer_transfers_in_flight;
    absl::InlinedVector<bool, 4> last_transfer_finished;
    TF_RETURN_IF_ERROR(
        AbstractAsyncHostToHostMemoryTransferManager::
            PopulateAsyncTransferManagerData(
                buffers, device_buffers, buffer_sizes,
                buffer_transfers_in_flight, last_transfer_finished));
    // WrapUnique: the constructor is private.
    return absl::WrapUnique(new TfrtCpuAsyncHostToDeviceTransferManager(
        std::move(avs), std::move(buffers), std::move(device_buffers),
        std::move(buffer_sizes), std::move(buffer_transfers_in_flight),
        std::move(last_transfer_finished), client->async_work_runner(),
        device));
  }
  PjRtDevice* device() const override { return device_; }
 private:
  TfrtCpuAsyncHostToDeviceTransferManager(
      absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> avs,
      absl::InlinedVector<std::unique_ptr<AbstractTfrtCpuBuffer>, 4> buffers,
      absl::InlinedVector<TrackedTfrtCpuDeviceBuffer*, 4> device_buffers,
      absl::InlinedVector<size_t, 4> buffer_sizes,
      absl::InlinedVector<int64_t, 4> buffer_transfers_in_flight,
      absl::InlinedVector<bool, 4> last_transfer_finished,
      AsyncWorkRunner* async_work_runner, TfrtCpuDevice* device)
      : AbstractAsyncHostToHostMemoryTransferManager(
            std::move(avs), std::move(buffers), std::move(device_buffers),
            std::move(buffer_sizes), std::move(buffer_transfers_in_flight),
            std::move(last_transfer_finished), async_work_runner),
        device_(device) {}
  // Non-owned target device.
  TfrtCpuDevice* device_;
};
}
// Packs (process_id, local_device_id) into the global device id and caches
// the debug/display strings.
TfrtCpuDeviceDescription::TfrtCpuDeviceDescription(int process_id,
                                                   int local_device_id)
    : id_(PackCpuDeviceId(process_id, local_device_id)),
      process_index_(process_id),
      local_hardware_id_(local_device_id) {
  debug_string_ = absl::StrCat("TFRT_CPU_", id_.value());
  to_string_ = absl::StrCat("CpuDevice(id=", id_.value(), ")");
}
// All CPU devices report the same kind string.
absl::string_view TfrtCpuDeviceDescription::device_kind() const {
  return kCpuPlatformName;
}
absl::string_view TfrtCpuDeviceDescription::DebugString() const {
  return debug_string_;
}
absl::string_view TfrtCpuDeviceDescription::ToString() const {
  return to_string_;
}
// Builds a topology description by projecting each device onto its
// (process_index, local_hardware_id) pair.
TfrtCpuTopologyDescription TfrtCpuTopologyDescription::Create(
    PjRtPlatformId platform_id, absl::string_view platform_name,
    absl::string_view platform_version,
    absl::Span<const std::unique_ptr<TfrtCpuDevice>> devices,
    absl::Span<const std::string> machine_attributes) {
  std::vector<CpuTopology::CpuDevice> cpu_devices;
  cpu_devices.reserve(devices.size());
  for (auto& device : devices) {
    cpu_devices.push_back(CpuTopology::CpuDevice{
        device->process_index(), device->local_hardware_id().value()});
  }
  return TfrtCpuTopologyDescription(platform_id, platform_name,
                                    platform_version, cpu_devices,
                                    machine_attributes);
}
// Returns the default layout for an array of `element_type` with
// dimensions `dims`.
absl::StatusOr<Layout> TfrtCpuTopologyDescription::GetDefaultLayout(
    PrimitiveType element_type, absl::Span<const int64_t> dims) const {
  return LayoutUtil::GetWithDefaultLayout(
             ShapeUtil::MakeShape(element_type, dims))
      .layout();
}
// Serializes the topology proto deterministically (stable byte output for
// identical topologies); fails if proto serialization fails.
absl::StatusOr<std::string> TfrtCpuTopologyDescription::Serialize() const {
  std::string result;
  if (!tsl::SerializeToStringDeterministic(cpu_topology_.ToProto(), &result)) {
    return absl::InternalError("Failed to serialize cpu_topology");
  }
  return result;
}
// Materializes one TfrtCpuDeviceDescription per device in the topology.
std::vector<std::unique_ptr<const PjRtDeviceDescription>>
TfrtCpuTopologyDescription::DeviceDescriptions() const {
  std::vector<std::unique_ptr<const PjRtDeviceDescription>> devices;
  devices.reserve(cpu_topology_.number_of_devices());
  for (const CpuTopology::CpuDevice& device : cpu_topology_.devices()) {
    devices.push_back(std::make_unique<TfrtCpuDeviceDescription>(
        device.process_id, device.local_device_id));
  }
  return devices;
}
// `max_inflight_computations` bounds concurrently enqueued executions on this
// device via the semaphore.
TfrtCpuDevice::TfrtCpuDevice(int process_id, int local_device_id,
                             int max_inflight_computations)
    : description_(process_id, local_device_id),
      max_inflight_computations_semaphore_(
          max_inflight_computations) {}
// Infeed/outfeed are synchronous and keyed by the local hardware id.
absl::Status TfrtCpuDevice::TransferToInfeed(const LiteralSlice& literal) {
  return TransferLiteralToInfeedOnCpu(local_hardware_id().value(), literal);
}
absl::Status TfrtCpuDevice::TransferFromOutfeed(
    MutableBorrowingLiteral literal) {
  return TransferLiteralFromOutfeedOnCpu(local_hardware_id().value(), literal);
}
// Registers `memory_space` (non-owned) with this device.  The memory space
// must belong to the same client; it is indexed both in declaration order
// and by kind id.
void TfrtCpuDevice::AttachMemorySpace(PjRtMemorySpace* memory_space) {
  CHECK(memory_space != nullptr);
  CHECK(client_ == memory_space->client()) << absl::StrFormat(
      "Could not attach a TfrtCpuDevice to a PjRtMemorySpace owned by a "
      "different client, the device's client: %s, the memory space's client: "
      "%s.",
      client_->platform_name(), memory_space->client()->platform_name());
  memory_spaces_.push_back(memory_space);
  memory_spaces_by_id_.emplace(memory_space->kind_id(), memory_space);
}
absl::Span<PjRtMemorySpace* const> TfrtCpuDevice::memory_spaces() const {
  return memory_spaces_;
}
// The default memory space on CPU is the unpinned host space.
absl::StatusOr<PjRtMemorySpace*> TfrtCpuDevice::default_memory_space() const {
  return memory_space_by_kind_id(UnpinnedHostMemorySpace::kKindId);
}
// Returns the first attached memory space whose kind string equals
// `memory_space_kind`, or an internal error if none matches.
absl::StatusOr<PjRtMemorySpace*> TfrtCpuDevice::memory_space_by_kind(
    absl::string_view memory_space_kind) const {
  for (PjRtMemorySpace* memory_space : memory_spaces_) {
    if (memory_space->kind() == memory_space_kind) {
      return memory_space;
    }
  }
  return absl::InternalError(
      absl::StrCat("No memory space found (kind: ", memory_space_kind, ")"));
}
// Looks up an attached memory space by its kind id; internal error if this
// device has no space with that id.
absl::StatusOr<PjRtMemorySpace*> TfrtCpuDevice::memory_space_by_kind_id(
    int id) const {
  if (auto it = memory_spaces_by_id_.find(id);
      it != memory_spaces_by_id_.end()) {
    return it->second;
  }
  return absl::InternalError(
      absl::StrCat("No memory space found (kind_id: ", id, ")"));
}
// Default device count, taken from the `xla_force_host_platform_device_count`
// debug flag.
static int CpuDeviceCount() {
  return GetDebugOptionsFromFlags().xla_force_host_platform_device_count();
}
// Creates a TfrtCpuClient configured by `options`.
//
// The device count defaults to the `xla_force_host_platform_device_count`
// debug flag when `options.cpu_device_count` is unset; the client thread
// pool is sized to at least one thread per device.
absl::StatusOr<std::unique_ptr<PjRtClient>> GetTfrtCpuClient(
    const CpuClientOptions& options) {
  int cpu_device_count = options.cpu_device_count.value_or(CpuDeviceCount());
  size_t num_threads = std::max(DefaultThreadPoolSize(), cpu_device_count);
  std::vector<std::unique_ptr<TfrtCpuDevice>> devices;
  devices.reserve(cpu_device_count);
  for (int i = 0; i < cpu_device_count; ++i) {
    devices.push_back(std::make_unique<TfrtCpuDevice>(
        options.process_id, /*local_device_id=*/i,
        options.max_inflight_computations_per_device));
  }
  return std::unique_ptr<PjRtClient>(std::make_unique<TfrtCpuClient>(
      options.process_id, std::move(devices),
      // `options` is a const reference, so the previous
      // `std::move(options.collectives)` was a silent copy anyway
      // (move-from-const); spell the shared_ptr copy explicitly.
      options.collectives, num_threads, options.asynchronous));
}
// Cap on the Eigen intra-op thread pool size.
static const size_t kMaxIntraOpThreads = 256;
// Thread options shared by the client's pools; 8 MiB stacks accommodate deep
// compiler/runtime call chains.
static tsl::ThreadOptions GetThreadOptions() {
  tsl::ThreadOptions thread_options;
  thread_options.stack_size = 8 * 1024 * 1024;
  return thread_options;
}
// Constructs the client: sets up the Eigen intra-op pool, the client work
// pool/runner, device bookkeeping, and one unpinned-host memory space per
// addressable device.
TfrtCpuClient::TfrtCpuClient(
    int process_index, std::vector<std::unique_ptr<TfrtCpuDevice>> devices,
    std::shared_ptr<cpu::CollectivesInterface> collectives, size_t num_threads,
    bool asynchronous)
    : process_index_(process_index),
      owned_devices_(std::move(devices)),
      computation_placer_(std::make_unique<ComputationPlacer>()),
      eigen_intraop_pool_(new tsl::thread::ThreadPool(
          tsl::Env::Default(), GetThreadOptions(), "XLAEigen",
          std::min(num_threads, kMaxIntraOpThreads))),
      eigen_intraop_device_(
          new Eigen::ThreadPoolDevice(eigen_intraop_pool_->AsEigenThreadPool(),
                                      eigen_intraop_pool_->NumThreads())),
      pjrt_client_thread_pool_(
          new tsl::thread::ThreadPool(tsl::Env::Default(), GetThreadOptions(),
                                      "XLATfrtCpuClient", num_threads)),
      async_work_runner_(std::make_unique<ThreadPoolAsyncWorkRunner>(
          pjrt_client_thread_pool_.get())),
      last_collective_launch_event_(
          tsl::MakeAvailableAsyncValueRef<CpuEvent>()),
      transpose_cache_(1024),
      collectives_(std::move(collectives)),
      topology_(TfrtCpuTopologyDescription::Create(
          platform_id(), platform_name(), platform_version(), owned_devices_,
          cpu::DetectMachineAttributes())),
      asynchronous_(asynchronous) {
  // Index devices by global id and, for addressable devices, by local
  // hardware id (which doubles as the slot in addressable_devices_).
  for (const std::unique_ptr<TfrtCpuDevice>& device : owned_devices_) {
    devices_.push_back(device.get());
    CHECK(
        id_to_device_.insert({device->global_device_id(), device.get()}).second)
        << "Duplicate device id: " << device->global_device_id();
    device->SetClient(this);
    if (device->IsAddressable()) {
      int idx = device->local_hardware_id().value();
      if (idx >= addressable_devices_.size()) {
        addressable_devices_.resize(idx + 1);
      }
      CHECK(addressable_devices_[idx] == nullptr) << idx;
      addressable_devices_[idx] = device.get();
    }
  }
  // Every addressable slot must have been filled, and each such device gets
  // an unpinned-host memory space owned by the client.
  for (int idx = 0; idx < addressable_devices_.size(); ++idx) {
    auto* const device = addressable_devices_[idx];
    CHECK(device != nullptr) << idx;
    const int id = device->id();
    auto memory_space = std::make_unique<UnpinnedHostMemorySpace>(id, device);
    tensorflow::down_cast<TfrtCpuDevice*>(device)->AttachMemorySpace(
        memory_space.get());
    memory_spaces_.push_back(memory_space.get());
    owned_memory_spaces_.push_back(std::move(memory_space));
  }
  VLOG(1) << "TfrtCpuClient created.";
}
TfrtCpuClient::~TfrtCpuClient() { VLOG(1) << "TfrtCpuClient destroyed."; }
// Resolves a global device id to the device object; invalid-argument if the
// id is unknown to this client.
absl::StatusOr<PjRtDevice*> TfrtCpuClient::LookupDevice(
    xla::PjRtGlobalDeviceId global_device_id) const {
  auto it = id_to_device_.find(global_device_id);
  if (it == id_to_device_.end()) {
    return InvalidArgument("No matching device found for device_id %d",
                           global_device_id.value());
  }
  return it->second;
}
// Resolves a local device id to the addressable device object.  Linear scan
// is fine: the addressable device list is small.
absl::StatusOr<PjRtDevice*> TfrtCpuClient::LookupAddressableDevice(
    PjRtLocalDeviceId local_device_id) const {
  auto it = absl::c_find_if(addressable_devices_, [&](PjRtDevice* device) {
    return device->local_device_id() == local_device_id;
  });
  if (it == addressable_devices_.end()) {
    return InvalidArgument("No matching device found for local_device_id %d",
                           local_device_id.value());
  }
  return *it;
}
absl::Span<PjRtMemorySpace* const> TfrtCpuClient::memory_spaces() const {
  return memory_spaces_;
}
// When the whole (replicas x partitions) grid fits in this client's
// addressable devices, assign them directly in row-major order; otherwise
// defer to the generic ComputationPlacer.
absl::StatusOr<DeviceAssignment> TfrtCpuClient::GetDefaultDeviceAssignment(
    int num_replicas, int num_partitions) const {
  if (num_partitions * num_replicas <= addressable_devices().size()) {
    xla::DeviceAssignment assignment(num_replicas, num_partitions);
    for (int i = 0; i < num_replicas; ++i) {
      for (int j = 0; j < num_partitions; ++j) {
        assignment(i, j) =
            addressable_devices().at(i * num_partitions + j)->id();
      }
    }
    return assignment;
  }
  return computation_placer_->AssignDevices(num_replicas, num_partitions);
}
// Returns the default layout for an array of `element_type` with
// dimensions `dims`.
absl::StatusOr<Layout> TfrtCpuClient::GetDefaultLayout(
    PrimitiveType element_type, absl::Span<const int64_t> dims) {
  return LayoutUtil::GetWithDefaultLayout(
             ShapeUtil::MakeShape(element_type, dims))
      .layout();
}
// Cost analysis uses the CPU executable's byte-size function for shapes.
absl::StatusOr<std::unique_ptr<HloCostAnalysis>>
TfrtCpuClient::GetHloCostAnalysis() const {
  return std::make_unique<HloCostAnalysis>(cpu::CpuExecutable::ShapeSizeBytes);
}
// Returns the dataflow value set of the entry computation's root
// instruction, i.e. the values that make up the module's result.
static const InstructionValueSet& GetRootValueSet(
    const BufferAssignment& assignment, const HloModule& module) {
  return assignment.dataflow_analysis().GetInstructionValueSet(
      module.entry_computation()->root_instruction());
}
// Returns the buffer allocation index backing each leaf of the module's
// result: one entry for a non-tuple result, one per top-level tuple element
// otherwise.  Each leaf is required to have a unique defining value/slice.
static absl::StatusOr<absl::InlinedVector<BufferAllocation::Index, 4>>
FindResultBufferAllocationIndex(const BufferAssignment& assignment,
                                const HloModule& module) {
  absl::InlinedVector<BufferAllocation::Index, 4> buffer_indices;
  const InstructionValueSet& root_value_set =
      GetRootValueSet(assignment, module);
  // Appends the allocation index of the unique value at shape `index` within
  // the root instruction's value set (shared by the tuple and non-tuple
  // paths below).
  auto append_buffer_index = [&](const ShapeIndex& index) -> absl::Status {
    const HloValueSet& sources = root_value_set.element(index);
    CHECK_EQ(1, sources.values().size());
    const HloValue* value_source = sources.values()[0];
    HloInstruction* src = value_source->instruction();
    TF_ASSIGN_OR_RETURN(const BufferAllocation::Slice slice,
                        assignment.GetUniqueSlice(src, value_source->index()));
    buffer_indices.push_back(slice.index());
    return absl::OkStatus();
  };
  const Shape& result_shape = module.result_shape();
  if (!result_shape.IsTuple()) {
    TF_RETURN_IF_ERROR(append_buffer_index({}));
    return {std::move(buffer_indices)};
  }
  buffer_indices.reserve(result_shape.tuple_shapes_size());
  for (int i = 0; i < result_shape.tuple_shapes_size(); ++i) {
    TF_RETURN_IF_ERROR(append_buffer_index({i}));
  }
  return {std::move(buffer_indices)};
}
// Serializes this executable for later DeserializeExecutable():
// AOT-exports the compiled CPU executable, then bundles the resulting bytes
// together with the compile options into an ExecutableAndOptionsProto.
absl::StatusOr<std::string> TfrtCpuExecutable::SerializeExecutable() const {
  cpu::CpuCompiler compiler;
  TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result,
                      compiler.Export(cpu_executable_.get()));
  TF_ASSIGN_OR_RETURN(std::string serialized, aot_result->SerializeAsString());
  // An empty string signals that proto serialization silently failed.
  if (serialized.empty()) {
    return Internal(
        "TfrtCpuClient::SerializeExecutable proto serialization failed");
  }
  ExecutableAndOptionsProto proto;
  *proto.mutable_serialized_executable() = std::move(serialized);
  TF_ASSIGN_OR_RETURN(*proto.mutable_compile_options(),
                      compile_options_.ToProto());
  return proto.SerializeAsString();
}
// Reconstructs a loaded executable from the string produced by
// TfrtCpuExecutable::SerializeExecutable(). If `options` is provided, it
// replaces the compile options embedded in the serialized proto.
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>>
TfrtCpuClient::DeserializeExecutable(absl::string_view serialized,
                                     std::optional<CompileOptions> options) {
  ExecutableAndOptionsProto proto;
  // ParseFromArray takes an int byte count, so inputs over 2GB cannot be
  // parsed; reject them explicitly instead of truncating.
  if (serialized.size() > std::numeric_limits<int>::max()) {
    return Internal(
        "TfrtCpuClient::DeserializeExecutable proto too large (>2GB)");
  }
  if (!proto.ParseFromArray(serialized.data(), serialized.size())) {
    return Internal(
        "TfrtCpuClient::DeserializeExecutable proto deserialization failed");
  }
  // Prefer caller-supplied compile options; otherwise restore the ones that
  // were serialized alongside the executable.
  CompileOptions compile_options;
  if (options.has_value()) {
    compile_options = *std::move(options);
  } else {
    TF_ASSIGN_OR_RETURN(compile_options,
                        CompileOptions::FromProto(proto.compile_options()));
  }
  auto input_options = compile_options;
  // Load the AOT compilation result and turn it back into a runnable
  // Executable (no stream executor is needed on CPU, hence nullptr).
  cpu::CpuCompiler compiler;
  std::string str = std::move(*proto.mutable_serialized_executable());
  TF_ASSIGN_OR_RETURN(std::unique_ptr<AotCompilationResult> aot_result,
                      compiler.LoadAotCompilationResult(str));
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<Executable> executable,
      aot_result->LoadExecutable(&compiler, nullptr));
  // Re-derive replica/partition counts and the device assignment from the
  // (possibly overridden) compile options.
  int num_replicas;
  int num_partitions;
  std::shared_ptr<DeviceAssignment> device_assignment;
  TF_RETURN_IF_ERROR(ParseDeviceAssignmentCompileOptions(
      compile_options.compile_portable_executable,
      &compile_options.executable_build_options,
      [this](int num_replicas, int num_partitions) {
        return this->GetDefaultDeviceAssignment(num_replicas, num_partitions);
      },
      &num_replicas, &num_partitions, &device_assignment));
  // Recover where the result lives in the executable's buffer assignment.
  auto cpu_executable_ptr =
      tensorflow::down_cast<cpu::CpuExecutable*>(executable.get());
  TF_ASSIGN_OR_RETURN(
      const BufferAllocation::Slice result_slice,
      cpu_executable_ptr->buffer_assignment().GetUniqueTopLevelOutputSlice());
  TF_ASSIGN_OR_RETURN(
      auto result_buffer_indices,
      FindResultBufferAllocationIndex(cpu_executable_ptr->buffer_assignment(),
                                      executable->module()));
  // Collect the (replica, partition) pairs whose assigned device belongs to
  // this process; devices from other processes are skipped.
  std::vector<PjRtLoadedExecutable::LogicalDeviceIds>
      addressable_device_logical_ids;
  std::vector<PjRtDevice*> addressable_devices;
  ExecutableBuildOptions& build_options =
      compile_options.executable_build_options;
  if (device_assignment != nullptr) {
    addressable_device_logical_ids.reserve(num_replicas * num_partitions);
    addressable_devices.reserve(num_replicas * num_partitions);
    for (int replica = 0; replica < num_replicas; ++replica) {
      for (int partition = 0; partition < num_partitions; ++partition) {
        PjRtGlobalDeviceId device_id((*device_assignment)(replica, partition));
        if (UnpackCpuProcessIndex(device_id) != process_index()) {
          VLOG(3) << "Non-local device: " << device_id;
          continue;
        }
        TF_ASSIGN_OR_RETURN(PjRtDevice * device, LookupDevice(device_id));
        PjRtLoadedExecutable::LogicalDeviceIds logica_device_ids;
        logica_device_ids.replica = replica;
        logica_device_ids.partition = partition;
        addressable_device_logical_ids.push_back(std::move(logica_device_ids));
        addressable_devices.push_back(device);
      }
    }
    if (addressable_devices.empty()) {
      return InvalidArgument(
          "Device assignment (%s) does not have any local devices.",
          device_assignment->ToString());
    }
    // Default the device ordinal to the first addressable device if unset.
    if (build_options.device_ordinal() < 0) {
      build_options.set_device_ordinal(
          addressable_devices.front()->local_hardware_id().value());
    }
  }
  // Package everything into a TfrtCpuExecutable and set up input-buffer
  // donation bookkeeping.
  auto tfrt_cpu_executable = std::make_unique<TfrtCpuExecutable>(
      num_replicas, num_partitions, std::move(device_assignment),
      compile_options.parameter_is_tupled_arguments, std::move(input_options),
      std::move(executable), result_slice.index(),
      std::move(result_buffer_indices),
      std::move(addressable_device_logical_ids), std::move(addressable_devices),
      this);
  TF_RETURN_IF_ERROR(tfrt_cpu_executable->SetUpDonation(
      compile_options.parameter_is_tupled_arguments));
  return std::unique_ptr<PjRtLoadedExecutable>(std::move(tfrt_cpu_executable));
}
// JIT-compiles an XlaComputation into a CPU Executable.
//
// Pipeline: build the HloModuleConfig from the program shape / layouts /
// execution options, deserialize the HLO proto into an HloModule, run the
// CPU compiler's HLO passes, then run the backend code generation.
// `num_threads` feeds the intra-op parallelism setting of the module config.
static absl::StatusOr<std::unique_ptr<xla::Executable>> JitCompile(
    const XlaComputation& computation,
    const absl::Span<const Shape* const> argument_layouts,
    const ExecutableBuildOptions& build_options,
    const ExecutionOptions& execution_options,
    const xla::Compiler::CompileOptions& compile_options, int num_threads) {
  TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
                      computation.GetProgramShape());
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloModuleConfig> hlo_module_config,
      CreateModuleConfig(program_shape, argument_layouts, &execution_options,
                         execution_options.num_replicas(), num_threads,
                         nullptr));
  const xla::HloModuleProto& hlo_module_proto = computation.proto();
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<HloModule> hlo_module,
      xla::HloModule::CreateFromProto(hlo_module_proto, *hlo_module_config));
  VLOG(3) << "Unoptimized HLO module: " << hlo_module->ToString();
  // Dump the pre-optimization module when HLO dumping is enabled.
  static constexpr char kBeforeOptimizationsDumpName[] = "before_optimizations";
  DumpHloModuleIfEnabled(*hlo_module, kBeforeOptimizationsDumpName);
  cpu::CpuCompiler compiler;
  TF_ASSIGN_OR_RETURN(hlo_module, compiler.RunHloPasses(std::move(hlo_module),
                                                        nullptr,
                                                        compile_options));
  return compiler.RunBackend(std::move(hlo_module), nullptr,
                             compile_options);
}
// Compiles `computation` into a loaded CPU executable.
//
// Steps: apply option overrides, resolve the device assignment, reject
// cross-process assignments when no collectives are configured, determine
// argument layouts, JIT-compile, and wrap the result in a TfrtCpuExecutable.
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> TfrtCpuClient::Compile(
    const XlaComputation& computation, CompileOptions options) {
  tsl::profiler::TraceMe traceme("TfrtCpuClient::Compile (XlaComputation)");
  // Keep a copy of the caller's options before overrides mutate them; this
  // original copy is what gets stored on the resulting executable.
  auto input_options = options;
  ExecutableBuildOptions& build_options = options.executable_build_options;
  TF_RETURN_IF_ERROR(options.ApplyAllOptionOverrides());
  int num_replicas;
  int num_partitions;
  std::shared_ptr<DeviceAssignment> device_assignment;
  TF_RETURN_IF_ERROR(ParseDeviceAssignmentCompileOptions(
      options.compile_portable_executable, &options.executable_build_options,
      [this](int num_replicas, int num_partitions) {
        return this->GetDefaultDeviceAssignment(num_replicas, num_partitions);
      },
      &num_replicas, &num_partitions, &device_assignment));
  // Without a collectives implementation, every device in the assignment
  // must belong to this process.
  if (collectives_ == nullptr && device_assignment) {
    for (int replica = 0; replica < device_assignment->replica_count();
         ++replica) {
      for (int computation = 0;
           computation < device_assignment->computation_count();
           ++computation) {
        PjRtGlobalDeviceId id((*device_assignment)(replica, computation));
        if (UnpackCpuProcessIndex(id) != process_index()) {
          return InvalidArgument(
              "Multiprocess computations aren't implemented on the CPU "
              "backend.");
        }
      }
    }
  }
  std::vector<const Shape*> argument_layout_pointers;
  TF_RETURN_IF_ERROR(DetermineArgumentLayoutsFromCompileOptions(
      computation, &LayoutUtil::GetWithDefaultLayout, options.argument_layouts,
      &options.executable_build_options, &argument_layout_pointers));
  // Collect the (replica, partition) pairs whose assigned device belongs to
  // this process; devices from other processes are skipped.
  std::vector<PjRtLoadedExecutable::LogicalDeviceIds>
      addressable_device_logical_ids;
  std::vector<PjRtDevice*> addressable_devices;
  if (device_assignment != nullptr) {
    addressable_device_logical_ids.reserve(num_replicas * num_partitions);
    addressable_devices.reserve(num_replicas * num_partitions);
    for (int replica = 0; replica < num_replicas; ++replica) {
      for (int partition = 0; partition < num_partitions; ++partition) {
        PjRtGlobalDeviceId device_id((*device_assignment)(replica, partition));
        if (UnpackCpuProcessIndex(device_id) != process_index()) {
          VLOG(3) << "Non-local device: " << device_id;
          continue;
        }
        TF_ASSIGN_OR_RETURN(PjRtDevice * device, LookupDevice(device_id));
        PjRtLoadedExecutable::LogicalDeviceIds logica_device_ids;
        logica_device_ids.replica = replica;
        logica_device_ids.partition = partition;
        addressable_device_logical_ids.push_back(std::move(logica_device_ids));
        addressable_devices.push_back(device);
      }
    }
    if (addressable_devices.empty()) {
      return InvalidArgument(
          "Device assignment (%s) does not have any local devices.",
          device_assignment->ToString());
    }
    // Default the build's device ordinal to the first local device.
    if (build_options.device_ordinal() < 0) {
      build_options.set_device_ordinal(
          addressable_devices.front()->local_hardware_id().value());
    }
  }
  TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
                      computation.GetProgramShape());
  ExecutionOptions execution_options =
      CreateExecutionOptions(build_options, &program_shape);
  xla::Compiler::CompileOptions compile_options{
      build_options.device_allocator(), build_options.compile_thread_pool(),
      build_options.layout_canonicalization_callback()};
  // Fall back to the client's own thread pool if the build options did not
  // provide one.
  if (!compile_options.thread_pool) {
    compile_options.thread_pool = pjrt_client_thread_pool();
  }
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<Executable> cpu_executable,
      JitCompile(computation, argument_layout_pointers, build_options,
                 execution_options, compile_options,
                 eigen_intraop_device()->getPool()->NumThreads()));
  // Record where the (possibly tuple) result lives in the buffer assignment.
  auto cpu_executable_ptr =
      tensorflow::down_cast<cpu::CpuExecutable*>(cpu_executable.get());
  TF_ASSIGN_OR_RETURN(
      const BufferAllocation::Slice result_slice,
      cpu_executable_ptr->buffer_assignment().GetUniqueTopLevelOutputSlice());
  TF_ASSIGN_OR_RETURN(
      auto result_buffer_indices,
      FindResultBufferAllocationIndex(cpu_executable_ptr->buffer_assignment(),
                                      cpu_executable->module()));
  auto executable = std::make_unique<TfrtCpuExecutable>(
      num_replicas, num_partitions, std::move(device_assignment),
      options.parameter_is_tupled_arguments, std::move(input_options),
      std::move(cpu_executable), result_slice.index(),
      std::move(result_buffer_indices),
      std::move(addressable_device_logical_ids), std::move(addressable_devices),
      this);
  TF_RETURN_IF_ERROR(
      executable->SetUpDonation(options.parameter_is_tupled_arguments));
  return std::unique_ptr<PjRtLoadedExecutable>(std::move(executable));
}
// Compiles an MLIR module by first lowering it to an XlaComputation and then
// delegating to the XlaComputation overload of Compile().
absl::StatusOr<std::unique_ptr<PjRtLoadedExecutable>> TfrtCpuClient::Compile(
    mlir::ModuleOp module, CompileOptions options) {
  tsl::profiler::TraceMe traceme("TfrtCpuClient::Compile (mlir::ModuleOp)");
  const ExecutableBuildOptions& exec_build_options =
      options.executable_build_options;
  XlaComputation lowered_computation;
  TF_RETURN_IF_ERROR(MlirToXlaComputation(
      module, lowered_computation, options.parameter_is_tupled_arguments,
      false, exec_build_options.use_shardy_partitioner()));
  return Compile(lowered_computation, options);
}
// Wraps caller-owned memory at `device_ptr` in a non-owning PjRtBuffer of the
// given shape. `on_delete_callback` is invoked when the tracked buffer is
// released, letting the caller reclaim the memory.
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::CreateViewOfDeviceBuffer(
    void* device_ptr, const Shape& shape, PjRtDevice* device,
    std::function<void()> on_delete_callback,
    std::optional<std::intptr_t> stream) {
  // Streams are not meaningful for the CPU backend.
  if (stream.has_value()) {
    return Unimplemented(
        "TfrtCpuClient::CreateViewOfDeviceBuffer does not support `stream` "
        "argument.");
  }
  const size_t byte_size = ShapeUtil::ByteSizeOf(shape);
  absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers;
  buffers.push_back(tsl::MakeAvailableAsyncValueRef<MaybeOwningCpuMemory>(
      device_ptr, byte_size));
  auto tracked = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
      false, false, std::move(buffers),
      tsl::MakeAvailableAsyncValueRef<CpuEvent>(),
      std::move(on_delete_callback));
  return std::unique_ptr<PjRtBuffer>(std::make_unique<TfrtCpuBuffer>(
      shape, std::move(tracked), this,
      tensorflow::down_cast<TfrtCpuDevice*>(device),
      *device->default_memory_space()));
}
// Creates a buffer whose definition event is already in the error state, so
// any consumer observes `error` instead of data. The device must belong to
// this client.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::CreateErrorBuffer(
    absl::Status error, const Shape& shape, PjRtDevice* device) {
  if (device->client() != this) {
    return absl::InvalidArgumentError("Device is not attached to this client");
  }
  // The tracked buffer carries no memory at all — only an error event.
  auto error_event =
      tsl::AsyncValueRef<CpuEvent>(tsl::MakeErrorAsyncValueRef(std::move(error)));
  auto tracked = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
      false, true,
      absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>{},
      absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4>{
          std::move(error_event)});
  return std::make_unique<TfrtCpuBuffer>(
      shape, std::move(tracked), this,
      tensorflow::down_cast<TfrtCpuDevice*>(device),
      *device->default_memory_space());
}
// Memory-space overload: delegates to the device overload using the memory
// space's first (and only, on CPU) device.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::CreateErrorBuffer(
    absl::Status error, const Shape& shape, PjRtMemorySpace* memory) {
  PjRtDevice* const device = memory->devices()[0];
  return CreateErrorBuffer(std::move(error), shape, device);
}
// Allocates a destination buffer of `shape` on `device` without writing any
// data into it (contents are uninitialized).
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::CreateUninitializedBuffer(const Shape& shape,
                                         PjRtDevice* device) {
  tsl::profiler::TraceMe traceme("TfrtCpuClient::CreateUninitializedBuffer");
  VLOG(1) << "TfrtCpuClient::CreateUninitializedBuffer: shape: "
          << shape.DebugString() << " device: " << device->DebugString();
  return AllocateDestinationBuffer(
      shape, {},
      tensorflow::down_cast<TfrtCpuDevice*>(device), this);
}
// Creates a transfer manager that will produce one buffer per entry in
// `shapes` on the given CPU device.
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
TfrtCpuClient::CreateBuffersForAsyncHostToDevice(absl::Span<const Shape> shapes,
                                                 PjRtDevice* device) {
  return TfrtCpuAsyncHostToDeviceTransferManager::Create(
      shapes, tensorflow::down_cast<TfrtCpuDevice*>(device), this);
}
// Memory-space overload: a CPU memory space maps to exactly one device, so
// forward to the device overload.
absl::StatusOr<std::unique_ptr<PjRtClient::AsyncHostToDeviceTransferManager>>
TfrtCpuClient::CreateBuffersForAsyncHostToDevice(
    absl::Span<const Shape> shapes, PjRtMemorySpace* memory_space) {
  CHECK_EQ(memory_space->devices().size(), 1);
  PjRtDevice* const device = memory_space->devices()[0];
  return CreateBuffersForAsyncHostToDevice(shapes, device);
}
// Creates a device buffer from host memory at `data`, honoring the requested
// host-buffer semantics (zero-copy vs. copy). The heavy lifting — possible
// transpose for strided inputs and the actual copy — happens in
// AbstractTfrtCpuBuffer::BufferFromHostBufferHelper on the async work runner.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
    const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
    std::optional<absl::Span<int64_t const>> byte_strides,
    HostBufferSemantics host_buffer_semantics,
    absl::AnyInvocable<void() &&> on_done_with_host_buffer,
    PjRtDevice* device) {
  tsl::profiler::TraceMe traceme("TfrtCpuClient::BufferFromHostBuffer");
  Shape shape = ShapeUtil::MakeShape(type, dims);
  VLOG(2) << "TfrtCpuClient::BufferFromHostBuffer: shape: " << shape.ToString()
          << " device: " << device->DebugString();
  if (!device->IsAddressable()) {
    return InvalidArgument("Cannot copy array to non-addressable device %s",
                           device->DebugString());
  }
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
      AbstractTfrtCpuBuffer::BufferFromHostBufferHelper(
          data, type, dims, byte_strides, host_buffer_semantics,
          std::move(on_done_with_host_buffer), shape, async_work_runner(),
          &transpose_mu_, &transpose_cache_));
  return std::unique_ptr<PjRtBuffer>(std::make_unique<TfrtCpuBuffer>(
      shape, std::move(tracked_device_buffer), this,
      tensorflow::down_cast<TfrtCpuDevice*>(device),
      *device->default_memory_space()));
}
// Overload accepting an explicit device layout. The CPU backend does not
// implement custom device layouts; only a null layout is accepted, in which
// case this delegates to the layout-less overload.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
    const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
    std::optional<absl::Span<int64_t const>> byte_strides,
    HostBufferSemantics host_buffer_semantics,
    absl::AnyInvocable<void() &&> on_done_with_host_buffer, PjRtDevice* device,
    const Layout* device_layout) {
  if (device_layout == nullptr) {
    return BufferFromHostBuffer(data, type, dims, byte_strides,
                                host_buffer_semantics,
                                std::move(on_done_with_host_buffer), device);
  }
  return absl::UnimplementedError(absl::StrCat(
      "BufferFromHostBuffer with an optional device layout is not "
      "implemented on platform: ",
      platform_name()));
}
// Memory-space overload: resolve the memory space's single device and
// delegate to the device overload.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
    const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
    std::optional<absl::Span<int64_t const>> byte_strides,
    HostBufferSemantics host_buffer_semantics,
    absl::AnyInvocable<void() &&> on_done_with_host_buffer,
    PjRtMemorySpace* memory_space, const Layout* device_layout) {
  CHECK_EQ(memory_space->devices().size(), 1);
  PjRtDevice* const device = memory_space->devices()[0];
  return BufferFromHostBuffer(data, type, dims, byte_strides,
                              host_buffer_semantics,
                              std::move(on_done_with_host_buffer), device,
                              device_layout);
}
// Creates a device buffer from a host literal. Allocates the destination
// buffer plus its per-leaf availability async values, then copies the
// literal's contents asynchronously on the client's work runner.
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::BufferFromHostLiteral(const LiteralSlice& literal,
                                     PjRtDevice* device) {
  tsl::profiler::TraceMe traceme("TfrtCpuClient::BufferFromHostLiteral");
  VLOG(1) << "TfrtCpuClient::BufferFromHostLiteral: shape: "
          << literal.shape().DebugString()
          << " device: " << device->DebugString();
  const Shape& shape = literal.shape();
  absl::InlinedVector<tsl::RCReference<tsl::AsyncValue>, 4> avs;
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<TfrtCpuBuffer> output_buffer,
      AllocateDestinationBufferAndAvs(
          shape, &avs, tensorflow::down_cast<TfrtCpuDevice*>(device), this));
  // The copy runs asynchronously; `avs` are fulfilled as leaves complete.
  output_buffer->CopyFromLiteral(literal, shape, &avs, async_work_runner());
  return std::unique_ptr<PjRtBuffer>(std::move(output_buffer));
}
// Memory-space overload: forward to the device overload via the memory
// space's single device.
absl::StatusOr<std::unique_ptr<PjRtBuffer>>
TfrtCpuClient::BufferFromHostLiteral(const LiteralSlice& literal,
                                     PjRtMemorySpace* memory_space) {
  CHECK_EQ(memory_space->devices().size(), 1);
  PjRtDevice* const device = memory_space->devices()[0];
  return BufferFromHostLiteral(literal, device);
}
// Constructs a TfrtCpuBuffer: the shape and tracked device memory are handed
// to the AbstractTfrtCpuBuffer base; the client/device/memory-space pointers
// are stored for later lookups (no ownership is taken of any of them).
TfrtCpuBuffer::TfrtCpuBuffer(
    Shape on_device_shape,
    std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
    TfrtCpuClient* client, TfrtCpuDevice* device, PjRtMemorySpace* memory_space)
    : AbstractTfrtCpuBuffer(std::move(on_device_shape),
                            std::move(tracked_device_buffer)),
      client_(client),
      device_(device),
      memory_space_(memory_space) {}
// Returns a vector holding a fresh reference (CopyRef) to each event in
// `events`; the inputs themselves are left untouched.
static std::vector<tsl::RCReference<tsl::AsyncValue>> CopyAsyncValues(
    absl::Span<const tsl::RCReference<tsl::AsyncValue>> events) {
  std::vector<tsl::RCReference<tsl::AsyncValue>> copies;
  copies.reserve(events.size());
  for (const tsl::RCReference<tsl::AsyncValue>& event : events) {
    copies.push_back(event.CopyRef());
  }
  return copies;
}
// Asynchronously copies this buffer into `literal`; completion is signaled
// via the returned future. Work is scheduled on the client's work runner.
PjRtFuture<> TfrtCpuBuffer::ToLiteral(MutableLiteralBase* literal) {
  return ToLiteralHelper(literal, client()->async_work_runner());
}
// Like ToLiteral, but the destination literal is produced lazily by
// `generator`. A generator failure is surfaced as an already-failed future.
PjRtFuture<> TfrtCpuBuffer::LazyToLiteral(
    absl::AnyInvocable<absl::StatusOr<MutableLiteralBase*>() &&> generator) {
  absl::StatusOr<MutableLiteralBase*> literal = std::move(generator)();
  if (!literal.ok()) {
    return PjRtFuture<>(literal.status());
  }
  return ToLiteralHelper(*literal, client()->async_work_runner());
}
// Copies this buffer to `dst_device`. Same-device copies are rejected;
// copies to a device owned by a different client take a dedicated
// cross-client path; otherwise the bytes are copied asynchronously on this
// client's work runner and wrapped in a new TfrtCpuBuffer.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuBuffer::CopyToDevice(
    PjRtDevice* dst_device) {
  tsl::profiler::TraceMe traceme("TfrtCpuBuffer::CopyToDevice");
  if (dst_device == device_) {
    return InvalidArgument(
        "CopyToDevice cannot accept the same source and destination devices");
  }
  if (dst_device->client() != client_) {
    return CopyToDeviceAcrossClients(dst_device);
  }
  if (!dst_device->IsAddressable()) {
    return InvalidArgument("Cannot copy array to non-addressable device %s",
                           dst_device->DebugString());
  }
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tracked_device_buffer,
      CopyToDeviceHelper(client()->async_work_runner()));
  return std::unique_ptr<PjRtBuffer>(std::make_unique<TfrtCpuBuffer>(
      on_device_shape_, std::move(tracked_device_buffer), client(),
      tensorflow::down_cast<TfrtCpuDevice*>(dst_device),
      *dst_device->default_memory_space()));
}
// Memory-space copy: resolve the target memory space's single device and
// delegate to CopyToDevice.
absl::StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuBuffer::CopyToMemorySpace(
    PjRtMemorySpace* dst_memory_space) {
  CHECK_EQ(dst_memory_space->devices().size(), 1);
  PjRtDevice* const dst_device = dst_memory_space->devices()[0];
  return CopyToDevice(dst_device);
}
// Constructs a loaded CPU executable. Beyond storing the compiled artifacts
// and device assignment, the constructor:
//  * runs an HLO cost analysis to decide whether the computation is "cheap"
//    (< 1000 flops), which allows it to be run inline later;
//  * precomputes the expected byte size of every input buffer, flattening a
//    single tupled parameter into its per-element sizes.
TfrtCpuExecutable::TfrtCpuExecutable(
    int num_replicas, int num_partitions,
    std::shared_ptr<DeviceAssignment> device_assignment,
    bool parameter_is_tupled_arguments, CompileOptions compile_options,
    std::unique_ptr<Executable> cpu_executable,
    BufferAllocation::Index result_buffer_index,
    absl::InlinedVector<BufferAllocation::Index, 4> result_buffer_indices,
    std::vector<LogicalDeviceIds> addressable_device_logical_ids,
    std::vector<PjRtDevice*> addressable_devices, TfrtCpuClient* client)
    : client_(client),
      num_replicas_(num_replicas),
      num_partitions_(num_partitions),
      device_assignment_(std::move(device_assignment)),
      parameter_is_tupled_arguments_(parameter_is_tupled_arguments),
      compile_options_(std::move(compile_options)),
      cpu_executable_(std::move(cpu_executable)),
      result_buffer_index_(result_buffer_index),
      result_buffer_indices_(std::move(result_buffer_indices)),
      addressable_device_logical_ids_(
          std::move(addressable_device_logical_ids)),
      addressable_devices_(std::move(addressable_devices)) {
  // Classify the computation as cheap when its flop count is tiny.
  auto hlo_cost_analysis =
      std::make_unique<HloCostAnalysis>(cpu::CpuExecutable::ShapeSizeBytes);
  CHECK_OK(cpu_executable_->module().entry_computation()->Accept(
      hlo_cost_analysis.get()));
  cheap_computation_ = hlo_cost_analysis->flop_count() < 1000;
  const auto& computation_layout =
      cpu_executable_->module().entry_computation_layout();
  if (computation_layout.parameter_count() == 0) {
    return;
  }
  // Record expected input sizes: one entry per parameter, or — when the
  // program takes a single tuple parameter — one entry per tuple element.
  if (computation_layout.parameter_count() > 1 ||
      !computation_layout.parameter_shape(0).IsTuple()) {
    input_buffer_sizes_in_bytes_.reserve(computation_layout.parameter_count());
    for (int i = 0; i < computation_layout.parameter_count(); ++i) {
      input_buffer_sizes_in_bytes_.push_back(
          ShapeUtil::ByteSizeOf(computation_layout.parameter_shape(i)));
    }
  } else {
    input_buffer_sizes_in_bytes_.reserve(
        computation_layout.parameter_shape(0).tuple_shapes_size());
    for (int i = 0;
         i < computation_layout.parameter_shape(0).tuple_shapes_size(); ++i) {
      input_buffer_sizes_in_bytes_.push_back(ShapeUtil::ByteSizeOf(
          computation_layout.parameter_shape(0).tuple_shapes(i)));
    }
  }
}
// Deletion is a no-op for this executable implementation.
void TfrtCpuExecutable::Delete() {}
// Always false: this implementation never marks executables as deleted
// (Delete() is a no-op).
bool TfrtCpuExecutable::IsDeleted() { return false; }
// Fingerprinting is not provided by this implementation; report "no value".
absl::StatusOr<std::optional<std::string>> TfrtCpuExecutable::Fingerprint()
    const {
  std::optional<std::string> fingerprint;
  return fingerprint;
}
// Computes and caches which parameter indices must be donated to the
// computation (derived from the module's input/output aliasing).
// `tuple_inputs` indicates whether the parameters arrive as a single tuple.
absl::Status TfrtCpuExecutable::SetUpDonation(bool tuple_inputs) {
  TF_ASSIGN_OR_RETURN(parameters_that_must_be_donated_,
                      ComputeParametersThatMustBeDonated(
                          *cpu_executable_->shared_module(), tuple_inputs));
  return absl::OkStatus();
}
namespace {
// One entry of the execution buffer table: the (possibly not yet available)
// memory, whether the table entry owns it, and its size in bytes.
struct BufferInfo {
  tsl::AsyncValueRef<MaybeOwningCpuMemory> buffer;
  bool owns_buffer;
  size_t buffer_size;
};
// A batch of deferred allocations. `buffers` holds unconstructed async
// values that Allocate() later fulfills; `allocation_sizes` holds the
// matching byte counts (parallel arrays).
struct BufferAlloc {
  absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> buffers;
  absl::InlinedVector<size_t, 4> allocation_sizes;
  // Fulfills each pending buffer with freshly allocated memory. On the first
  // allocation failure, the error is set on that buffer and the loop stops
  // (subsequent buffers are left unfulfilled).
  void Allocate() {
    for (int i = 0; i < buffers.size(); ++i) {
      auto memory = MaybeOwningCpuMemory::Allocate(allocation_sizes[i]);
      if (!memory.ok()) {
        buffers[i].SetError(memory.status());
        return;
      }
      buffers[i].emplace(std::move(*memory));
      // Tell sanitizers the fresh allocation may be read before being
      // written by the computation.
      ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buffers[i]->data(),
                                          allocation_sizes[i]);
    }
  }
};
// Like BufferAlloc, but each new allocation is also filled with a copy of
// the corresponding (already available) source buffer.
struct BufferAllocAndCopy {
  absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> src_buffers;
  absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4> dst_buffers;
  absl::InlinedVector<size_t, 4> allocation_sizes;
  // Allocates each destination and memcpy's the source into it. Stops at the
  // first allocation failure after setting the error on that destination.
  void AllocateAndCopy() {
    for (int i = 0; i < src_buffers.size(); ++i) {
      auto memory = MaybeOwningCpuMemory::Allocate(allocation_sizes[i]);
      if (!memory.ok()) {
        dst_buffers[i].SetError(memory.status());
        return;
      }
      dst_buffers[i].emplace(std::move(*memory));
      // The source must already be concrete before it can be copied.
      CHECK(src_buffers[i].IsConcrete());
      std::memcpy(dst_buffers[i]->data(), src_buffers[i]->data(),
                  allocation_sizes[i]);
    }
  }
};
}  // namespace
// Resolves a single BufferAllocation to the memory that will back it during
// execution, recording any deferred work in `buffer_alloc` (plain
// allocations) or `buffer_alloc_and_copy` (allocate + defensive copy).
//
// Cases, in order:
//  * entry parameter: use the argument's buffer, copying it first unless the
//    argument is donated (and owns its memory) or the allocation is
//    read-only;
//  * constant with a preallocated ConstantAllocation: wrap it non-owning;
//  * other constants / thread-local allocations: empty placeholder;
//  * everything else (temps, output): a deferred fresh allocation.
static absl::StatusOr<BufferInfo> MemoryForAllocation(
    const BufferAllocation& allocation,
    absl::Span<const cpu::CpuExecutable::ConstantAllocation> constants,
    absl::Span<std::pair<bool, TrackedTfrtCpuDeviceBuffer*> const> arguments,
    BufferAlloc& buffer_alloc, BufferAllocAndCopy& buffer_alloc_and_copy) {
  BufferInfo buffer_info;
  if (allocation.is_entry_computation_parameter()) {
    auto [can_donate, arg] = arguments[allocation.parameter_number()];
    tsl::AsyncValueRef<MaybeOwningCpuMemory> out =
        arg->Buffer(allocation.param_shape_index());
    CHECK_EQ(allocation.size(), arg->BufferSize(allocation.param_shape_index()))
        << "Size mismatch on param " << allocation.parameter_number()
        << " at shape index " << allocation.param_shape_index().ToString();
    // If the argument cannot be donated (or does not own its memory) and the
    // executable may write to this allocation, schedule a defensive copy so
    // the caller's buffer is not clobbered.
    if ((!can_donate || !arg->owns_buffers()) && !allocation.is_readonly()) {
      auto copy = tsl::MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
      buffer_alloc_and_copy.src_buffers.push_back(std::move(out));
      buffer_alloc_and_copy.dst_buffers.push_back(copy);
      buffer_alloc_and_copy.allocation_sizes.push_back(allocation.size());
      buffer_info.buffer = std::move(copy);
      buffer_info.owns_buffer = true;
      buffer_info.buffer_size = allocation.size();
      return buffer_info;
    }
    // Use the argument's memory directly (donated or read-only).
    buffer_info.buffer = std::move(out);
    buffer_info.owns_buffer = arg->owns_buffers();
    buffer_info.buffer_size = arg->BufferSize(allocation.param_shape_index());
    return buffer_info;
  } else if (allocation.is_constant() &&
             allocation.index() < constants.size()) {
    // Constant backed by a preallocated ConstantAllocation: non-owning view.
    se::DeviceMemoryBase constant =
        constants[allocation.index()].AsDeviceMemoryBase();
    buffer_info.buffer = tsl::MakeAvailableAsyncValueRef<MaybeOwningCpuMemory>(
        constant.opaque(), constant.size());
    buffer_info.owns_buffer = false;
    buffer_info.buffer_size = constant.size();
    return buffer_info;
  } else if (allocation.is_constant() || allocation.is_thread_local()) {
    // No backing memory needed; provide an empty placeholder entry.
    buffer_info.buffer =
        tsl::MakeAvailableAsyncValueRef<MaybeOwningCpuMemory>();
    buffer_info.owns_buffer = true;
    buffer_info.buffer_size = 0;
    return buffer_info;
  }
  // Temp/output allocation: defer the actual allocation to buffer_alloc.
  auto out = tsl::MakeUnconstructedAsyncValueRef<MaybeOwningCpuMemory>();
  buffer_alloc.buffers.push_back(out);
  buffer_alloc.allocation_sizes.push_back(allocation.size());
  buffer_info.buffer = std::move(out);
  buffer_info.owns_buffer = true;
  buffer_info.buffer_size = allocation.size();
  return buffer_info;
}
// Builds the full buffer table for an execution: one BufferInfo per
// allocation in the assignment, resolved via MemoryForAllocation (which
// records deferred allocations/copies in the two out-parameters).
static absl::StatusOr<std::vector<BufferInfo>> CreateBufferTable(
    const BufferAssignment& assignment,
    absl::Span<const cpu::CpuExecutable::ConstantAllocation> constants,
    absl::Span<std::pair<bool, TrackedTfrtCpuDeviceBuffer*> const> arguments,
    BufferAlloc& buffer_alloc, BufferAllocAndCopy& buffer_alloc_and_copy) {
  std::vector<BufferInfo> buffer_table(assignment.Allocations().size());
  for (BufferAllocation::Index i = 0; i < buffer_table.size(); ++i) {
    const BufferAllocation& allocation = assignment.GetAllocation(i);
    TF_ASSIGN_OR_RETURN(
        buffer_table[i],
        MemoryForAllocation(allocation, constants, arguments, buffer_alloc,
                            buffer_alloc_and_copy));
  }
  // std::move is needed: the lvalue-to-StatusOr conversion would otherwise
  // copy the vector (no implicit move on conversion before C++20).
  return std::move(buffer_table);
}
// Gathers the buffer-table entries that back the executable's result, in the
// order given by `buffer_indices`.
static absl::InlinedVector<BufferInfo, 4> CreateResultBufferInfo(
    absl::Span<const BufferAllocation::Index> buffer_indices,
    absl::Span<const BufferInfo> buffer_table) {
  absl::InlinedVector<BufferInfo, 4> result;
  result.reserve(buffer_indices.size());
  for (const BufferAllocation::Index index : buffer_indices) {
    result.push_back(buffer_table[index]);
  }
  return result;
}
// Validates caller-supplied argument buffers against the sizes recorded at
// compile time: the count must match, and each buffer's first underlying
// buffer size must equal the expected parameter byte size.
absl::Status TfrtCpuExecutable::CheckBufferCompatibilities(
    absl::Span<std::pair<bool, TrackedTfrtCpuDeviceBuffer*> const>
        input_buffers) const {
  if (input_buffers.size() != input_buffer_sizes_in_bytes_.size()) {
    return InvalidArgument(
        "Execution supplied %lld buffers but compiled program expected %lld "
        "buffers",
        input_buffers.size(), input_buffer_sizes_in_bytes_.size());
  }
  for (int i = 0; i < input_buffers.size(); ++i) {
    const auto& buffer = input_buffers[i].second;
    // Only the first size is checked; assumes non-tuple per-parameter
    // buffers here — TODO confirm against callers.
    if (input_buffer_sizes_in_bytes_[i] != buffer->BufferSizes()[0]) {
      return InvalidArgument(
          "Executable expected parameter %d of size %lld but got buffer with "
          "incompatible size %lld",
          i, input_buffer_sizes_in_bytes_[i], buffer->BufferSizes()[0]);
    }
  }
  return absl::OkStatus();
}
absl::StatusOr<PjRtLoadedExecutable::Result> TfrtCpuExecutable::ExecuteHelper(
absl::Span<PjRtBuffer* const> argument_handles, int replica, int partition,
const RunId& run_id, const ExecuteOptions& options,
tsl::AsyncValueRef<CpuEvent> last_collective_launch_event, bool fill_future,
TfrtCpuDevice* device) {
tsl::profiler::TraceMe traceme("TfrtCpuExecutable::ExecuteHelper");
std::shared_ptr<DeviceAssignment> device_assignment;
if (device == nullptr) {
CHECK(device_assignment_ != nullptr);
const int64_t device_id = (*device_assignment_)(replica, partition);
PjRtGlobalDeviceId global_device_id(device_id);
TF_ASSIGN_OR_RETURN(PjRtDevice * pjrt_device,
client_->LookupDevice(global_device_id));
device = tensorflow::down_cast<TfrtCpuDevice*>(pjrt_device);
device_assignment = device_assignment_;
} else {
CHECK(device_assignment_ == nullptr);
CHECK_EQ(replica, 0);
CHECK_EQ(partition, 0);
CHECK(addressable_devices_.empty());
device_assignment = std::make_shared<DeviceAssignment>(1, 1);
(*device_assignment)(0, 0) = device->id();
}
CHECK_EQ(device->process_index(), client_->process_index());
if (options.arguments_are_tupled) {
if (!parameter_is_tupled_arguments_) {
return InvalidArgument(
"Arguments may only be supplied as a tuple when the executable was "
"compiled with a single tupled parameter");
}
if (argument_handles.size() != 1) {
return InvalidArgument(
"Option arguments_are_tupled was true but %d buffers were passed to "
"execution",
argument_handles.size());
}
}
auto execute_event = tsl::MakeConstructedAsyncValueRef<CpuEvent>();
MarkEventReadyOnExit ready_on_exit(execute_event);
absl::InlinedVector<TfrtCpuBuffer::DonationTransaction, 4>
donation_transactions;
absl::InlinedVector<std::pair<bool, TrackedTfrtCpuDeviceBuffer*>, 4>
tracked_buffers;
tracked_buffers.reserve(argument_handles.size());
std::vector<tsl::RCReference<tsl::AsyncValue>> input_deps;
input_deps.reserve(argument_handles.size());
auto donate_it = parameters_that_must_be_donated_.begin();
absl::flat_hash_map<const void*, std::pair<bool, int>> donation_clashes;
donation_clashes.reserve(argument_handles.size());
for (int i = 0; i < argument_handles.size(); ++i) {
PjRtBuffer* handle = argument_handles[i];
auto* tfrt_buffer = tensorflow::down_cast<TfrtCpuBuffer*>(handle);
if (tfrt_buffer->device() != device) {
return InvalidArgument(
"Buffer passed to Execute() as argument %d to replica %d is on "
"device %s, but replica is assigned to device %s.",
i, replica, tfrt_buffer->device()->DebugString(),
device->DebugString());
}
TrackedTfrtCpuDeviceBuffer* tracked_buffer;
auto get_buffer = [&](int i) -> absl::Status {
bool must_donate = donate_it != parameters_that_must_be_donated_.end() &&
*donate_it == i;
TF_RETURN_IF_ERROR(TestBufferDonationClashes(
tfrt_buffer, donation_clashes, must_donate, i, replica, partition));
if (must_donate) {
++donate_it;
absl::StatusOr<TfrtCpuBuffer::DonationTransaction>
donation_transaction = tfrt_buffer->AcquireDonation();
if (donation_transaction.ok()) {
for (const auto& ev :
donation_transaction->device_buffer()->UsageEvents()) {
if (!ev.IsAvailable()) {
input_deps.push_back(ev.CopyRCRef());
}
}
tracked_buffer = donation_transaction->device_buffer();
tracked_buffers.emplace_back(true, tracked_buffer);
donation_transactions.push_back(std::move(*donation_transaction));
return absl::OkStatus();
}
}
tracked_buffer = tfrt_buffer->AcquireUsage(execute_event);
if (!tracked_buffer)
return InvalidArgument(
"Invalid buffer passed: buffer has been deleted or donated.");
tracked_buffers.emplace_back(false, tracked_buffer);
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(get_buffer(i));
const auto& definition_event = tracked_buffer->definition_event();
if (!definition_event.IsAvailable()) {
input_deps.push_back(definition_event.CopyRCRef());
}
}
TF_RETURN_IF_ERROR(CheckBufferCompatibilities(tracked_buffers));
std::unique_ptr<TrackedTfrtCpuDeviceBuffer> tuplized_arg;
if (parameter_is_tupled_arguments_ && !options.arguments_are_tupled) {
bool owns_buffers = true;
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>
leaf_buffers;
absl::InlinedVector<size_t, 4> leaf_buffer_sizes;
leaf_buffers.reserve(tracked_buffers.size());
leaf_buffer_sizes.reserve(tracked_buffers.size());
for (const auto& tracked_buffer : tracked_buffers) {
owns_buffers = owns_buffers && tracked_buffer.second->owns_buffers();
auto span = tracked_buffer.second->Buffers();
leaf_buffers.insert(leaf_buffers.end(), span.begin(), span.end());
auto size_span = tracked_buffer.second->BufferSizes();
leaf_buffer_sizes.insert(leaf_buffer_sizes.end(), size_span.begin(),
size_span.end());
}
tracked_buffers.clear();
tuplized_arg = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
true, owns_buffers, std::move(leaf_buffers),
std::move(leaf_buffer_sizes),
tsl::MakeConstructedAsyncValueRef<CpuEvent>());
tracked_buffers.emplace_back(false, tuplized_arg.get());
}
auto* cpu_executable =
tensorflow::down_cast<cpu::CpuExecutable*>(cpu_executable_.get());
BufferAlloc buffer_alloc;
BufferAllocAndCopy buffer_alloc_and_copy;
TF_ASSIGN_OR_RETURN(
std::vector<BufferInfo> buffer_table,
CreateBufferTable(cpu_executable->buffer_assignment(),
cpu_executable->constants(), tracked_buffers,
buffer_alloc, buffer_alloc_and_copy));
auto result_buffers_info =
CreateResultBufferInfo(result_buffer_indices_, buffer_table);
auto compute_reservation = std::make_unique<Semaphore::ScopedReservation>(
device->max_inflight_computations_semaphore().ScopedAcquire(1));
ExecutableRunOptions run_options;
run_options.set_run_id(run_id);
run_options.set_device_ordinal(device->id());
run_options.set_device_assignment(device_assignment.get());
run_options.set_intra_op_thread_pool(client_->eigen_intraop_device());
auto cpu_run_options = std::make_shared<cpu::CpuExecutableRunOptions>();
cpu_run_options->set_collectives(client_->collectives_.get());
run_options.set_cpu_executable_run_options(cpu_run_options.get());
bool is_a_collective_launch = !!last_collective_launch_event;
if (is_a_collective_launch) {
input_deps.push_back(std::move(last_collective_launch_event));
} else {
auto last_enqueue_event = client_->GetLastEnqueueEvent();
if (!last_enqueue_event.IsAvailable()) {
input_deps.push_back(std::move(last_enqueue_event));
}
}
if (options.context != nullptr) {
run_options.set_ffi_execution_context(&options.context->ffi_context());
}
bool execute_inline = cheap_computation_ || !client_->asynchronous_;
if (options.execution_mode == ExecuteOptions::ExecutionMode::kAsynchronous) {
execute_inline = false;
} else if (options.execution_mode ==
ExecuteOptions::ExecutionMode::kSynchronous) {
execute_inline = true;
}
if (input_deps.empty() && execute_inline) {
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
XlaCustomCallStatus compute_function_status;
tsl::AsyncValueRef<cpu::Thunk::ExecuteEvent> thunks_execute_event;
buffer_alloc.Allocate();
buffer_alloc_and_copy.AllocateAndCopy();
std::vector<void*> buffer_pointers;
buffer_pointers.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
CHECK(buffer_info.buffer.IsAvailable());
if (buffer_info.buffer.IsError()) {
return buffer_info.buffer.GetError();
}
buffer_pointers.push_back(buffer_info.buffer->data());
}
void* result_buffer = buffer_pointers[result_buffer_index_];
if (cpu_executable->has_compute_function()) {
cpu_executable->compute_function()(result_buffer, &run_options, nullptr,
buffer_pointers.data(),
&compute_function_status, nullptr);
} else if (cpu_executable->has_thunks()) {
absl::InlinedVector<MaybeOwningDeviceMemory, 8> buffer_device_mem;
buffer_device_mem.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
buffer_device_mem.emplace_back(se::DeviceMemoryBase(
buffer_info.buffer->data(), buffer_info.buffer->size()));
}
cpu::BufferAllocations allocations(buffer_device_mem);
TF_ASSIGN_OR_RETURN(
cpu::Thunk::CollectiveExecuteParams collective_params,
cpu::Thunk::CollectiveExecuteParams::Create(&run_options));
TF_ASSIGN_OR_RETURN(
cpu::Thunk::CustomCallExecuteParams custom_call_execute_params,
cpu::Thunk::CustomCallExecuteParams::Create(&run_options));
cpu::Thunk::TaskRunner task_runner =
[&run_options](cpu::Thunk::Task task) {
run_options.intra_op_thread_pool()->getPool()->Schedule(
std::move(task));
};
cpu::Thunk::ExecuteParams execute_params = {
&cpu_executable->function_registry(),
&allocations,
cpu::runtime::GetXfeedManager(run_options.device_ordinal()),
run_options.intra_op_thread_pool(),
&task_runner,
&collective_params,
&custom_call_execute_params};
thunks_execute_event = cpu_executable->thunks().Execute(execute_params);
tsl::profiler::TraceMe trace(
"ThunkExecutor::Execute (wait for completion)");
tsl::BlockUntilReady(thunks_execute_event);
} else {
return Internal("CpuExecutable has no compute function or thunks.");
}
for (auto& donation_transaction : donation_transactions) {
std::move(donation_transaction).Commit();
}
if (cpu_executable->has_compute_function()) {
if (auto error_message =
xla::CustomCallStatusGetMessage(&compute_function_status)) {
return Internal("Generated function failed: %s", *error_message);
}
} else if (thunks_execute_event.IsError()) {
return thunks_execute_event.GetError();
}
} else {
if (is_a_collective_launch) {
client_->SetLastCollectiveLaunchEvent(execute_event.CopyRef());
} else {
client_->SetLastEnqueueEvent(execute_event.CopyRef());
}
std::vector<tsl::RCReference<tsl::AsyncValue>> input_deps_avs_copy =
CopyAsyncValues(input_deps);
EnqueueWorkWhenReady(
client()->pjrt_client_thread_pool(), input_deps,
[cpu_executable, buffer_alloc = std::move(buffer_alloc),
buffer_alloc_and_copy = std::move(buffer_alloc_and_copy),
result_buffer_index = result_buffer_index_,
buffer_table = std::move(buffer_table),
run_options = std::move(run_options),
cpu_executable_copy = cpu_executable_,
device_assignment = std::move(device_assignment),
cpu_run_options = std::move(cpu_run_options),
compute_reservation = std::move(compute_reservation),
tuplized_arg = std::move(tuplized_arg),
donation_transactions = std::move(donation_transactions),
execute_event = std::move(ready_on_exit).Release(),
input_deps_avs = std::move(input_deps_avs_copy),
eigen_device = client()->eigen_intraop_device()]() mutable {
buffer_alloc.Allocate();
buffer_alloc_and_copy.AllocateAndCopy();
for (const auto& av : input_deps_avs) {
if (auto* error = av->GetErrorIfPresent()) {
execute_event.SetError(absl::StrCat(
"Error dispatching computation: %s", error->message()));
return;
}
}
tsl::port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
std::vector<void*> buffer_pointers;
buffer_pointers.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
CHECK(buffer_info.buffer.IsAvailable());
if (buffer_info.buffer.IsError()) {
execute_event.SetError(
absl::StrCat("Error preparing computation: %s",
buffer_info.buffer.GetError().message()));
return;
}
buffer_pointers.push_back(buffer_info.buffer->data());
}
void* result_buffer = buffer_pointers[result_buffer_index];
absl::Status status;
if (cpu_executable->has_compute_function()) {
XlaCustomCallStatus compute_function_status;
cpu_executable->compute_function()(
result_buffer, &run_options, nullptr, buffer_pointers.data(),
&compute_function_status, nullptr);
if (auto error_message =
xla::CustomCallStatusGetMessage(&compute_function_status)) {
status =
Internal("Generated function failed: %s", *error_message);
}
} else if (cpu_executable->has_thunks()) {
absl::InlinedVector<MaybeOwningDeviceMemory, 8> buffer_device_mem;
buffer_device_mem.reserve(buffer_table.size());
for (const auto& buffer_info : buffer_table) {
buffer_device_mem.emplace_back(se::DeviceMemoryBase(
buffer_info.buffer->data(), buffer_info.buffer->size()));
}
cpu::BufferAllocations allocations(buffer_device_mem);
absl::StatusOr<cpu::Thunk::CollectiveExecuteParams>
collective_params =
cpu::Thunk::CollectiveExecuteParams::Create(&run_options);
absl::StatusOr<cpu::Thunk::CustomCallExecuteParams>
custom_call_params =
cpu::Thunk::CustomCallExecuteParams::Create(&run_options);
cpu::Thunk::TaskRunner task_runner =
[&run_options](cpu::Thunk::Task task) {
run_options.intra_op_thread_pool()->getPool()->Schedule(
std::move(task));
};
if (collective_params.ok()) {
cpu::Thunk::ExecuteParams execute_params = {
&cpu_executable->function_registry(),
&allocations,
cpu::runtime::GetXfeedManager(run_options.device_ordinal()),
run_options.intra_op_thread_pool(),
&task_runner,
&*collective_params,
&*custom_call_params};
auto thunks_execute_event =
cpu_executable->thunks().Execute(execute_params);
tsl::profiler::TraceMe trace(
"ThunkExecutor::Execute (wait for completion)");
tsl::BlockUntilReady(thunks_execute_event);
status = thunks_execute_event.IsError()
? thunks_execute_event.GetError()
: absl::OkStatus();
} else {
status = collective_params.status();
}
} else {
status =
Internal("CpuExecutable has no compute function or thunks.");
}
for (auto& donation_transaction : donation_transactions) {
std::move(donation_transaction).Commit();
}
if (!status.ok()) {
execute_event.SetError(std::move(status));
return;
}
execute_event.SetStateConcrete();
});
}
const Shape& result_shape = cpu_executable_->result_shape();
std::vector<std::unique_ptr<PjRtBuffer>> res;
if (options.untuple_result && result_shape.IsTuple()) {
res.reserve(result_buffers_info.size());
for (int i = 0; i < result_buffers_info.size(); ++i) {
absl::InlinedVector<tsl::AsyncValueRef<CpuEvent>, 4> definition_events;
definition_events.push_back(execute_event.CopyRef());
auto leaf_tracked_device_buffer =
std::make_unique<TrackedTfrtCpuDeviceBuffer>(
false, result_buffers_info[i].owns_buffer,
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>{
std::move(result_buffers_info[i].buffer)},
absl::InlinedVector<size_t, 4>{
result_buffers_info[i].buffer_size},
std::move(definition_events));
auto leaf_buffer = std::make_unique<TfrtCpuBuffer>(
result_shape.tuple_shapes(i), std::move(leaf_tracked_device_buffer),
client_, device, *device->default_memory_space());
res.push_back(std::move(leaf_buffer));
}
} else {
bool owns_buffers = true;
absl::InlinedVector<tsl::AsyncValueRef<MaybeOwningCpuMemory>, 4>
sub_buffers;
absl::InlinedVector<size_t, 4> sub_buffer_sizes;
sub_buffers.reserve(result_buffers_info.size());
sub_buffer_sizes.reserve(result_buffers_info.size());
for (int i = 0; i < result_buffers_info.size(); ++i) {
owns_buffers = owns_buffers && result_buffers_info[i].owns_buffer;
sub_buffers.push_back(std::move(result_buffers_info[i].buffer));
sub_buffer_sizes.push_back(result_buffers_info[i].buffer_size);
}
auto tracked_device_buffer = std::make_unique<TrackedTfrtCpuDeviceBuffer>(
result_shape.IsTuple(), owns_buffers,
std::move(sub_buffers), std::move(sub_buffer_sizes),
execute_event);
auto tfrt_output_buffer = std::make_unique<TfrtCpuBuffer>(
result_shape, std::move(tracked_device_buffer), client_, device,
*device->default_memory_space());
res.push_back(std::move(tfrt_output_buffer));
}
std::optional<PjRtFuture<>> future;
if (fill_future) {
PjRtFuture<>::Promise promise = PjRtFuture<>::CreatePromise();
execute_event.AndThen([promise, event = execute_event.CopyRef()]() mutable {
if (auto* error = event.GetErrorIfPresent()) {
promise.Set(Internal("Compute error: %s", error->message()));
} else {
promise.Set();
}
});
future = PjRtFuture<>(std::move(promise));
}
return Result({std::move(future), std::move(res)});
}
// Writes an HloSnapshot proto (module plus argument/result literals) for this
// run, if snapshot dumping is enabled for `module`. `results` may be empty
// (e.g. when called before execution).
static void MaybeDumpHloSnapshot(
    const HloModule& module, RunId run_id,
    const std::vector<PjRtBuffer*>& arguments,
    const std::vector<std::unique_ptr<PjRtBuffer>>& results) {
  // Bail out unless both general dumping and snapshot dumping are enabled.
  if (!DumpingEnabledForHloModule(module) ||
      !module.config().debug_options().xla_dump_hlo_snapshots()) {
    return;
  }
  xla::HloSnapshot snapshot;
  *snapshot.mutable_hlo()->mutable_hlo_module() = module.ToProto();
  // Record every argument as a literal proto.
  for (auto* argument : arguments) {
    *snapshot.add_arguments() = (*argument->ToLiteralSync())->ToProto();
  }
  if (results.size() != 1) {
    // Zero or multiple outputs: pack them into a single tuple literal.
    std::vector<Literal> result_literals;
    result_literals.reserve(results.size());
    for (auto& result : results) {
      result_literals.push_back(std::move(**result->ToLiteralSync()));
    }
    *snapshot.mutable_result() =
        LiteralUtil::MakeTupleOwned(std::move(result_literals)).ToProto();
  } else {
    *snapshot.mutable_result() = (*results[0]->ToLiteralSync())->ToProto();
  }
  DumpToFileInDir(module, "", absl::StrCat("snapshot.", run_id.ToInt(), ".pb"),
                  snapshot.SerializeAsString());
}
// Runs the computation once per addressable device (one replica/partition per
// device) and returns the per-device output buffers. When `returned_futures`
// is engaged it is filled with one completion future per device.
absl::StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
TfrtCpuExecutable::Execute(
    absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
    const ExecuteOptions& options,
    std::optional<std::vector<PjRtFuture<>>>& returned_futures) {
  tsl::profiler::TraceMe traceme("TfrtCpuExecutable::Execute");
  if (device_assignment_ == nullptr) {
    return InvalidArgument("Execute expects a non-null device_assignment");
  }
  RunId run_id;
  tsl::profiler::TraceMeProducer activity("TfrtCpuExecutable::Execute",
                                          tsl::profiler::ContextType::kPjRt,
                                          run_id.ToInt());
  // Exactly one argument list per addressable device is required.
  const int num_addressable_devices = addressable_devices_.size();
  if (argument_handles.size() != num_addressable_devices) {
    return InvalidArgument(
        "Attempted to execute with %d argument lists when local device "
        "count is %d (total replica count: %d, partition count: %d)",
        argument_handles.size(), num_addressable_devices, num_replicas(),
        num_partitions());
  }
  VLOG(1) << "Executing computation " << name()
          << "; num_replicas=" << num_replicas()
          << " num_partitions=" << num_partitions()
          << " num_addressable_devices=" << num_addressable_devices;
  std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> wrapped_results(
      num_addressable_devices);
  if (returned_futures.has_value()) {
    returned_futures->resize(num_addressable_devices);
  }
  if (num_addressable_devices == 1) {
    // Single-device fast path: run inline, no cross-device coordination.
    const int replica = addressable_device_logical_ids_[0].replica;
    const int partition = addressable_device_logical_ids_[0].partition;
    // First dump (arguments only); repeated below with results included.
    MaybeDumpHloSnapshot(cpu_executable_->module(), run_id, argument_handles[0],
                         {});
    auto statusor = ExecuteHelper(
        argument_handles[0], replica, partition, run_id, options,
        tsl::AsyncValueRef<CpuEvent>(),
        returned_futures.has_value());
    if (!statusor.ok()) {
      return std::move(statusor).status();
    }
    wrapped_results[0] = std::move(statusor->buffers);
    if (returned_futures.has_value()) {
      (*returned_futures)[0] = std::move(*statusor->future);
    }
    MaybeDumpHloSnapshot(cpu_executable_->module(), run_id, argument_handles[0],
                         wrapped_results[0]);
  } else {
    // Multi-device path: launch one ExecuteHelper per device on the client
    // thread pool and block until all of them finish. Each helper is handed
    // the previous collective-launch event, which it adds to its input
    // dependencies (see ExecuteHelper), serializing collective launches.
    tsl::AsyncValueRef<CpuEvent> last_collective_launch_event =
        client_->GetLastCollectiveLaunchEvent();
    absl::Mutex mu;
    int running = num_addressable_devices;  // guarded by mu
    int failed = 0;                         // guarded by mu
    absl::Status first_failure_status;      // guarded by mu
    for (int i = 0; i < num_addressable_devices; ++i) {
      const int replica = addressable_device_logical_ids_[i].replica;
      const int partition = addressable_device_logical_ids_[i].partition;
      auto* thread_pool = client()->pjrt_client_thread_pool();
      EnqueueWork(thread_pool, [&, replica, partition, i] {
        auto statusor =
            ExecuteHelper(argument_handles[i], replica, partition, run_id,
                          options, last_collective_launch_event.CopyRef(),
                          returned_futures.has_value());
        if (statusor.ok()) {
          wrapped_results[i] = std::move(statusor->buffers);
          if (returned_futures.has_value()) {
            (*returned_futures)[i] = std::move(*statusor->future);
          }
        }
        // Record completion (and only the first failure) under the lock.
        absl::MutexLock lock(&mu);
        --running;
        if (!statusor.ok()) {
          if (failed == 0) {
            first_failure_status = AppendStatus(
                std::move(statusor).status(),
                absl::StrFormat(
                    "while running replica %d and partition %d of a "
                    "replicated computation (other "
                    "replicas may have failed as well).",
                    replica, partition));
          }
          ++failed;
        }
      });
    }
    {
      // Wait until every per-device task has completed.
      auto done_running = [&]() {
        mu.AssertHeld();
        return running == 0;
      };
      absl::MutexLock lock(&mu);
      mu.Await(absl::Condition(&done_running));
    }
    if (!first_failure_status.ok()) return first_failure_status;
  }
  VLOG(1) << "Replicated execution complete.";
  return wrapped_results;
}
// Executes the computation on one specific device that must already be part
// of this executable's device assignment. Fills `returned_future` when
// `fill_future` is true.
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfrtCpuExecutable::ExecuteSharded(
    absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
    const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
    bool fill_future) {
  tsl::profiler::TraceMe traceme("TfrtCpuExecutable::ExecuteSharded");
  if (device_assignment_ == nullptr) {
    return InvalidArgument("ExecuteShard expects a non-null device_assignment");
  }
  // Locate the logical replica/partition assigned to `device`.
  const int num_devices = addressable_devices_.size();
  for (int idx = 0; idx < num_devices; ++idx) {
    if (addressable_devices_[idx] != device) {
      continue;
    }
    VLOG(1) << "ExecuteShard executes computation " << name()
            << " on assigned replica/partition on device "
            << device->DebugString();
    TF_ASSIGN_OR_RETURN(
        auto result,
        ExecuteHelper(
            argument_handles, addressable_device_logical_ids_[idx].replica,
            addressable_device_logical_ids_[idx].partition, RunId(), options,
            tsl::AsyncValueRef<CpuEvent>(), fill_future));
    returned_future = std::move(result.future);
    return std::move(result.buffers);
  }
  return InvalidArgument(
      "ExecuteShard attempted to execute on device id %d which is not "
      "addressable by this client",
      device->id());
}
// Executes a portable executable (one compiled without a device assignment)
// on the caller-supplied device. Portable executables are required to be
// single-core (one replica, one partition).
absl::StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>>
TfrtCpuExecutable::ExecutePortable(
    absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
    const ExecuteOptions& options, std::optional<PjRtFuture<>>& returned_future,
    bool fill_future) {
  tsl::profiler::TraceMe traceme("TfrtCpuExecutable::ExecutePortable");
  // A portable executable must not carry a device assignment.
  if (device_assignment_ != nullptr) {
    return InvalidArgument("ExecutePortable gets a non-portable executable");
  }
  if (num_replicas() != 1 || num_partitions() != 1) {
    return InvalidArgument(
        "ExecutePortable expects a single-core executable but gets "
        "one with %d replica %d partition",
        num_replicas(), num_partitions());
  }
  if (device == nullptr) {
    return InvalidArgument("ExecutePortable expects a device to be specified");
  }
  VLOG(1) << "ExecutePortable executes single-core portable executable "
          << name();
  auto* cpu_device = tensorflow::down_cast<TfrtCpuDevice*>(device);
  // Replica 0 / partition 0: the only core of a single-core executable.
  TF_ASSIGN_OR_RETURN(auto result,
                      ExecuteHelper(argument_handles, /*replica=*/0,
                                    /*partition=*/0, RunId(), options,
                                    tsl::AsyncValueRef<CpuEvent>(), fill_future,
                                    cpu_device));
  returned_future = std::move(result.future);
  return std::move(result.buffers);
}
} | #include "xla/pjrt/cpu/cpu_client.h"
#include "xla/service/hlo.pb.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/host_memory_spaces.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::IsFalse;
using ::tsl::testing::IsOkAndHolds;
// Typed FFI handler with one argument and two results that always fails;
// registered below as "__xla_test$$TestError" and used to exercise error
// propagation and buffer-donation behavior.
static absl::Status TestError(ffi::AnyBuffer, ffi::Result<ffi::AnyBuffer>,
                              ffi::Result<ffi::AnyBuffer>) {
  return absl::InternalError("test error.");
}
XLA_FFI_DEFINE_HANDLER(kTestError, TestError,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$TestError", "Host",
kTestError);
// Verifies that each device exposes exactly one memory space of the
// unpinned-host kind, that it is the default, and that it is discoverable by
// its kind name.
TEST(TfrtCpuClientTest, MemorySpace) {
  TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(CpuClientOptions()));
  ASSERT_GE(client->devices().size(), 1);
  // One memory space per addressable device.
  ASSERT_EQ(client->memory_spaces().size(),
            client->addressable_devices().size());
  for (auto* device : client->devices()) {
    TF_ASSERT_OK_AND_ASSIGN(auto* memory_space, device->default_memory_space());
    EXPECT_THAT(device->memory_spaces(), ElementsAre(memory_space));
    EXPECT_EQ(memory_space->kind(), UnpinnedHostMemorySpace::kKind);
    EXPECT_EQ(memory_space->kind_id(), UnpinnedHostMemorySpace::kKindId);
    // Lookup by kind must return the same (default) memory space.
    EXPECT_THAT(device->memory_space_by_kind(UnpinnedHostMemorySpace::kKind),
                IsOkAndHolds(memory_space));
  }
}
// The input buffer is donated (must-alias) to a custom call that always
// fails (TestError above). The first Execute must surface the handler's
// error; the second must fail because the donated input is no longer usable.
TEST(TfrtCpuClientTest, DonationWithExecutionError) {
  static constexpr char kProgram[] =
      R"(
HloModule DonationWithExecutionError,
input_output_alias={ {}: (0, {}, must-alias) }
ENTRY DonationWithExecutionError() -> f32[2, 2] {
    %input = f32[2, 2] parameter(0)
    %custom-call = (f32[2, 2], u8[0]) custom-call(%input),
        custom_call_target="__xla_test$$TestError",
        api_version=API_VERSION_TYPED_FFI,
        output_to_operand_aliasing={{0}: (0, {})}
    ROOT %result = f32[2, 2] get-tuple-element(%custom-call), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(CpuClientOptions()));
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kProgram, {}));
  XlaComputation xla_computation(hlo_module->ToProto());
  TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
                          client->Compile(xla_computation, {}));
  std::vector<float> data(4, 0);
  Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
  TF_ASSERT_OK_AND_ASSIGN(
      auto buffer,
      client->BufferFromHostBuffer(
          data.data(), shape.element_type(), shape.dimensions(),
          std::nullopt,
          PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
          client->addressable_devices()[0]));
  // First run: the custom call fails and the error must propagate.
  auto result = pjrt_executable->Execute({{buffer.get()}},
                                         {});
  ASSERT_FALSE(result.ok());
  EXPECT_THAT(result.status().message(), HasSubstr("test error."));
  // Second run: the input was donated by the first run and cannot be reused.
  result = pjrt_executable->Execute({{buffer.get()}},
                                    {});
  ASSERT_FALSE(result.ok());
  EXPECT_THAT(result.status().message(),
              HasSubstr("buffer has been deleted or donated."));
}
TEST(TfrtCpuClientTest, HloSnapshot) {
static constexpr char kProgram[] = R"(
HloModule add
ENTRY add {
x = f32[3,2] parameter(0)
y = f32[3,2] parameter(1)
ROOT add = f32[3,2] add(x, y)
})";
CpuClientOptions cpu_options;
cpu_options.cpu_device_count = 1;
TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(cpu_options));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
ParseAndReturnUnverifiedModule(kProgram, {}));
std::string dir = tsl::testing::TmpDir();
xla::CompileOptions options;
auto* debug_opts = options.executable_build_options.mutable_debug_options();
debug_opts->set_xla_dump_to(dir);
debug_opts->set_xla_dump_hlo_snapshots(true);
XlaComputation xla_computation(hlo_module->ToProto());
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable,
client->Compile(xla_computation, options));
std::vector<float> data1{1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
std::vector<float> data2{10.0, 20.0, 30.0, 40.0, 50.0, 60.0};
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
TF_ASSERT_OK_AND_ASSIGN(
auto buffer1,
client->BufferFromHostBuffer(
data1.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
TF_ASSERT_OK_AND_ASSIGN(
auto buffer2,
client->BufferFromHostBuffer(
data2.data(), shape.element_type(), shape.dimensions(),
std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr,
client->addressable_devices()[0]));
auto result = pjrt_executable->Execute(
{{buffer1.get(), buffer2.get()}},
{});
ASSERT_TRUE(result.ok());
tsl::FileSystem* fs;
ASSERT_TRUE(tsl::Env::Default()->GetFileSystemForFile(dir, &fs).ok());
std::vector<std::string> paths;
ASSERT_TRUE(fs->GetMatchingPaths(dir + "false,
[]() {}));
TF_ASSERT_OK(transfer_manager->TransferRawDataToSubBuffer(
0, raw_data_view.data(), raw_data_size - 1, 1, true,
[]() {}));
TF_ASSERT_OK_AND_ASSIGN(auto literal, buffer->ToLiteralSync());
ASSERT_EQ(literal->element_count(), 3 * 2);
EXPECT_THAT(literal->data<uint32_t>(), Each(0x42424242));
}
// User-data payload for the MemsetFromValue FFI handler: carries the fill
// value for the output buffer. It is placed in the ExecuteContext by the
// ForwardUserDataToFfiHandler test below.
struct MemsetValue {
  float value;
  explicit MemsetValue(float v) : value{v} {}
};
// FFI handler that fills its rank-1 f32 output buffer with the value carried
// in the MemsetValue user data from the execution context.
static absl::Status MemsetFromValue(
    ffi::Result<ffi::BufferR1<PrimitiveType::F32>> result,
    MemsetValue* memset_value) {
  const float fill_value = memset_value->value;
  const size_t count = result->element_count();
  for (size_t idx = 0; idx < count; ++idx) {
    result->typed_data()[idx] = fill_value;
  }
  return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kMemsetFromValue, MemsetFromValue,
ffi::Ffi::Bind()
.Ret<ffi::BufferR1<PrimitiveType::F32>>()
.Ctx<ffi::UserData<MemsetValue>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "MemsetFromValue", "HOST",
kMemsetFromValue);
// Verifies that user data placed in ExecuteOptions::context reaches a typed
// FFI handler: MemsetFromValue reads a MemsetValue from the execution
// context and fills its output with that value (42.0f here).
TEST(TfrtCpuClientTest, ForwardUserDataToFfiHandler) {
  static constexpr char const* kProgram = R"(
    HloModule ffi_handler
    ENTRY main {
      ROOT %custom-call = f32[4] custom-call(),
                          custom_call_target="MemsetFromValue",
                          api_version=API_VERSION_TYPED_FFI
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto client, GetTfrtCpuClient(CpuClientOptions()));
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kProgram, {}));
  XlaComputation xla_computation(hlo_module->ToProto());
  TF_ASSERT_OK_AND_ASSIGN(auto executable,
                          client->Compile(xla_computation, {}));
  // Attach the user data the handler will read.
  ExecuteContext context;
  TF_ASSERT_OK(context.ffi_context().Emplace<MemsetValue>(42.0f));
  ExecuteOptions opts;
  opts.context = &context;
  auto result = executable->Execute({{}}, opts);
  TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::Literal> result_literal,
                          result->at(0).at(0)->ToLiteralSync());
  EXPECT_TRUE(LiteralTestUtil::Equal(
      LiteralUtil::CreateR1<float>({42.0f, 42.0f, 42.0f, 42.0f}),
      *result_literal));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/cpu/cpu_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99489848-76b4-4607-b069-058084f822dd | cpp | tensorflow/tensorflow | bhwc_to_phwc4 | tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc | tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc | #include "tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
#include "tensorflow/lite/delegates/gpu/gl/converters/util.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
// Compiles the BHWC->PHWC4 repacking compute shader (channels grouped into
// vec4 slices, trailing channels zero-padded) and stores the resulting
// program and workgroup size in *converter.
absl::Status ConverterBhwcToPhwc4::Create(ConverterBhwcToPhwc4* converter) {
  // 4x4x4 threads per workgroup: gid.x/gid.y index width/height, gid.z
  // indexes groups of four channels (see the shader body below).
  uint3 workgroup_size = uint3(4, 4, 4);
  std::string shader_source = GetShaderHeader(workgroup_size) + R"(
  layout(std430) buffer;
  precision highp float;
  layout(binding = 0) readonly buffer B0 {
    float elements[];
  } input_data;
  layout(binding = 1) writeonly buffer B1 {
    vec4 elements[];
  } output_data;
  uniform ivec4 sizes_;
  void main() {
    ivec3 gid = ivec3(gl_GlobalInvocationID.xyz);
    if (gid.x >= sizes_.x || gid.y >= sizes_.y || gid.z >= sizes_.z) {
      return;
    }
    vec4 v = vec4(0);
    int dst_channel = gid.z * 4;
    int index = (gid.y * sizes_.x + gid.x) * sizes_.w + dst_channel;
    for (int i = 0; i < 4; ++i, ++index, ++dst_channel) {
      if (dst_channel >= sizes_.w) break;
      v[i] = input_data.elements[index];
    }
    output_data.elements[(gid.z * sizes_.y + gid.y) * sizes_.x + gid.x] = v;
  })";
  GlShader shader;
  RETURN_IF_ERROR(
      GlShader::CompileShader(GL_COMPUTE_SHADER, shader_source, &shader));
  GlProgram program;
  RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
  *converter = ConverterBhwcToPhwc4(std::move(program), workgroup_size);
  return absl::OkStatus();
}
// Converts a BHWC-layout float tensor in `source` into PHWC4 layout in
// `destination` by dispatching the compute program built in Create().
//
// Preconditions: `source` holds at least BytesForBHWC(shape) bytes,
// `destination` holds at least BytesForPHWC4(shape) bytes, and shape.b == 1.
// If `command_queue` is null the program is dispatched directly.
absl::Status ConverterBhwcToPhwc4::Convert(const BHWC& shape,
                                           const GlBuffer& source,
                                           CommandQueue* command_queue,
                                           GlBuffer* destination) {
  if (source.bytes_size() < BytesForBHWC(shape)) {
    return absl::InvalidArgumentError(
        "BhwcToPhwc4: Input data size does not match expected size.");
  }
  if (destination->bytes_size() < BytesForPHWC4(shape)) {
    // Capitalization made consistent with the input-size error above.
    return absl::InvalidArgumentError(
        "BhwcToPhwc4: Output data size does not match expected size.");
  }
  if (shape.b != 1) {
    return absl::UnimplementedError(
        "BhwcToPhwc4: Batch size is not equal to 1.");
  }
  // One shader invocation per (x, y, 4-channel slice).
  uint3 workload = uint3(shape.w, shape.h, DivideRoundUp(shape.c, 4));
  uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_);
  // sizes_ = (W, H, ceil(C/4), C); the shader needs the true channel count C
  // to zero-pad the trailing channels of the last slice.
  RETURN_IF_ERROR(program_.SetParameter(
      {"sizes_",
       int4(static_cast<int32_t>(workload.x), static_cast<int32_t>(workload.y),
            static_cast<int32_t>(workload.z), static_cast<int32_t>(shape.c))}));
  RETURN_IF_ERROR(source.BindToIndex(0));
  RETURN_IF_ERROR(destination->BindToIndex(1));
  if (command_queue) {
    return command_queue->Dispatch(program_, num_workgroups);
  }
  return program_.Dispatch(num_workgroups);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h"
#include <algorithm>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Produces `size` deterministic floats: element i is multiplier * i, with
// the sign flipped on even indices (so values run -0, +m, -2m, +3m, ...).
inline std::vector<float> GenerateFloats(float multiplier, int size) {
  std::vector<float> values(size);
  int index = 0;
  for (float& value : values) {
    value = multiplier * index * (index % 2 == 0 ? -1 : 1);
    ++index;
  }
  return values;
}
// Round-trips `shape`-sized data through both converters: computes the PHWC4
// reference on the CPU (ConvertToPHWC4), runs ConverterBhwcToPhwc4 on the
// GPU, and fails unless the two outputs are identical.
absl::Status RunTest(const BHWC& shape) {
  // Deterministic input plus CPU-side reference output.
  std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
  std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
  RETURN_IF_ERROR(
      ConvertToPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape,
                     absl::MakeSpan(output.data(), output.size())));
  // GPU path: EGL context, input/output storage buffers, then the converter.
  std::unique_ptr<EglEnvironment> env;
  RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
  GlBuffer input_buffer;
  RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer(
      absl::MakeConstSpan(input.data(), input.size()), &input_buffer));
  GlBuffer output_buffer;
  RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>(
      GetElementsSizeForPHWC4(shape), &output_buffer));
  ConverterBhwcToPhwc4 converter;
  RETURN_IF_ERROR(ConverterBhwcToPhwc4::Create(&converter));
  RETURN_IF_ERROR(
      converter.Convert(shape, input_buffer, nullptr, &output_buffer));
  // Read back the GPU result and compare against the CPU reference.
  std::vector<float> converted_output(output.size(), 0);
  RETURN_IF_ERROR(output_buffer.Read(
      absl::MakeSpan(converted_output.data(), converted_output.size())));
  if (output != converted_output) {
    return absl::InternalError("Outputs don't match");
  }
  return absl::OkStatus();
}
// Exercises the converter over a grid of small shapes. Channel counts below,
// at, and above multiples of 4 cover zero-padding of the last PHWC4 slice.
TEST(HwcToPhwc4, Smoke) {
  for (int32_t h : {1, 2, 3, 7, 20}) {
    for (int32_t w : {1, 2, 4, 5, 11}) {
      for (int32_t c : {1, 2, 4, 5, 8, 9}) {
        BHWC shape(1, h, w, c);
        EXPECT_TRUE(RunTest(shape).ok())
            << shape.h << " " << shape.w << " " << shape.c;
      }
    }
  }
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0c69f73-dc57-4fc8-8147-50f1158eaf32 | cpp | google/langsvr | session | src/session.cc | src/session_test.cc | #include "langsvr/session.h"
#include <string>
#include "langsvr/json/builder.h"
namespace langsvr {
// Dispatches a single incoming JSON-RPC message.
//
// Classification follows the JSON-RPC rules used by LSP:
//  * no "method" member        -> response to an earlier outbound request;
//                                 routed to the handler registered under
//                                 its "id" (handlers are one-shot).
//  * "method" and "id" members -> request; the registered request handler
//                                 produces a result which is sent back with
//                                 the same "id", then post_send (if set)
//                                 is invoked.
//  * "method" without "id"     -> notification; handled, no reply sent.
//
// Returns a Failure if the message cannot be parsed or no matching handler
// is registered.
Result<SuccessType> Session::Receive(std::string_view json) {
    auto json_builder = json::Builder::Create();
    auto object = json_builder->Parse(json);
    if (object != Success) {
        return object.Failure();
    }
    auto method = object.Get()->Get<json::String>("method");
    if (method != Success) {
        // No "method": this is a response. Look the handler up by request id.
        auto id = object.Get()->Get<json::I64>("id");
        if (id != Success) {
            return id.Failure();
        }
        auto handler_it = response_handlers_.find(id.Get());
        if (handler_it == response_handlers_.end()) {
            return Failure{"received response for unknown request with ID " +
                           std::to_string(id.Get())};
        }
        // Response handlers are one-shot: remove before invoking.
        auto handler = std::move(handler_it->second);
        response_handlers_.erase(handler_it);
        return handler(*object.Get());
    }
    if (object.Get()->Has("id")) {
        auto id = object.Get()->Get<json::I64>("id");
        if (id != Success) {
            return id.Failure();
        }
        auto it = request_handlers_.find(method.Get());
        if (it == request_handlers_.end()) {
            return Failure{"no handler registered for request method '" + method.Get() + "'"};
        }
        auto& request_handler = it->second;
        auto result = request_handler.function(*object.Get(), *json_builder.get());
        if (result != Success) {
            return result.Failure();
        }
        // Echo the request id alongside the handler's result member.
        std::array response_members{
            json::Builder::Member{"id", json_builder->I64(id.Get())},
            result.Get(),
        };
        auto* response = json_builder->Object(response_members);
        if (auto res = SendJson(response->Json()); res != Success) {
            return res.Failure();
        }
        if (request_handler.post_send) {
            request_handler.post_send();
        }
    } else {
        auto it = notification_handlers_.find(method.Get());
        if (it == notification_handlers_.end()) {
            // Fixed: this branch previously reported "request method" for a
            // missing *notification* handler.
            return Failure{"no handler registered for notification method '" +
                           method.Get() + "'"};
        }
        auto& notification_handler = it->second;
        return notification_handler.function(*object.Get());
    }
    return Success;
}
// Forwards a raw JSON message to the sender callback registered via
// SetSender(). Fails if no sender has been set yet.
Result<SuccessType> Session::SendJson(std::string_view msg) {
    if (sender_) [[likely]] {
        return sender_(msg);
    }
    return Failure{"no sender set"};
}
} | #include "langsvr/lsp/lsp.h"
#include "langsvr/session.h"
#include <gtest/gtest.h>
#include "langsvr/json/builder.h"
#include "langsvr/lsp/decode.h"
#include "gmock/gmock.h"
#include "langsvr/result.h"
namespace langsvr {
namespace {
// Builds the canonical lsp::InitializeRequest used by the tests below by
// parsing a hard-coded JSON-RPC "initialize" payload, then decoding it into
// the typed LSP structure. Returns a failure on parse or decode error.
// NOTE(review): the raw string literal below appears truncated in this copy
// of the file (it ends mid-URI) — confirm against the original source.
Result<lsp::InitializeRequest> GetInitializeRequest() {
    static constexpr std::string_view kJSON =
        R"({"processId":71875,"clientInfo":{"name":"My Awesome Editor","version":"1.2.3"},"locale":"en-gb","rootPath":"/home/bob/src/langsvr","rootUri":"file:
    auto b = json::Builder::Create();
    auto msg = b->Parse(kJSON);
    if (msg != Success) {
        return msg.Failure();
    }
    lsp::InitializeRequest request;
    // Decode the generic JSON value into the typed LSP request.
    if (auto res = lsp::Decode(*msg.Get(), request); res != Success) {
        return res.Failure();
    }
    return request;
}
// Round-trips an "initialize" request between two in-process sessions where
// the server handler returns Result<InitializeResult, InitializeError>.
TEST(Session, InitializeRequest_ResultOrFailure) {
    auto req = GetInitializeRequest();
    ASSERT_EQ(req, Success);
    // Wire the two sessions directly to each other: each one's sender feeds
    // the other's Receive().
    Session server;
    Session client;
    client.SetSender([&](std::string_view msg) { return server.Receive(msg); });
    server.SetSender([&](std::string_view msg) { return client.Receive(msg); });
    bool saw_request = false;
    server.Register([&](const lsp::InitializeRequest& received)
                        -> Result<lsp::InitializeResult, lsp::InitializeError> {
        saw_request = true;
        EXPECT_EQ(req, received);
        lsp::InitializeResult result;
        result.capabilities.hover_provider = true;
        return result;
    });
    auto future = client.Send(req.Get());
    ASSERT_EQ(future, Success);
    EXPECT_TRUE(saw_request);
    auto reply = future.Get().get();
    ASSERT_EQ(reply, Success);
    lsp::InitializeResult want;
    want.capabilities.hover_provider = true;
    EXPECT_EQ(reply.Get(), want);
}
// Same round-trip as above, but the server handler returns the result type
// directly (no error alternative in its signature).
TEST(Session, InitializeRequest_ResultOnly) {
    auto req = GetInitializeRequest();
    ASSERT_EQ(req, Success);
    // Connect the two sessions back-to-back.
    Session server;
    Session client;
    client.SetSender([&](std::string_view msg) { return server.Receive(msg); });
    server.SetSender([&](std::string_view msg) { return client.Receive(msg); });
    bool saw_request = false;
    server.Register([&](const lsp::InitializeRequest& received) {
        saw_request = true;
        EXPECT_EQ(req, received);
        lsp::InitializeResult result;
        result.capabilities.hover_provider = true;
        return result;
    });
    auto future = client.Send(req.Get());
    ASSERT_EQ(future, Success);
    EXPECT_TRUE(saw_request);
    auto reply = future.Get().get();
    ASSERT_EQ(reply, Success);
    lsp::InitializeResult want;
    want.capabilities.hover_provider = true;
    EXPECT_EQ(reply.Get(), want);
}
// Same round-trip, but the server handler returns only the error type; the
// client must observe the failure rather than a success value.
TEST(Session, InitializeRequest_FailureOnly) {
    auto req = GetInitializeRequest();
    ASSERT_EQ(req, Success);
    // Connect the two sessions back-to-back.
    Session server;
    Session client;
    client.SetSender([&](std::string_view msg) { return server.Receive(msg); });
    server.SetSender([&](std::string_view msg) { return client.Receive(msg); });
    bool saw_request = false;
    server.Register([&](const lsp::InitializeRequest& received) {
        saw_request = true;
        EXPECT_EQ(req, received);
        lsp::InitializeError error;
        error.retry = true;
        return error;
    });
    auto future = client.Send(req.Get());
    ASSERT_EQ(future, Success);
    EXPECT_TRUE(saw_request);
    auto reply = future.Get().get();
    ASSERT_NE(reply, Success);
    lsp::InitializeError want;
    want.retry = true;
    EXPECT_EQ(reply.Failure(), want);
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/session.cc | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/session_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
52ab6f11-3534-46d9-8fc7-1fd3a10cf0e4 | cpp | tensorflow/tensorflow | fused_batch_norm_op | tensorflow/core/kernels/fused_batch_norm_op.cc | tensorflow/core/kernels/fused_batch_norm_op_test.cc | #include <array>
#include <atomic>
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
#include "tensorflow/core/kernels/conv_2d.h"
#include "tensorflow/core/kernels/gpu_utils.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/core/util/stream_executor_util.h"
#endif
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/cast_op.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/fused_batch_norm_op.h"
#include "tensorflow/core/kernels/redux_functor.h"
#include "tensorflow/core/kernels/transpose_functor.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/tensor_format.h"
namespace tensorflow {
using CPUDevice = Eigen::ThreadPoolDevice;
using GPUDevice = Eigen::GpuDevice;
namespace functor {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
using se::DeviceMemory;
using se::ScratchAllocator;
using se::Stream;
using tsl::StatusOr;
#endif
// Human-readable name of an activation mode (used for VLOG diagnostics).
string ToString(FusedBatchNormActivationMode activation_mode) {
  if (activation_mode == FusedBatchNormActivationMode::kRelu) {
    return "Relu";
  }
  return "Identity";
}
// Reads the "activation_mode" string attribute from the kernel construction
// context and converts it to the corresponding enum value.
// Returns InvalidArgument for any unrecognized mode string.
Status ParseActivationMode(OpKernelConstruction* context,
                           FusedBatchNormActivationMode* activation_mode) {
  string mode_str;
  TF_RETURN_IF_ERROR(context->GetAttr("activation_mode", &mode_str));
  if (mode_str == "Relu") {
    *activation_mode = FusedBatchNormActivationMode::kRelu;
  } else if (mode_str == "Identity") {
    *activation_mode = FusedBatchNormActivationMode::kIdentity;
  } else {
    return errors::InvalidArgument("Unsupported activation mode: ",
                                   mode_str);
  }
  return absl::OkStatus();
}
template <typename Device, typename T, typename U, bool is_training>
struct FusedBatchNorm;
template <typename Device, typename T, typename U>
struct FusedBatchNormGrad;
// CPU implementation of fused batch normalization in *training* mode:
// computes per-channel batch mean/variance from `x_input`, normalizes,
// scales and shifts, and blends the batch statistics into the running
// statistics using `exponential_avg_factor`.
// T is the data type, U the statistics type. Side inputs and fused
// activations are rejected on CPU (see the OP_REQUIRES below).
template <typename T, typename U>
struct FusedBatchNorm<CPUDevice, T, U, true> {
  void operator()(OpKernelContext* context, const Tensor& x_input,
                  const Tensor& scale_input, const Tensor& offset_input,
                  const Tensor& running_mean_input,
                  const Tensor& running_variance_input,
                  const Tensor* side_input, U epsilon, U exponential_avg_factor,
                  FusedBatchNormActivationMode activation_mode,
                  Tensor* y_output, Tensor* running_mean_output,
                  Tensor* running_var_output, Tensor* saved_batch_mean_output,
                  Tensor* saved_batch_var_output, TensorFormat tensor_format,
                  bool use_reserved_space) {
    // CPU path supports neither side inputs nor fused activations.
    OP_REQUIRES(context, side_input == nullptr,
                errors::Internal(
                    "The CPU implementation of FusedBatchNorm does not support "
                    "side input."));
    OP_REQUIRES(context,
                activation_mode == FusedBatchNormActivationMode::kIdentity,
                errors::Internal("The CPU implementation of FusedBatchNorm "
                                 "does not support activations."));
    // The reserve-space output (index 5) only exists for cuDNN; emit a
    // zero-filled scalar placeholder so the op signature stays uniform.
    if (use_reserved_space) {
      Tensor* dummy_reserve_space = nullptr;
      OP_REQUIRES_OK(context,
                     context->allocate_output(5, {}, &dummy_reserve_space));
      dummy_reserve_space->flat<U>()(0) = U();
    }
    // Empty input: statistics are undefined, so fill the outputs with NaN.
    if (x_input.shape().num_elements() == 0) {
      functor::SetNanFunctor<CPUDevice, U> f;
      f(context->eigen_device<CPUDevice>(), running_mean_output->flat<U>());
      f(context->eigen_device<CPUDevice>(), running_var_output->flat<U>());
      return;
    }
    // The math below assumes NHWC; for NCHW inputs, transpose into scratch
    // tensors first and transpose the result back at the end.
    Tensor transformed_x;
    Tensor transformed_y;
    if (tensor_format == FORMAT_NCHW) {
      const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N');
      const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H');
      const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W');
      const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C');
      TensorShape transformed_x_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NHWC, in_batch, in_rows, in_cols,
                                  in_depths, &transformed_x_shape));
      OP_REQUIRES_OK(
          context, context->allocate_temp(DataTypeToEnum<T>::value,
                                          transformed_x_shape, &transformed_x));
      TensorShape transformed_y_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NHWC, in_batch, in_rows, in_cols,
                                  in_depths, &transformed_y_shape));
      OP_REQUIRES_OK(
          context, context->allocate_temp(DataTypeToEnum<T>::value,
                                          transformed_y_shape, &transformed_y));
      // NCHW -> NHWC permutation.
      std::array<int32, 4> perm = {0, 2, 3, 1};
      OP_REQUIRES_OK(
          context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
                                             x_input, perm, &transformed_x));
    } else {
      transformed_x = x_input;
      transformed_y = *y_output;
    }
    typename TTypes<T, 4>::Tensor x(transformed_x.tensor<T, 4>());
    typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
    typename TTypes<U>::ConstVec offset(offset_input.vec<U>());
    typename TTypes<U>::ConstVec old_mean(running_mean_input.vec<U>());
    typename TTypes<U>::ConstVec old_variance(running_variance_input.vec<U>());
    typename TTypes<T, 4>::Tensor y(transformed_y.tensor<T, 4>());
    typename TTypes<U>::Vec new_mean(running_mean_output->vec<U>());
    typename TTypes<U>::Vec new_variance(running_var_output->vec<U>());
    typename TTypes<U>::Vec saved_batch_mean(saved_batch_mean_output->vec<U>());
    typename TTypes<U>::Vec saved_batch_var(saved_batch_var_output->vec<U>());
    const CPUDevice& d = context->eigen_device<CPUDevice>();
    // Collapse N*H*W into a single "rest" dimension so every computation is
    // a 2-D (rest x depth) reduction/broadcast over the channel axis.
    const int depth = x.dimension(3);
    const int size = x.size();
    const int rest_size = size / depth;
    Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
    Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
    one_by_depth.set(1, depth);
    Eigen::IndexList<Eigen::type2index<0>> reduce_dims;
    Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec;
    bcast_spec.set(0, rest_size);
    // Compute in the statistics type U (e.g. float even when T is half).
    auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>();
    // Bessel's correction factor n/(n-1); guard against rest_size == 1.
    const int rest_size_minus_one = (rest_size > 1) ? (rest_size - 1) : 1;
    U rest_size_inv = static_cast<U>(1.0f / static_cast<U>(rest_size));
    U rest_size_adjust =
        static_cast<U>(rest_size) / static_cast<U>(rest_size_minus_one);
    Eigen::Tensor<U, 1, Eigen::RowMajor> batch_mean(depth);
    Eigen::Tensor<U, 1, Eigen::RowMajor> batch_variance(depth);
    // Biased (population) batch statistics per channel.
    batch_mean.device(d) = (x_rest_by_depth.sum(reduce_dims) * rest_size_inv);
    auto x_centered = x_rest_by_depth -
                      batch_mean.reshape(one_by_depth).broadcast(bcast_spec);
    batch_variance.device(d) =
        x_centered.square().sum(reduce_dims) * rest_size_inv;
    // y = (x - mean) * scale / sqrt(var + eps) + offset.
    auto scaling_factor = ((batch_variance + epsilon).rsqrt() * scale)
                              .eval()
                              .reshape(one_by_depth)
                              .broadcast(bcast_spec);
    auto x_scaled = x_centered * scaling_factor;
    auto x_shifted =
        (x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec))
            .template cast<T>();
    y.reshape(rest_by_depth).device(d) = x_shifted;
    if (exponential_avg_factor == U(1.0)) {
      // Factor 1.0: the running stats are simply replaced by the (unbiased)
      // batch stats.
      saved_batch_var.device(d) = batch_variance;
      saved_batch_mean.device(d) = batch_mean;
      new_variance.device(d) = batch_variance * rest_size_adjust;
      new_mean.device(d) = batch_mean;
    } else {
      // Exponential moving average of the running statistics.
      U one_minus_factor = U(1) - exponential_avg_factor;
      saved_batch_var.device(d) = batch_variance;
      saved_batch_mean.device(d) = batch_mean;
      new_variance.device(d) =
          one_minus_factor * old_variance +
          (exponential_avg_factor * rest_size_adjust) * batch_variance;
      new_mean.device(d) =
          one_minus_factor * old_mean + exponential_avg_factor * batch_mean;
    }
    // Transpose the NHWC result back into the caller's NCHW output.
    if (tensor_format == FORMAT_NCHW) {
      const std::array<int32, 4> perm = {0, 3, 1, 2};
      const Status s = ::tensorflow::DoTranspose(
          context->eigen_device<CPUDevice>(), transformed_y, perm, y_output);
      if (!s.ok()) {
        context->SetStatus(errors::InvalidArgument("Transpose failed: ", s));
      }
    }
  }
};
// CPU implementation of fused batch normalization in *inference* mode:
// normalizes with the provided (frozen) estimated mean/variance instead of
// computing batch statistics, and copies those estimates through to the
// batch_mean/batch_var outputs. Side inputs and fused activations are
// rejected on CPU.
template <typename T, typename U>
struct FusedBatchNorm<CPUDevice, T, U, false> {
  void operator()(OpKernelContext* context, const Tensor& x_input,
                  const Tensor& scale_input, const Tensor& offset_input,
                  const Tensor& estimated_mean_input,
                  const Tensor& estimated_variance_input,
                  const Tensor* side_input, U epsilon, U exponential_avg_factor,
                  FusedBatchNormActivationMode activation_mode,
                  Tensor* y_output, Tensor* batch_mean_output,
                  Tensor* batch_var_output, Tensor* saved_mean_output,
                  Tensor* saved_var_output, TensorFormat tensor_format,
                  bool use_reserved_space) {
    // CPU path supports neither side inputs nor fused activations.
    OP_REQUIRES(context, side_input == nullptr,
                errors::Internal(
                    "The CPU implementation of FusedBatchNorm does not support "
                    "side input."));
    OP_REQUIRES(context,
                activation_mode == FusedBatchNormActivationMode::kIdentity,
                errors::Internal("The CPU implementation of FusedBatchNorm "
                                 "does not support activations."));
    // Reserve space (output 5) is cuDNN-only; emit a scalar placeholder.
    if (use_reserved_space) {
      Tensor* dummy_reserve_space = nullptr;
      OP_REQUIRES_OK(context,
                     context->allocate_output(5, {}, &dummy_reserve_space));
      dummy_reserve_space->flat<U>()(0) = U();
    }
    // Empty input: fill the statistics outputs with NaN and return.
    if (x_input.shape().num_elements() == 0) {
      functor::SetNanFunctor<CPUDevice, U> f;
      f(context->eigen_device<CPUDevice>(), batch_mean_output->flat<U>());
      f(context->eigen_device<CPUDevice>(), batch_var_output->flat<U>());
      return;
    }
    // The math below assumes NHWC; transpose NCHW inputs into scratch
    // tensors and transpose the result back at the end.
    Tensor transformed_x;
    Tensor transformed_y;
    if (tensor_format == FORMAT_NCHW) {
      const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N');
      const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H');
      const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W');
      const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C');
      TensorShape transformed_x_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NHWC, in_batch, in_rows, in_cols,
                                  in_depths, &transformed_x_shape));
      OP_REQUIRES_OK(
          context, context->allocate_temp(DataTypeToEnum<T>::value,
                                          transformed_x_shape, &transformed_x));
      TensorShape transformed_y_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NHWC, in_batch, in_rows, in_cols,
                                  in_depths, &transformed_y_shape));
      OP_REQUIRES_OK(
          context, context->allocate_temp(DataTypeToEnum<T>::value,
                                          transformed_y_shape, &transformed_y));
      // NCHW -> NHWC permutation.
      std::array<int32, 4> perm = {0, 2, 3, 1};
      OP_REQUIRES_OK(
          context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
                                             x_input, perm, &transformed_x));
    } else {
      transformed_x = x_input;
      transformed_y = *y_output;
    }
    typename TTypes<T, 4>::Tensor x(transformed_x.tensor<T, 4>());
    typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
    typename TTypes<U>::ConstVec offset(offset_input.vec<U>());
    typename TTypes<U>::ConstVec estimated_mean(estimated_mean_input.vec<U>());
    typename TTypes<U>::ConstVec estimated_variance(
        estimated_variance_input.vec<U>());
    typename TTypes<T, 4>::Tensor y(transformed_y.tensor<T, 4>());
    typename TTypes<U>::Vec batch_mean(batch_mean_output->vec<U>());
    typename TTypes<U>::Vec batch_variance(batch_var_output->vec<U>());
    const CPUDevice& d = context->eigen_device<CPUDevice>();
    const int depth = x.dimension(3);
    // Guard the rest_size division below against a zero channel dimension.
    OP_REQUIRES(
        context, depth != 0,
        errors::Internal("The 4th element in the input shape cannot be 0."));
    // Collapse N*H*W into a single "rest" dimension: all work is a 2-D
    // (rest x depth) broadcast over the channel axis.
    const int size = x.size();
    const int rest_size = size / depth;
    Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
    Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
    one_by_depth.set(1, depth);
    Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec;
    bcast_spec.set(0, rest_size);
    // y = (x - est_mean) * scale / sqrt(est_var + eps) + offset, computed in
    // the statistics type U.
    auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>();
    auto x_centered =
        x_rest_by_depth -
        estimated_mean.reshape(one_by_depth).broadcast(bcast_spec);
    auto scaling_factor = ((estimated_variance + epsilon).rsqrt() * scale)
                              .eval()
                              .reshape(one_by_depth)
                              .broadcast(bcast_spec);
    auto x_scaled = x_centered * scaling_factor;
    auto x_shifted =
        (x_scaled + offset.reshape(one_by_depth).broadcast(bcast_spec))
            .template cast<T>();
    y.reshape(rest_by_depth).device(d) = x_shifted;
    // In inference mode the "batch" statistics outputs are just the frozen
    // estimates passed in.
    batch_mean.device(d) = estimated_mean;
    batch_variance.device(d) = estimated_variance;
    // Transpose the NHWC result back into the caller's NCHW output.
    if (tensor_format == FORMAT_NCHW) {
      const std::array<int32, 4> perm = {0, 3, 1, 2};
      const Status s = ::tensorflow::DoTranspose(
          context->eigen_device<CPUDevice>(), transformed_y, perm, y_output);
      if (!s.ok()) {
        context->SetStatus(errors::InvalidArgument("Transpose failed: ", s));
      }
    }
  }
};
// CPU implementation of the fused batch normalization gradient (training
// statistics): given dL/dy, x, and the saved batch mean/variance, computes
// dL/dx, dL/dscale and dL/doffset. Fused activations and side-input
// gradients are rejected on CPU.
template <typename T, typename U>
struct FusedBatchNormGrad<CPUDevice, T, U> {
  void operator()(OpKernelContext* context, const Tensor& y_backprop_input,
                  const Tensor& x_input, const Tensor& scale_input,
                  const Tensor* offset_input, const Tensor& mean_input,
                  const Tensor& variance_input, const Tensor* y_input,
                  U epsilon, FusedBatchNormActivationMode activation_mode,
                  Tensor* x_backprop_output, Tensor* scale_backprop_output,
                  Tensor* offset_backprop_output,
                  Tensor* side_input_backprop_output, bool use_reserved_space,
                  TensorFormat tensor_format) {
    // CPU path supports neither fused activations nor side inputs.
    OP_REQUIRES(context,
                y_input == nullptr &&
                    activation_mode == FusedBatchNormActivationMode::kIdentity,
                errors::Internal(
                    "The CPU implementation of FusedBatchNormGrad does not "
                    "support activations."));
    OP_REQUIRES(context, side_input_backprop_output == nullptr,
                errors::Internal("The CPU implementation of FusedBatchNormGrad "
                                 "does not support side input."));
    // The math below assumes NHWC; for NCHW, transpose the inputs into
    // scratch tensors and transpose dL/dx back at the end.
    Tensor transformed_y_backprop_input;
    Tensor transformed_x_input;
    Tensor transformed_x_backprop_output;
    if (tensor_format == FORMAT_NCHW) {
      const int64_t in_batch = GetTensorDim(x_input, tensor_format, 'N');
      const int64_t in_rows = GetTensorDim(x_input, tensor_format, 'H');
      const int64_t in_cols = GetTensorDim(x_input, tensor_format, 'W');
      const int64_t in_depths = GetTensorDim(x_input, tensor_format, 'C');
      TensorShape transformed_y_backprop_input_shape;
      OP_REQUIRES_OK(context,
                     ShapeFromFormatWithStatus(
                         FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths,
                         &transformed_y_backprop_input_shape));
      OP_REQUIRES_OK(context,
                     context->allocate_temp(DataTypeToEnum<T>::value,
                                            transformed_y_backprop_input_shape,
                                            &transformed_y_backprop_input));
      TensorShape transformed_x_input_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NHWC, in_batch, in_rows, in_cols,
                                  in_depths, &transformed_x_input_shape));
      OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value,
                                                     transformed_x_input_shape,
                                                     &transformed_x_input));
      TensorShape transformed_x_backprop_output_shape;
      OP_REQUIRES_OK(context,
                     ShapeFromFormatWithStatus(
                         FORMAT_NHWC, in_batch, in_rows, in_cols, in_depths,
                         &transformed_x_backprop_output_shape));
      OP_REQUIRES_OK(context,
                     context->allocate_temp(DataTypeToEnum<T>::value,
                                            transformed_x_backprop_output_shape,
                                            &transformed_x_backprop_output));
      // NCHW -> NHWC permutation.
      std::array<int32, 4> perm = {0, 2, 3, 1};
      OP_REQUIRES_OK(
          context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
                                             y_backprop_input, perm,
                                             &transformed_y_backprop_input));
      OP_REQUIRES_OK(context, ::tensorflow::DoTranspose(
                                  context->eigen_device<CPUDevice>(), x_input,
                                  perm, &transformed_x_input));
    } else {
      transformed_y_backprop_input = y_backprop_input;
      transformed_x_input = x_input;
      transformed_x_backprop_output = *x_backprop_output;
    }
    typename TTypes<T, 4>::Tensor y_backprop(
        transformed_y_backprop_input.tensor<T, 4>());
    typename TTypes<T, 4>::Tensor x(transformed_x_input.tensor<T, 4>());
    typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
    typename TTypes<U>::ConstVec mean(mean_input.vec<U>());
    typename TTypes<U>::ConstVec variance(variance_input.vec<U>());
    typename TTypes<T, 4>::Tensor x_backprop(
        transformed_x_backprop_output.tensor<T, 4>());
    typename TTypes<U>::Vec offset_backprop(offset_backprop_output->vec<U>());
    const CPUDevice& d = context->eigen_device<CPUDevice>();
    // Collapse N*H*W into a single "rest" dimension so all reductions are
    // column sums of a 2-D (rest x depth) view.
    const int depth = x.dimension(3);
    const int size = x.size();
    const int rest_size = size / depth;
    Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
    Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
    one_by_depth.set(1, depth);
    Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> bcast_spec;
    bcast_spec.set(0, rest_size);
    auto x_rest_by_depth = x.reshape(rest_by_depth).template cast<U>();
    U rest_size_inv = static_cast<U>(1.0f / static_cast<U>(rest_size));
    // Column-sum reducers; one flavor per input element type.
    using ScalarSum = Eigen::internal::scalar_sum_op<U>;
    const functor::ReduceOuterDimensions<T, U, U, ScalarSum> redux_sum_t;
    const functor::ReduceOuterDimensions<U, U, U, ScalarSum> redux_sum_u;
    auto scratch_dtype = DataTypeToEnum<U>::value;
    Tensor scratch_one_by_depth;
    OP_REQUIRES_OK(context, context->allocate_temp(scratch_dtype, {depth},
                                                   &scratch_one_by_depth));
    // When T == U the x_backprop buffer can double as the (rest x depth)
    // scratch, avoiding one temporary allocation.
    Tensor scratch_rest_by_depth;
    if (std::is_same<T, U>::value) {
      OP_REQUIRES(context,
                  scratch_rest_by_depth.CopyFrom(transformed_x_backprop_output,
                                                 {rest_size, depth}),
                  errors::Internal("Failed to copy a tensor"));
    } else {
      OP_REQUIRES_OK(context,
                     context->allocate_temp(scratch_dtype, {rest_size, depth},
                                            &scratch_rest_by_depth));
    }
    typename TTypes<U, 2>::Tensor scratch_tensor(
        scratch_rest_by_depth.tensor<U, 2>());
    typename TTypes<U>::Vec scratch_vector(scratch_one_by_depth.vec<U>());
    auto x_mean_rest_by_depth =
        mean.reshape(one_by_depth).broadcast(bcast_spec);
    auto x_centered = (x_rest_by_depth - x_mean_rest_by_depth);
    // coef0 = 1 / sqrt(var + eps), per channel.
    auto coef0_one_by_depth =
        (variance.reshape(one_by_depth) + epsilon).rsqrt();
    auto coef0_rest_by_depth = coef0_one_by_depth.broadcast(bcast_spec);
    auto x_scaled = x_centered * coef0_rest_by_depth;
    auto y_backprop_rest_by_depth =
        y_backprop.reshape(rest_by_depth).template cast<U>();
    // dL/dscale = sum(dy * x_hat); dL/doffset = sum(dy).
    scratch_tensor.device(d) = y_backprop_rest_by_depth * x_scaled;
    redux_sum_u(d, rest_by_depth, scratch_rest_by_depth, scale_backprop_output);
    redux_sum_t(d, rest_by_depth, transformed_y_backprop_input,
                offset_backprop_output);
    // Reuse dL/doffset as sum(dy) for the dL/dx formula below.
    auto y_backprop_sum = offset_backprop;
    auto y_backprop_sum_one_by_depth = y_backprop_sum.reshape(one_by_depth);
    auto y_backprop_mean_one_by_depth =
        y_backprop_sum_one_by_depth * rest_size_inv;
    auto y_backprop_mean_rest_by_depth =
        y_backprop_mean_one_by_depth.broadcast(bcast_spec);
    auto y_backprop_centered =
        y_backprop_rest_by_depth - y_backprop_mean_rest_by_depth;
    // mean(dy * (x - mean)) per channel.
    scratch_tensor.device(d) = y_backprop_rest_by_depth * x_centered;
    redux_sum_u(d, rest_by_depth, scratch_rest_by_depth, &scratch_one_by_depth);
    auto y_backprop_centered_mean =
        scratch_vector.reshape(one_by_depth) / static_cast<U>(rest_size);
    // dL/dx = scale/sqrt(var+eps) * (dy - mean(dy)
    //         - (x - mean) * mean(dy*(x - mean)) / (var + eps)).
    auto coef1 = (scale.reshape(one_by_depth) * coef0_one_by_depth)
                     .broadcast(bcast_spec);
    auto coef2 = (coef0_one_by_depth.square() * y_backprop_centered_mean)
                     .broadcast(bcast_spec);
    x_backprop.reshape(rest_by_depth).device(d) =
        (coef1 * (y_backprop_centered - x_centered * coef2)).template cast<T>();
    // Transpose dL/dx back into the caller's NCHW output.
    if (tensor_format == FORMAT_NCHW) {
      std::array<int32, 4> perm = {0, 3, 1, 2};
      OP_REQUIRES_OK(
          context, ::tensorflow::DoTranspose(context->eigen_device<CPUDevice>(),
                                             transformed_x_backprop_output,
                                             perm, x_backprop_output));
    }
  }
};
// CPU gradient for batch normalization with *frozen* (population)
// statistics, i.e. the is_training=false backward pass: mean/variance are
// treated as constants, so dL/dx is simply dy * scale / sqrt(var + eps).
// Assumes NHWC-compatible flattening (reduction over all but the channel
// axis).
template <typename T, typename U>
struct FusedBatchNormFreezeGrad<CPUDevice, T, U> {
  void operator()(OpKernelContext* context, const Tensor& y_backprop_input,
                  const Tensor& x_input, const Tensor& scale_input,
                  const Tensor& pop_mean_input,
                  const Tensor& pop_variance_input, U epsilon,
                  Tensor* x_backprop_output, Tensor* scale_backprop_output,
                  Tensor* offset_backprop_output) {
    typename TTypes<T, 4>::ConstTensor y_backprop(
        y_backprop_input.tensor<T, 4>());
    typename TTypes<T, 4>::ConstTensor input(x_input.tensor<T, 4>());
    typename TTypes<U>::ConstVec scale(scale_input.vec<U>());
    typename TTypes<U>::ConstVec pop_mean(pop_mean_input.vec<U>());
    typename TTypes<U>::ConstVec pop_var(pop_variance_input.vec<U>());
    typename TTypes<T, 4>::Tensor x_backprop(x_backprop_output->tensor<T, 4>());
    typename TTypes<U>::Vec scale_backprop(scale_backprop_output->vec<U>());
    // Collapse all non-channel dimensions into a single "rest" dimension.
    const int depth = pop_mean.dimension(0);
    const int rest_size = input.size() / depth;
    const CPUDevice& d = context->eigen_device<CPUDevice>();
    // scratch1 = rsqrt(var + eps); scratch2 = sum(dy * (x - mean)).
    Tensor scratch1_vec, scratch2_vec;
    OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value,
                                                   {depth}, &scratch1_vec));
    OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value,
                                                   {depth}, &scratch2_vec));
    // When T == U the x_backprop buffer can double as the (rest x depth)
    // scratch, avoiding a temporary allocation.
    Tensor scratch3_tensor;
    if (std::is_same<T, U>::value) {
      OP_REQUIRES(
          context,
          scratch3_tensor.CopyFrom(*x_backprop_output, {rest_size, depth}),
          errors::Internal("Failed to copy a tensor"));
    } else {
      OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<U>::value,
                                                     {rest_size, depth},
                                                     &scratch3_tensor));
    }
    typename TTypes<U>::Vec scratch1(scratch1_vec.vec<U>());
    typename TTypes<U>::Vec scratch2(scratch2_vec.vec<U>());
    typename TTypes<U, 2>::Tensor scratch3(scratch3_tensor.tensor<U, 2>());
    Eigen::DSizes<Eigen::Index, 2> rest_by_depth(rest_size, depth);
    Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> one_by_depth;
    one_by_depth.set(1, depth);
    Eigen::IndexList<Eigen::Index, Eigen::type2index<1>> rest_by_one;
    rest_by_one.set(0, rest_size);
    // Column-sum reducers; one flavor per input element type.
    using ScalarSum = Eigen::internal::scalar_sum_op<U>;
    const functor::ReduceOuterDimensions<T, U, U, ScalarSum> redux_sum_t;
    const functor::ReduceOuterDimensions<U, U, U, ScalarSum> redux_sum_u;
    auto y_backprop_rest_by_depth =
        y_backprop.reshape(rest_by_depth).template cast<U>();
    auto input_rest_by_depth = input.reshape(rest_by_depth).template cast<U>();
    // dL/doffset = sum(dy) per channel.
    redux_sum_t(d, rest_by_depth, y_backprop_input, offset_backprop_output);
    scratch1 = (pop_var + pop_var.constant(epsilon)).rsqrt();
    // scratch3 = dy * (x - pop_mean), reduced below into scratch2.
    scratch3.device(d) =
        y_backprop_rest_by_depth *
        (input_rest_by_depth -
         pop_mean.reshape(one_by_depth).broadcast(rest_by_one));
    redux_sum_u(d, rest_by_depth, scratch3_tensor, &scratch2_vec);
    // dL/dx = dy * scale / sqrt(var + eps).
    x_backprop.reshape(rest_by_depth).device(d) =
        (y_backprop_rest_by_depth *
         ((scratch1.reshape(one_by_depth) * scale.reshape(one_by_depth))
              .broadcast(rest_by_one)))
            .template cast<T>();
    // dL/dscale = sum(dy * (x - mean)) / sqrt(var + eps).
    scale_backprop = scratch2 * scratch1;
  }
};
#if !GOOGLE_CUDA
namespace {
// Stub for builds without CUDA: the cuDNN spatial-persistent batch-norm
// fast path is never available.
bool BatchnormSpatialPersistentEnabled() { return false; }
}
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace {
// Translates the kernel's activation-mode enum into the StreamExecutor DNN
// equivalent.
se::dnn::ActivationMode AsDnnActivationMode(
    const FusedBatchNormActivationMode activation_mode) {
  if (activation_mode == FusedBatchNormActivationMode::kRelu) {
    return se::dnn::ActivationMode::kRelu;
  }
  return se::dnn::ActivationMode::kNone;
}
#if GOOGLE_CUDA
// Whether the cuDNN spatial-persistent batch-norm fast path may be used.
// Requires cuDNN >= 7.4.2 and is opt-in via the
// TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT environment variable
// (defaults to disabled). The result is computed once and cached.
bool BatchnormSpatialPersistentEnabled() {
#if CUDNN_VERSION >= 7402
  static bool is_enabled = [] {
    bool is_enabled = false;
    TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar(
        "TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT",
        false, &is_enabled));
    return is_enabled;
  }();
  return is_enabled;
#else
  return false;
#endif
}
#endif
}
// Reinterprets the flat T-typed buffer of `tensor` as a DeviceMemory<U>
// region covering the same bytes; no data is copied.
template <typename U, typename T>
DeviceMemory<U> CastDeviceMemory(Tensor* tensor) {
  auto flat = tensor->template flat<T>();
  const auto num_bytes = flat.size() * sizeof(T);
  return DeviceMemory<U>::MakeFromByteSize(flat.data(), num_bytes);
}
// ScratchAllocator that backs cuDNN workspace requests with TF temporary
// tensors of element type T. All allocated tensors are retained for the
// lifetime of the allocator so the memory stays valid while cuDNN uses it.
template <typename T>
class CudnnBatchNormAllocatorInTemp : public ScratchAllocator {
 public:
  ~CudnnBatchNormAllocatorInTemp() override = default;
  explicit CudnnBatchNormAllocatorInTemp(OpKernelContext* context)
      : context_(context) {}
  // No explicit cap: limited only by what allocate_temp can provide.
  int64_t GetMemoryLimitInBytes() override {
    return std::numeric_limits<int64_t>::max();
  }
  // Allocates a temp tensor with at least `byte_size` bytes (rounded up to
  // a whole number of T elements) and returns a byte view over it.
  StatusOr<DeviceMemory<uint8>> AllocateBytes(int64_t byte_size) override {
    Tensor temporary_memory;
    const DataType tf_data_type = DataTypeToEnum<T>::v();
    int64_t allocate_count =
        Eigen::divup(byte_size, static_cast<int64_t>(sizeof(T)));
    Status allocation_status(context_->allocate_temp(
        tf_data_type, TensorShape({allocate_count}), &temporary_memory));
    if (!allocation_status.ok()) {
      return allocation_status;
    }
    // Keep a reference so the buffer outlives this call.
    allocated_tensors_.push_back(temporary_memory);
    total_byte_size_ += byte_size;
    return DeviceMemory<uint8>::MakeFromByteSize(
        temporary_memory.template flat<T>().data(),
        temporary_memory.template flat<T>().size() * sizeof(T));
  }
  // Total bytes requested across all AllocateBytes calls.
  int64_t TotalByteSize() const { return total_byte_size_; }
  Tensor get_allocated_tensor(int index) const {
    return allocated_tensors_[index];
  }
 private:
  int64_t total_byte_size_ = 0;
  OpKernelContext* context_;
  std::vector<Tensor> allocated_tensors_;
};
// ScratchAllocator that satisfies a single cuDNN reserve-space request by
// allocating it directly as op output `output_index` (so the reserve space
// can be passed to the backward op). If cuDNN never asks for memory, the
// destructor emits an empty output tensor so the output is always set.
template <typename T>
class CudnnBatchNormAllocatorInOutput : public ScratchAllocator {
 public:
  ~CudnnBatchNormAllocatorInOutput() override {
    // Guarantee the output slot is populated even when no allocation was
    // requested.
    if (!output_allocated) {
      Tensor* dummy_reserve_space = nullptr;
      OP_REQUIRES_OK(context_, context_->allocate_output(output_index_, {},
                                                         &dummy_reserve_space));
    }
  }
  CudnnBatchNormAllocatorInOutput(OpKernelContext* context, int output_index)
      : context_(context), output_index_(output_index) {}
  // No explicit cap: limited only by what allocate_output can provide.
  int64_t GetMemoryLimitInBytes() override {
    return std::numeric_limits<int64_t>::max();
  }
  // Allocates the reserve space as an op output; may be called only once.
  StatusOr<DeviceMemory<uint8>> AllocateBytes(int64_t byte_size) override {
    output_allocated = true;
    DCHECK(total_byte_size_ == 0)
        << "Reserve space allocator can only be called once";
    int64_t allocate_count =
        Eigen::divup(byte_size, static_cast<int64_t>(sizeof(T)));
    Tensor* temporary_memory = nullptr;
    Status allocation_status(context_->allocate_output(
        output_index_, TensorShape({allocate_count}), &temporary_memory));
    if (!allocation_status.ok()) {
      return allocation_status;
    }
    total_byte_size_ += byte_size;
    auto memory_uint8 = DeviceMemory<uint8>::MakeFromByteSize(
        temporary_memory->template flat<T>().data(),
        temporary_memory->template flat<T>().size() * sizeof(T));
    return StatusOr<DeviceMemory<uint8>>(memory_uint8);
  }
  // Total bytes requested (at most one allocation).
  int64_t TotalByteSize() { return total_byte_size_; }
 private:
  int64_t total_byte_size_ = 0;
  OpKernelContext* context_;
  int output_index_;
  bool output_allocated = false;
};
template <typename T, typename U, bool is_training>
struct FusedBatchNormImplGPU {
void operator()(OpKernelContext* context, const Tensor& x,
const Tensor& scale, const Tensor& offset,
const Tensor& estimated_mean,
const Tensor& estimated_variance, const Tensor* side_input,
U epsilon, U exponential_avg_factor,
FusedBatchNormActivationMode activation_mode, Tensor* y,
Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean,
Tensor* saved_inv_var, TensorFormat tensor_format,
bool use_reserved_space) {
auto* stream = context->op_device_context()->stream();
OP_REQUIRES(context, stream, errors::Internal("No GPU stream available"));
const int64_t batch_size = GetTensorDim(x, tensor_format, 'N');
const int64_t channels = GetTensorDim(x, tensor_format, 'C');
const int64_t height = GetTensorDim(x, tensor_format, 'H');
const int64_t width = GetTensorDim(x, tensor_format, 'W');
#if GOOGLE_CUDA
const bool fast_nhwc_batch_norm =
!is_training || (BatchnormSpatialPersistentEnabled() &&
(DataTypeToEnum<T>::value == DT_HALF ||
DataTypeToEnum<T>::value == DT_BFLOAT16) &&
use_reserved_space);
#else
const bool fast_nhwc_batch_norm = false;
#endif
TensorFormat compute_format =
fast_nhwc_batch_norm && tensor_format == FORMAT_NHWC ? FORMAT_NHWC
: FORMAT_NCHW;
VLOG(2) << "FusedBatchNorm:"
<< " batch_size: " << batch_size << " channels: " << channels
<< " height: " << height << " width:" << width
<< " x shape: " << x.shape().DebugString()
<< " scale shape: " << scale.shape().DebugString()
<< " offset shape: " << offset.shape().DebugString()
<< " activation mode: " << ToString(activation_mode)
<< " tensor format: " << ToString(tensor_format)
<< " compute format: " << ToString(compute_format);
auto maybe_make_dummy_output = [context, use_reserved_space]() -> Status {
if (use_reserved_space) {
Tensor* dummy_reserve_space = nullptr;
return context->allocate_output(5, {}, &dummy_reserve_space);
}
return OkStatus();
};
if (x.shape().num_elements() == 0) {
OP_REQUIRES_OK(context, maybe_make_dummy_output());
functor::SetNanFunctor<GPUDevice, U> f;
f(context->eigen_device<GPUDevice>(), batch_mean->flat<U>());
f(context->eigen_device<GPUDevice>(), batch_var->flat<U>());
return;
}
const bool has_side_input = side_input != nullptr;
const bool has_activation =
activation_mode != FusedBatchNormActivationMode::kIdentity;
if (!is_training && (has_side_input || has_activation)) {
OP_REQUIRES_OK(context, maybe_make_dummy_output());
FusedBatchNormInferenceFunctor<GPUDevice, T, U> inference_functor;
if (has_side_input) {
inference_functor(context, tensor_format, x.tensor<T, 4>(),
scale.vec<U>(), offset.vec<U>(),
estimated_mean.vec<U>(), estimated_variance.vec<U>(),
side_input->tensor<T, 4>(), epsilon, activation_mode,
y->tensor<T, 4>());
} else {
typename TTypes<T, 4>::ConstTensor empty_tensor(nullptr, 0, 0, 0, 0);
inference_functor(context, tensor_format, x.tensor<T, 4>(),
scale.vec<U>(), offset.vec<U>(),
estimated_mean.vec<U>(), estimated_variance.vec<U>(),
empty_tensor, epsilon, activation_mode,
y->tensor<T, 4>());
}
return;
}
Tensor x_maybe_transformed = x;
Tensor x_transformed;
Tensor y_transformed;
se::DeviceMemory<T> y_ptr;
if (tensor_format == compute_format) {
y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*y);
} else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
TensorShape x_transformed_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
compute_format, batch_size, height, width,
channels, &x_transformed_shape));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
x_transformed_shape, &x_transformed));
functor::NHWCToNCHW<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(),
x_transformed.tensor<T, 4>());
x_maybe_transformed = x_transformed;
TensorShape y_transformed_shape;
OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
compute_format, batch_size, height, width,
channels, &y_transformed_shape));
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
y_transformed_shape, &y_transformed));
y_ptr = StreamExecutorUtil::AsDeviceMemory<T>(y_transformed);
} else {
context->SetStatus(errors::Internal(
"Unsupported tensor format: ", ToString(tensor_format),
" and compute format: ", ToString(compute_format)));
return;
}
const se::dnn::DataLayout data_layout =
compute_format == FORMAT_NHWC ? se::dnn::DataLayout::kBatchYXDepth
: se::dnn::DataLayout::kBatchDepthYX;
se::dnn::BatchDescriptor x_desc;
x_desc.set_count(batch_size)
.set_feature_map_count(channels)
.set_height(height)
.set_width(width)
.set_layout(data_layout);
se::dnn::BatchDescriptor scale_offset_desc;
scale_offset_desc.set_count(1)
.set_feature_map_count(channels)
.set_height(1)
.set_width(1)
.set_layout(se::dnn::DataLayout::kBatchDepthYX);
auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed);
auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale);
auto offset_ptr = StreamExecutorUtil::AsDeviceMemory<U>(offset);
auto estimated_mean_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(estimated_mean);
auto estimated_variance_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(estimated_variance);
auto side_input_ptr =
side_input != nullptr
? StreamExecutorUtil::AsDeviceMemory<T>(*side_input)
: se::DeviceMemory<T>();
auto batch_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_mean);
auto batch_var_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*batch_var);
auto saved_mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(*saved_mean);
auto saved_inv_var_ptr =
StreamExecutorUtil::AsDeviceMemory<U>(*saved_inv_var);
std::unique_ptr<functor::CudnnBatchNormAllocatorInOutput<U>>
reserve_space_allocator;
std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>>
workspace_allocator;
if (use_reserved_space) {
reserve_space_allocator.reset(
new functor::CudnnBatchNormAllocatorInOutput<U>(context, 5));
workspace_allocator.reset(
new functor::CudnnBatchNormAllocatorInTemp<uint8>(context));
}
if (!batch_mean->SharesBufferWith(estimated_mean) &&
exponential_avg_factor != 1.0f) {
OP_REQUIRES_OK(
context, stream->MemcpyD2D(&batch_mean_ptr, estimated_mean_ptr,
estimated_mean.NumElements() * sizeof(U)));
}
if (!batch_var->SharesBufferWith(estimated_variance) &&
exponential_avg_factor != 1.0f) {
OP_REQUIRES_OK(
context,
stream->MemcpyD2D(&batch_var_ptr, estimated_variance_ptr,
estimated_variance.NumElements() * sizeof(U)));
}
auto dnn = stream->parent()->AsDnn();
if (dnn == nullptr) {
context->SetStatus(absl::InternalError("No DNN support for stream"));
return;
}
bool cudnn_launch_status = dnn->DoBatchNormalizationForward(
stream, x_ptr, scale_ptr, offset_ptr, estimated_mean_ptr,
estimated_variance_ptr, side_input_ptr, x_desc, scale_offset_desc,
static_cast<double>(epsilon),
static_cast<double>(exponential_avg_factor),
AsDnnActivationMode(activation_mode), &y_ptr, &batch_mean_ptr,
&batch_var_ptr, &saved_mean_ptr, &saved_inv_var_ptr, is_training,
reserve_space_allocator.get(), workspace_allocator.get());
if (!cudnn_launch_status) {
context->SetStatus(
errors::Internal("cuDNN launch failure : input shape (",
x.shape().DebugString(), ")"));
return;
}
if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
functor::NCHWToNHWC<GPUDevice, T, 4>()(
context->eigen_device<GPUDevice>(),
const_cast<const Tensor&>(y_transformed).tensor<T, 4>(),
y->tensor<T, 4>());
}
}
};
template <typename T, typename U, bool is_training>
struct FusedBatchNorm<GPUDevice, T, U, is_training> {
  // Generic GPU specialization: a thin forwarder to FusedBatchNormImplGPU.
  // No dtype conversion is needed here; the bfloat16 case that may require
  // casting has its own partial specialization below.
  void operator()(OpKernelContext* context, const Tensor& x,
                  const Tensor& scale, const Tensor& offset,
                  const Tensor& estimated_mean,
                  const Tensor& estimated_variance, const Tensor* side_input,
                  U epsilon, U exponential_avg_factor,
                  FusedBatchNormActivationMode activation_mode, Tensor* y,
                  Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean,
                  Tensor* saved_inv_var, TensorFormat tensor_format,
                  bool use_reserved_space) {
    FusedBatchNormImplGPU<T, U, is_training>()(
        context, x, scale, offset, estimated_mean, estimated_variance,
        side_input, epsilon, exponential_avg_factor, activation_mode, y,
        batch_mean, batch_var, saved_mean, saved_inv_var, tensor_format,
        use_reserved_space);
  }
};
template <bool is_training>
struct FusedBatchNorm<GPUDevice, Eigen::bfloat16, float, is_training> {
  // bfloat16 specialization. When the GPU ops do not support bfloat16
  // (IsBF16SupportedInOps(stream) is false), the T-typed tensors (x, the
  // optional side input, and y) are cast up to float, the float kernel is
  // run, and the result is cast back into the caller's bf16 output. The
  // U-typed tensors (scale/offset/statistics) are already float and are
  // passed through unchanged.
  void operator()(OpKernelContext* context, const Tensor& x,
                  const Tensor& scale, const Tensor& offset,
                  const Tensor& estimated_mean,
                  const Tensor& estimated_variance, const Tensor* side_input,
                  float epsilon, float exponential_avg_factor,
                  FusedBatchNormActivationMode activation_mode, Tensor* y,
                  Tensor* batch_mean, Tensor* batch_var, Tensor* saved_mean,
                  Tensor* saved_inv_var, TensorFormat tensor_format,
                  bool use_reserved_space) {
    auto* stream = context->op_device_context()->stream();
    const bool cast_to_float = !IsBF16SupportedInOps(stream);
    if (cast_to_float) {
      // These temporaries are always (re)assigned by allocate_temp below, so
      // copy-initializing them from the bf16 tensors would be dead work.
      Tensor casted_x;
      Tensor casted_side_input;
      Tensor casted_y;
      const GPUDevice& device = context->eigen_device<GPUDevice>();
      functor::CastFunctor<GPUDevice, float, Eigen::bfloat16> cast;
      OP_REQUIRES_OK(context,
                     context->allocate_temp(DT_FLOAT, x.shape(), &casted_x));
      cast(device, casted_x.template flat<float>(),
           x.template flat<Eigen::bfloat16>());
      if (side_input != nullptr) {
        OP_REQUIRES_OK(context,
                       context->allocate_temp(DT_FLOAT, side_input->shape(),
                                              &casted_side_input));
        cast(device, casted_side_input.template flat<float>(),
             side_input->template flat<Eigen::bfloat16>());
      }
      OP_REQUIRES_OK(context,
                     context->allocate_temp(DT_FLOAT, y->shape(), &casted_y));
      // Run the float implementation on the widened tensors.
      FusedBatchNormImplGPU<float, float, is_training>()(
          context, casted_x, scale, offset, estimated_mean, estimated_variance,
          (side_input != nullptr) ? &casted_side_input : nullptr, epsilon,
          exponential_avg_factor, activation_mode, &casted_y, batch_mean,
          batch_var, saved_mean, saved_inv_var, tensor_format,
          use_reserved_space);
      // Narrow the float result back into the caller-provided bf16 output.
      functor::CastFunctor<GPUDevice, Eigen::bfloat16, float> cast_back;
      const Tensor& casted_y_const = casted_y;
      cast_back(device, y->template flat<Eigen::bfloat16>(),
                casted_y_const.template flat<float>());
      return;
    }
    // Native bfloat16 support: run directly, no casting required.
    FusedBatchNormImplGPU<Eigen::bfloat16, float, is_training>()(
        context, x, scale, offset, estimated_mean, estimated_variance,
        side_input, epsilon, exponential_avg_factor, activation_mode, y,
        batch_mean, batch_var, saved_mean, saved_inv_var, tensor_format,
        use_reserved_space);
  }
};
template <typename T, typename U>
struct FusedBatchNormGradImplGPU {
  // GPU implementation of the FusedBatchNorm gradient, backed by
  // DnnSupport::DoBatchNormalizationBackward (cuDNN/MIOpen).
  //
  // T is the data type of x / y / backprops; U is the type of
  // scale / mean / inv_variance. When the data arrives as NHWC but the
  // kernel must run in NCHW, y_backprop and x are transposed into NCHW
  // temporaries, the kernel writes an NCHW x-backprop temporary, and that
  // temporary is transposed back into the caller's output at the end.
  // offset and y are optional: they are only supplied when an activation was
  // fused into the forward pass.
  void operator()(OpKernelContext* context, const Tensor& y_backprop,
                  const Tensor& x, const Tensor& scale, const Tensor* offset,
                  const Tensor& mean, const Tensor& inv_variance,
                  const Tensor* y, U epsilon,
                  FusedBatchNormActivationMode activation_mode,
                  Tensor* x_backprop, Tensor* scale_backprop,
                  Tensor* offset_backprop, Tensor* side_input_backprop,
                  bool use_reserved_space, TensorFormat tensor_format) {
    auto* stream = context->op_device_context()->stream();
    OP_REQUIRES(context, stream, errors::Internal("No GPU stream available"));
    const int64_t batch_size = GetTensorDim(x, tensor_format, 'N');
    const int64_t channels = GetTensorDim(x, tensor_format, 'C');
    const int64_t height = GetTensorDim(x, tensor_format, 'H');
    const int64_t width = GetTensorDim(x, tensor_format, 'W');
#if GOOGLE_CUDA
    // The fast NHWC path (cuDNN spatial-persistent mode) is only usable for
    // half/bfloat16 data and needs the reserve space saved by the forward
    // pass.
    const bool fast_nhwc_batch_norm =
        BatchnormSpatialPersistentEnabled() &&
        (DataTypeToEnum<T>::value == DT_HALF ||
         DataTypeToEnum<T>::value == DT_BFLOAT16) &&
        use_reserved_space;
#else
    const bool fast_nhwc_batch_norm = false;
#endif
    TensorFormat compute_format =
        fast_nhwc_batch_norm && tensor_format == FORMAT_NHWC ? FORMAT_NHWC
                                                             : FORMAT_NCHW;
    VLOG(2) << "FusedBatchNormGrad:"
            << " batch_size: " << batch_size << " channels: " << channels
            << " height: " << height << " width: " << width
            << " y_backprop shape: " << y_backprop.shape().DebugString()
            << " x shape: " << x.shape().DebugString()
            << " scale shape: " << scale.shape().DebugString()
            << " activation mode: " << ToString(activation_mode)
            << " tensor format: " << ToString(tensor_format)
            << " compute format: " << ToString(compute_format);
    Tensor y_backprop_maybe_transformed = y_backprop;
    Tensor x_maybe_transformed = x;
    Tensor y_backprop_transformed;
    Tensor x_transformed;
    Tensor x_backprop_transformed;
    se::DeviceMemory<T> x_backprop_ptr;
    if (tensor_format == compute_format) {
      // Formats agree: the kernel can write straight into the output.
      x_backprop_ptr = StreamExecutorUtil::AsDeviceMemory<T>(*x_backprop);
    } else if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
      // Transpose y_backprop and x into NCHW temporaries, and allocate an
      // NCHW temporary to receive x_backprop.
      TensorShape y_backprop_transformed_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NCHW, batch_size, height, width,
                                  channels, &y_backprop_transformed_shape));
      OP_REQUIRES_OK(context,
                     context->allocate_temp(DataTypeToEnum<T>::value,
                                            y_backprop_transformed_shape,
                                            &y_backprop_transformed));
      functor::NHWCToNCHW<GPUDevice, T, 4>()(
          context->eigen_device<GPUDevice>(),
          const_cast<const Tensor&>(y_backprop_maybe_transformed)
              .tensor<T, 4>(),
          y_backprop_transformed.tensor<T, 4>());
      y_backprop_maybe_transformed = y_backprop_transformed;
      TensorShape x_transformed_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(FORMAT_NCHW, batch_size,
                                                        height, width, channels,
                                                        &x_transformed_shape));
      OP_REQUIRES_OK(
          context, context->allocate_temp(DataTypeToEnum<T>::value,
                                          x_transformed_shape, &x_transformed));
      functor::NHWCToNCHW<GPUDevice, T, 4>()(
          context->eigen_device<GPUDevice>(),
          const_cast<const Tensor&>(x_maybe_transformed).tensor<T, 4>(),
          x_transformed.tensor<T, 4>());
      x_maybe_transformed = x_transformed;
      TensorShape x_backprop_transformed_shape;
      OP_REQUIRES_OK(context, ShapeFromFormatWithStatus(
                                  FORMAT_NCHW, batch_size, height, width,
                                  channels, &x_backprop_transformed_shape));
      OP_REQUIRES_OK(context,
                     context->allocate_temp(DataTypeToEnum<T>::value,
                                            x_backprop_transformed_shape,
                                            &x_backprop_transformed));
      x_backprop_ptr =
          StreamExecutorUtil::AsDeviceMemory<T>(x_backprop_transformed);
    } else {
      context->SetStatus(errors::Internal(
          "Unsupported tensor format: ", ToString(tensor_format),
          " and compute format: ", ToString(compute_format)));
      return;
    }
    const se::dnn::DataLayout data_layout =
        compute_format == FORMAT_NHWC ? se::dnn::DataLayout::kBatchYXDepth
                                      : se::dnn::DataLayout::kBatchDepthYX;
    se::dnn::BatchDescriptor x_desc;
    x_desc.set_count(batch_size)
        .set_feature_map_count(channels)
        .set_height(height)
        .set_width(width)
        .set_layout(data_layout);
    // scale/offset/statistics are per-channel vectors: described as 1x1xC.
    se::dnn::BatchDescriptor scale_offset_desc;
    scale_offset_desc.set_count(1)
        .set_feature_map_count(channels)
        .set_height(1)
        .set_width(1)
        .set_layout(se::dnn::DataLayout::kBatchDepthYX);
    auto y_backprop_ptr =
        StreamExecutorUtil::AsDeviceMemory<T>(y_backprop_maybe_transformed);
    auto x_ptr = StreamExecutorUtil::AsDeviceMemory<T>(x_maybe_transformed);
    auto scale_ptr = StreamExecutorUtil::AsDeviceMemory<U>(scale);
    // Null offset / y / side_input_backprop become empty device memory.
    auto offset_ptr = offset != nullptr
                          ? StreamExecutorUtil::AsDeviceMemory<U>(*offset)
                          : se::DeviceMemory<U>();
    auto mean_ptr = StreamExecutorUtil::AsDeviceMemory<U>(mean);
    auto inv_variance_ptr = StreamExecutorUtil::AsDeviceMemory<U>(inv_variance);
    auto y_ptr = y != nullptr ? StreamExecutorUtil::AsDeviceMemory<T>(*y)
                              : se::DeviceMemory<T>();
    auto scale_backprop_ptr =
        StreamExecutorUtil::AsDeviceMemory<U>(*scale_backprop);
    auto offset_backprop_ptr =
        StreamExecutorUtil::AsDeviceMemory<U>(*offset_backprop);
    auto side_input_backprop_ptr =
        side_input_backprop != nullptr
            ? StreamExecutorUtil::AsDeviceMemory<T>(*side_input_backprop)
            : se::DeviceMemory<T>();
    std::unique_ptr<functor::CudnnBatchNormAllocatorInTemp<uint8>>
        workspace_allocator;
    DeviceMemory<uint8>* reserve_space_data_ptr = nullptr;
    DeviceMemory<uint8> reserve_space_data;
#if CUDNN_VERSION >= 7402
    if (use_reserved_space) {
      const Tensor& reserve_space = context->input(5);
      workspace_allocator.reset(
          new functor::CudnnBatchNormAllocatorInTemp<uint8>(context));
      // A 0-d reserve space means the forward pass produced none; pass null
      // to the DNN library in that case.
      if (reserve_space.dims() != 0) {
        reserve_space_data = functor::CastDeviceMemory<uint8, U>(
            const_cast<Tensor*>(&reserve_space));
        reserve_space_data_ptr = &reserve_space_data;
      }
    }
#endif
    auto dnn = stream->parent()->AsDnn();
    if (dnn == nullptr) {
      context->SetStatus(absl::InternalError("No DNN support for stream"));
      return;
    }
    bool cudnn_launch_status = dnn->DoBatchNormalizationBackward(
        stream, y_backprop_ptr, x_ptr, scale_ptr, offset_ptr, mean_ptr,
        inv_variance_ptr, y_ptr, x_desc, scale_offset_desc,
        static_cast<double>(epsilon), AsDnnActivationMode(activation_mode),
        &x_backprop_ptr, &scale_backprop_ptr, &offset_backprop_ptr,
        &side_input_backprop_ptr, reserve_space_data_ptr,
        workspace_allocator.get());
    if (!cudnn_launch_status) {
      context->SetStatus(
          errors::Internal("cuDNN launch failure : input shape (",
                           x.shape().DebugString(), ")"));
      // Bail out early (matching the forward pass): there is no point
      // transposing an undefined x_backprop back to NHWC after a failed
      // launch.
      return;
    }
    if (tensor_format == FORMAT_NHWC && compute_format == FORMAT_NCHW) {
      // Transpose the NCHW x-backprop temporary back into the caller's NHWC
      // output tensor.
      functor::NCHWToNHWC<GPUDevice, T, 4>()(
          context->eigen_device<GPUDevice>(),
          const_cast<const Tensor&>(x_backprop_transformed).tensor<T, 4>(),
          x_backprop->tensor<T, 4>());
    }
  }
};
template <typename T, typename U>
struct FusedBatchNormGrad<GPUDevice, T, U> {
  // Generic GPU gradient specialization: a thin forwarder to
  // FusedBatchNormGradImplGPU. No dtype conversion is needed here; the
  // bfloat16 case that may require casting has its own specialization below.
  void operator()(OpKernelContext* context, const Tensor& y_backprop,
                  const Tensor& x, const Tensor& scale, const Tensor* offset,
                  const Tensor& mean, const Tensor& inv_variance,
                  const Tensor* y, U epsilon,
                  FusedBatchNormActivationMode activation_mode,
                  Tensor* x_backprop, Tensor* scale_backprop,
                  Tensor* offset_backprop, Tensor* side_input_backprop,
                  bool use_reserved_space, TensorFormat tensor_format) {
    FusedBatchNormGradImplGPU<T, U>()(
        context, y_backprop, x, scale, offset, mean, inv_variance, y, epsilon,
        activation_mode, x_backprop, scale_backprop, offset_backprop,
        side_input_backprop, use_reserved_space, tensor_format);
  }
};
template <>
struct FusedBatchNormGrad<GPUDevice, Eigen::bfloat16, float> {
void operator()(OpKernelContext* context, const Tensor& y_backprop,
const Tensor& x, const Tensor& scale, const Tensor* offset,
const Tensor& mean, const Tensor& inv_variance,
const Tensor* y, float epsilon,
FusedBatchNormActivationMode activation_mode,
Tensor* x_backprop, Tensor* scale_backprop,
Tensor* offset_backprop, Tensor* side_input_backprop,
bool use_reserved_space, TensorFormat tensor_format) {
auto* stream = context->op_device_context()->stream();
const bool cast_to_float = !IsBF16SupportedInOps(stream);
if (cast_to_float) {
Tensor casted_y_backprop = y_backprop;
Tensor casted_x = x;
Tensor casted_y;
Tensor casted_x_backprop = *x_backprop;
Tensor casted_side_input_backprop;
const GPUDevice& device = context->eigen_device<GPUDevice>();
functor::CastFunctor<GPUDevice, float, Eigen::bfloat16> cast;
OP_REQUIRES_OK(context,
context->allocate_temp(DT_FLOAT, y_backprop.shape(),
&casted_y_backprop));
cast(device, casted_y_backprop.template flat<float>(),
y_backprop.template flat<Eigen::bfloat16>());
OP_REQUIRES_OK(context,
context->allocate_temp(DT_FLOAT, x.shape(), &casted_x));
cast(device, casted_x.template flat<float>(),
x.template flat<Eigen::bfloat16>());
if (y != nullptr) {
OP_REQUIRES_OK(context,
context->allocate_temp(DT_FLOAT, y->shape(), &casted_y));
cast(device, casted_y.template flat<float>(),
y->template flat<Eigen::bfloat16>());
}
OP_REQUIRES_OK(context,
context->allocate_temp(DT_FLOAT, x_backprop->shape(),
&casted_x_backprop));
if (side_input_backprop != nullptr) {
OP_REQUIRES_OK(context, context->allocate_temp(
DT_FLOAT, side_input_backprop->shape(),
&casted_side_input_backprop));
}
FusedBatchNormGradImplGPU<float, float>()(
context, casted_y_backprop, casted_x, scale, offset, mean,
inv_variance, (y != nullptr) ? &casted_y : nullptr, epsilon,
activation_mode, &casted_x_backprop, scale_backprop, offset_backprop,
(side_input_backprop != nullptr) ? &casted_side_input_backprop
: nullptr,
use_reserved_space, tensor_format);
functor::CastFunctor<GPUDevice, Eigen::bfloat16, float> cast_back;
const Tensor& casted_x_backprop_const = casted_x_backprop;
cast_back(device, x_backprop->template flat<Eigen::bfloat16>(),
casted_x_backprop_const.template flat<float>());
if (side_input_backprop != nullptr) {
const Tensor& casted_side_input_backprop_const =
casted_side_input_backprop;
cast_back(device, side_input_backprop->template flat<Eigen::bfloat16>(),
casted_side_input_backprop_const.template flat<float>());
}
return;
}
FusedBatchNormGradImplGPU<Eigen::bfloat16, float>()(
context, y_backprop, x, scale, offset, mean, inv_variance, y, epsilon,
activation_mode, x_backprop, scale_backprop, offset_backprop,
side_input_backprop, use_reserved_space, tensor_format);
}
};
// Forward-declares the GPU functor specializations whose definitions are
// compiled in the corresponding .cu.cc file; `extern template` suppresses
// re-instantiation in this translation unit.
#define DECLARE_GPU_SPEC(T, U)                                                 \
  template <>                                                                  \
  void FusedBatchNormFreezeGrad<GPUDevice, T, U>::operator()(                  \
      OpKernelContext* context, const Tensor& y_backprop_input,                \
      const Tensor& x_input, const Tensor& scale_input,                        \
      const Tensor& mean_input, const Tensor& variance_input, U epsilon,       \
      Tensor* x_backprop_output, Tensor* scale_backprop_output,                \
      Tensor* offset_backprop_output);                                         \
  extern template struct FusedBatchNormFreezeGrad<GPUDevice, T, U>;            \
  template <>                                                                  \
  void FusedBatchNormInferenceFunctor<GPUDevice, T, U>::operator()(            \
      OpKernelContext* context, TensorFormat tensor_format,                    \
      typename TTypes<T, 4>::ConstTensor in,                                   \
      typename TTypes<U>::ConstVec scale, typename TTypes<U>::ConstVec offset, \
      typename TTypes<U>::ConstVec estimated_mean,                             \
      typename TTypes<U>::ConstVec estimated_variance,                         \
      typename TTypes<T, 4>::ConstTensor side_input, U epsilon,                \
      FusedBatchNormActivationMode activation_mode,                            \
      typename TTypes<T, 4>::Tensor out);                                      \
  extern template struct FusedBatchNormInferenceFunctor<GPUDevice, T, U>;
// The (T, U) pairs supported on GPU.
DECLARE_GPU_SPEC(float, float);
DECLARE_GPU_SPEC(Eigen::half, float);
DECLARE_GPU_SPEC(Eigen::bfloat16, float);
#undef DECLARE_GPU_SPEC
#endif
}
// Shared base for the FusedBatchNorm forward kernels (V1/V2/V3/Ex). Reads
// and validates attributes in the constructor; subclasses call
// ComputeWithReservedSpace() with the flag that matches their op version.
template <typename Device, typename T, typename U>
class FusedBatchNormOpBase : public OpKernel {
  using FbnActivationMode = functor::FusedBatchNormActivationMode;
 protected:
  // `is_batch_norm_ex` is true for _FusedBatchNormEx, which additionally
  // supports an optional side input and a fused activation; plain
  // FusedBatchNorm uses identity activation and no side input.
  explicit FusedBatchNormOpBase(OpKernelConstruction* context,
                                bool is_batch_norm_ex = false)
      : OpKernel(context) {
    float epsilon;
    OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
    // Attrs are declared as float; stored as U (the statistics type).
    epsilon_ = U(epsilon);
    float exponential_avg_factor;
    OP_REQUIRES_OK(context, context->GetAttr("exponential_avg_factor",
                                             &exponential_avg_factor));
    exponential_avg_factor_ = U(exponential_avg_factor);
    string tensor_format;
    OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format));
    OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_),
                errors::InvalidArgument("Invalid data format"));
    OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_));
    if (!is_batch_norm_ex) {
      has_side_input_ = false;
      activation_mode_ = FbnActivationMode::kIdentity;
    } else {
      OP_REQUIRES_OK(context, ParseActivationMode(context, &activation_mode_));
      int num_side_inputs;
      OP_REQUIRES_OK(context,
                     context->GetAttr("num_side_inputs", &num_side_inputs));
      OP_REQUIRES(context, num_side_inputs >= 0 && num_side_inputs <= 1,
                  errors::InvalidArgument(
                      "FusedBatchNorm accepts at most one side input."));
      has_side_input_ = (num_side_inputs == 1);
      if (has_side_input_ && is_training_) {
        OP_REQUIRES(
            context, activation_mode_ != FbnActivationMode::kIdentity,
            errors::InvalidArgument("Identity activation is not supported with "
                                    "non-empty side input"));
      }
    }
    if (activation_mode_ != FbnActivationMode::kIdentity && is_training_) {
      // The fused-activation training path is restricted to what the
      // half-precision NHWC spatial-persistent cuDNN kernel supports.
      OP_REQUIRES(context, DataTypeToEnum<T>::value == DT_HALF,
                  errors::InvalidArgument("FusedBatchNorm with activation "
                                          "supports only DT_HALF data type."));
      OP_REQUIRES(context, tensor_format_ == FORMAT_NHWC,
                  errors::InvalidArgument("FusedBatchNorm with activation "
                                          "supports only NHWC tensor format."));
      OP_REQUIRES(context, functor::BatchnormSpatialPersistentEnabled(),
                  errors::InvalidArgument(
                      "FusedBatchNorm with activation must run with cuDNN "
                      "spatial persistence mode enabled."));
    }
  }
  // Validates the inputs, allocates outputs 0..4 (y, batch_mean, batch_var,
  // saved_mean, saved_inv_var), and dispatches to the device functor.
  // `use_reserved_space` is true for V3/Ex ops, whose functor additionally
  // produces cuDNN's reserve space as output 5.
  virtual void ComputeWithReservedSpace(OpKernelContext* context,
                                        bool use_reserved_space) {
    Tensor x = context->input(0);
    const Tensor& scale = context->input(1);
    const Tensor& offset = context->input(2);
    const Tensor& estimated_mean = context->input(3);
    const Tensor& estimated_variance = context->input(4);
    const Tensor* side_input = has_side_input_ ? &context->input(5) : nullptr;
    OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5,
                errors::InvalidArgument("input must be 4 or 5-dimensional",
                                        x.shape().DebugString()));
    OP_REQUIRES(context, scale.dims() == 1,
                errors::InvalidArgument("scale must be 1-dimensional",
                                        scale.shape().DebugString()));
    OP_REQUIRES(context, offset.dims() == 1,
                errors::InvalidArgument("offset must be 1-dimensional",
                                        offset.shape().DebugString()));
    OP_REQUIRES(context, estimated_mean.dims() == 1,
                errors::InvalidArgument("estimated_mean must be 1-dimensional",
                                        estimated_mean.shape().DebugString()));
    OP_REQUIRES(
        context, estimated_variance.dims() == 1,
        errors::InvalidArgument("estimated_variance must be 1-dimensional",
                                estimated_variance.shape().DebugString()));
    // 5D inputs are handled by collapsing the spatial dimensions into a 4D
    // shape, running the 4D kernel, and restoring the shape at the end.
    bool use_reshape = (x.dims() == 5);
    auto x_shape = x.shape();
    TensorShape dest_shape;
    if (use_reshape) {
      const int64_t in_batch = GetTensorDim(x, tensor_format_, 'N');
      int64_t in_planes = GetTensorDim(x, tensor_format_, '0');
      int64_t in_rows = GetTensorDim(x, tensor_format_, '1');
      int64_t in_cols = GetTensorDim(x, tensor_format_, '2');
      const int64_t in_depth = GetTensorDim(x, tensor_format_, 'C');
      OP_REQUIRES_OK(context,
                     ShapeFromFormatWithStatus(tensor_format_, in_batch,
                                               {{in_planes, in_rows * in_cols}},
                                               in_depth, &dest_shape));
      // Reinterprets x with the collapsed 4D shape (CopyFrom into itself).
      OP_REQUIRES(context, x.CopyFrom(x, dest_shape),
                  errors::InvalidArgument("Error during tensor copy."));
    }
    const auto num_channels = GetTensorDim(x, tensor_format_, 'C');
    OP_REQUIRES(
        context, scale.NumElements() == num_channels,
        errors::InvalidArgument("scale must have the same number of elements "
                                "as the channels of x, got ",
                                scale.NumElements(), " and ", num_channels));
    OP_REQUIRES(
        context, offset.NumElements() == num_channels,
        errors::InvalidArgument("offset must have the same number of elements "
                                "as the channels of x, got ",
                                offset.NumElements(), " and ", num_channels));
    // Mean/variance inputs are only consumed in inference mode or when the
    // running statistics are blended (exponential_avg_factor != 1), so their
    // sizes are only validated in those cases.
    if (!is_training_ || exponential_avg_factor_ != 1.) {
      std::string prefix_msg = is_training_ ? "When exponential_avg_factor != 1"
                                            : "When is_training=false";
      OP_REQUIRES(context, estimated_mean.NumElements() == num_channels,
                  errors::InvalidArgument(
                      prefix_msg,
                      ", mean must have the same number "
                      "of elements as the channels of x, got ",
                      estimated_mean.NumElements(), " and ", num_channels));
      OP_REQUIRES(context, estimated_variance.NumElements() == num_channels,
                  errors::InvalidArgument(
                      prefix_msg,
                      ", variance must have the same "
                      "number of elements as the channels of x, got ",
                      estimated_variance.NumElements(), " and ", num_channels));
    }
    if (has_side_input_) {
      OP_REQUIRES(context, side_input->shape() == x.shape(),
                  errors::InvalidArgument(
                      "side_input shape must be equal to input shape: ",
                      side_input->shape().DebugString(),
                      " != ", x.shape().DebugString()));
    }
    if (activation_mode_ != FbnActivationMode::kIdentity) {
      // NOTE(review): presumably a constraint of the fused-activation cuDNN
      // kernel -- channels must be a multiple of 4 when training.
      OP_REQUIRES(
          context, !is_training_ || num_channels % 4 == 0,
          errors::InvalidArgument("FusedBatchNorm with activation requires "
                                  "channel dimension to be a multiple of 4."));
    }
    Tensor* y = nullptr;
    auto alloc_shape = use_reshape ? dest_shape : x_shape;
    // Reuse input buffers for outputs where possible (x->y, mean->batch_mean,
    // variance->batch_var).
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {0}, 0, alloc_shape, &y));
    Tensor* batch_mean = nullptr;
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {3}, 1, scale.shape(), &batch_mean));
    Tensor* batch_var = nullptr;
    OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
                                {4}, 2, scale.shape(), &batch_var));
    Tensor* saved_mean = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(3, scale.shape(), &saved_mean));
    Tensor* saved_maybe_inv_var = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(4, scale.shape(),
                                                     &saved_maybe_inv_var));
    if (is_training_) {
      functor::FusedBatchNorm<Device, T, U, true>()(
          context, x, scale, offset, estimated_mean, estimated_variance,
          side_input, epsilon_, exponential_avg_factor_, activation_mode_, y,
          batch_mean, batch_var, saved_mean, saved_maybe_inv_var,
          tensor_format_, use_reserved_space);
    } else {
      functor::FusedBatchNorm<Device, T, U, false>()(
          context, x, scale, offset, estimated_mean, estimated_variance,
          side_input, epsilon_, exponential_avg_factor_, activation_mode_, y,
          batch_mean, batch_var, saved_mean, saved_maybe_inv_var,
          tensor_format_, use_reserved_space);
    }
    if (use_reshape) {
      // Restore the original 5D shape on the output.
      OP_REQUIRES(context, y->CopyFrom(*y, x_shape),
                  errors::InvalidArgument("Error during tensor copy."));
    }
  }
 private:
  U epsilon_;                   // "epsilon" attr, cast to U
  U exponential_avg_factor_;    // "exponential_avg_factor" attr, cast to U
  TensorFormat tensor_format_;  // parsed "data_format" attr
  bool is_training_;            // "is_training" attr
  bool has_side_input_;         // Ex only: num_side_inputs == 1
  FbnActivationMode activation_mode_;  // Ex only; otherwise kIdentity
};
// FusedBatchNorm (V1/V2) kernel: no cuDNN reserve-space output is produced.
template <typename Device, typename T, typename U>
class FusedBatchNormOp : public FusedBatchNormOpBase<Device, T, U> {
  using Base = FusedBatchNormOpBase<Device, T, U>;

 public:
  explicit FusedBatchNormOp(OpKernelConstruction* context) : Base(context) {}

  void Compute(OpKernelContext* context) override {
    // V1/V2 ops have no reserve-space output, so request none.
    Base::ComputeWithReservedSpace(context, /*use_reserved_space=*/false);
  }
};
// FusedBatchNormV3 kernel: additionally exposes cuDNN's reserve space as a
// sixth output, consumed by the corresponding gradient op.
template <typename Device, typename T, typename U>
class FusedBatchNormOpV3 : public FusedBatchNormOpBase<Device, T, U> {
  using Base = FusedBatchNormOpBase<Device, T, U>;

 public:
  explicit FusedBatchNormOpV3(OpKernelConstruction* context) : Base(context) {}

  void Compute(OpKernelContext* context) override {
    // V3 ops always produce the reserve-space output.
    Base::ComputeWithReservedSpace(context, /*use_reserved_space=*/true);
  }
};
// _FusedBatchNormEx kernel: like V3, but the base class is configured to
// accept an optional side input and a fused activation.
template <typename Device, typename T, typename U>
class FusedBatchNormOpEx : public FusedBatchNormOpBase<Device, T, U> {
  using Base = FusedBatchNormOpBase<Device, T, U>;
  static constexpr bool kWithSideInputAndActivation = true;

 public:
  explicit FusedBatchNormOpEx(OpKernelConstruction* context)
      : Base(context, kWithSideInputAndActivation) {}

  void Compute(OpKernelContext* context) override {
    // Ex ops always produce the reserve-space output.
    Base::ComputeWithReservedSpace(context, /*use_reserved_space=*/true);
  }
};
template <typename Device, typename T, typename U>
class FusedBatchNormGradOpBase : public OpKernel {
using FbnActivationMode = functor::FusedBatchNormActivationMode;
protected:
explicit FusedBatchNormGradOpBase(OpKernelConstruction* context,
bool is_batch_norm_grad_ex = false)
: OpKernel(context) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
epsilon_ = U(epsilon);
string tensor_format;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format));
OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_),
errors::InvalidArgument("Invalid data format"));
OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_));
if (!is_batch_norm_grad_ex) {
has_side_input_ = false;
activation_mode_ = FbnActivationMode::kIdentity;
} else {
OP_REQUIRES_OK(context, ParseActivationMode(context, &activation_mode_));
int num_side_inputs;
OP_REQUIRES_OK(context,
context->GetAttr("num_side_inputs", &num_side_inputs));
OP_REQUIRES(context, num_side_inputs >= 0 && num_side_inputs <= 1,
errors::InvalidArgument(
"FusedBatchNormGrad accepts at most one side input."));
has_side_input_ = (num_side_inputs == 1);
if (has_side_input_ && is_training_) {
OP_REQUIRES(
context, activation_mode_ != FbnActivationMode::kIdentity,
errors::InvalidArgument("Identity activation is not supported with "
"non-empty side input"));
}
}
if (activation_mode_ != FbnActivationMode::kIdentity && is_training_) {
OP_REQUIRES(context, DataTypeToEnum<T>::value == DT_HALF,
errors::InvalidArgument("FusedBatchNormGrad with activation "
"supports only DT_HALF data type."));
OP_REQUIRES(context, tensor_format_ == FORMAT_NHWC,
errors::InvalidArgument("FusedBatchNormGrad with activation "
"supports only NHWC tensor format."));
OP_REQUIRES(context, functor::BatchnormSpatialPersistentEnabled(),
errors::InvalidArgument(
"FusedBatchNormGrad with activation must run with cuDNN "
"spatial persistence mode enabled."));
}
}
virtual void ComputeWithReservedSpace(OpKernelContext* context,
bool use_reserved_space) {
Tensor y_backprop = context->input(0);
Tensor x = context->input(1);
const Tensor& scale = context->input(2);
const Tensor& saved_mean_or_pop_mean = context->input(3);
const Tensor& saved_maybe_inv_var_or_pop_var = context->input(4);
bool use_activation = activation_mode_ != FbnActivationMode::kIdentity;
const Tensor* offset = use_activation ? &context->input(6) : nullptr;
const Tensor* y = use_activation ? &context->input(7) : nullptr;
OP_REQUIRES(context, y_backprop.dims() == 4 || y_backprop.dims() == 5,
errors::InvalidArgument("input must be 4 or 5-dimensional",
y_backprop.shape().DebugString()));
OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5,
errors::InvalidArgument("input must be 4 or 5-dimensional",
x.shape().DebugString()));
OP_REQUIRES(context, scale.dims() == 1,
errors::InvalidArgument("scale must be 1-dimensional",
scale.shape().DebugString()));
OP_REQUIRES(
context, saved_mean_or_pop_mean.dims() == 1,
errors::InvalidArgument("saved mean must be 1-dimensional",
saved_mean_or_pop_mean.shape().DebugString()));
OP_REQUIRES(context, saved_maybe_inv_var_or_pop_var.dims() == 1,
errors::InvalidArgument(
"saved variance must be 1-dimensional",
saved_maybe_inv_var_or_pop_var.shape().DebugString()));
OP_REQUIRES(
context, x.shape() == y_backprop.shape(),
errors::InvalidArgument(
"x and y_backprop must have same shape, but x has shape ",
x.shape(), " and y_backprop has shape ", y_backprop.shape()));
if (use_activation) {
OP_REQUIRES(
context, x.dim_size(3) % 4 == 0,
errors::InvalidArgument("FusedBatchNormGrad with activation requires "
"channel dimension to be a multiple of 4."));
OP_REQUIRES(context, offset->dims() == 1,
errors::InvalidArgument("offset must be 1-dimensional",
offset->shape().DebugString()));
}
bool use_reshape = (x.dims() == 5);
auto x_shape = x.shape();
TensorShape dest_shape;
if (use_reshape) {
const int64_t in_batch = GetTensorDim(x, tensor_format_, 'N');
int64_t in_planes = GetTensorDim(x, tensor_format_, '0');
int64_t in_rows = GetTensorDim(x, tensor_format_, '1');
int64_t in_cols = GetTensorDim(x, tensor_format_, '2');
const int64_t in_depth = GetTensorDim(x, tensor_format_, 'C');
OP_REQUIRES_OK(context,
ShapeFromFormatWithStatus(tensor_format_, in_batch,
{{in_planes, in_rows * in_cols}},
in_depth, &dest_shape));
OP_REQUIRES(context, x.CopyFrom(x, dest_shape),
errors::InvalidArgument("Error during tensor copy."));
OP_REQUIRES(context, y_backprop.CopyFrom(y_backprop, dest_shape),
errors::InvalidArgument("Error during tensor copy."));
}
const auto num_channels = GetTensorDim(x, tensor_format_, 'C');
OP_REQUIRES(
context, scale.NumElements() == num_channels,
errors::InvalidArgument("scale must have the same number of elements "
"as the channels of x, got ",
scale.NumElements(), " and ", num_channels));
OP_REQUIRES(
context, saved_mean_or_pop_mean.NumElements() == num_channels,
errors::InvalidArgument("reserve_space_1 must have the same number of "
"elements as the channels of x, got ",
saved_mean_or_pop_mean.NumElements(), " and ",
num_channels));
OP_REQUIRES(
context, saved_maybe_inv_var_or_pop_var.NumElements() == num_channels,
errors::InvalidArgument("reserve_space_2 must have the same number of "
"elements as the channels of x, got ",
saved_maybe_inv_var_or_pop_var.NumElements(),
" and ", num_channels));
Tensor* x_backprop = nullptr;
auto alloc_shape = use_reshape ? dest_shape : x_shape;
OP_REQUIRES_OK(context,
context->allocate_output(0, alloc_shape, &x_backprop));
const TensorShape& scale_offset_shape = scale.shape();
Tensor* scale_backprop = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, scale_offset_shape,
&scale_backprop));
Tensor* offset_backprop = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, scale_offset_shape,
&offset_backprop));
Tensor* placeholder_1 = nullptr;
OP_REQUIRES_OK(
context, context->allocate_output(3, TensorShape({0}), &placeholder_1));
Tensor* placeholder_2 = nullptr;
OP_REQUIRES_OK(
context, context->allocate_output(4, TensorShape({0}), &placeholder_2));
Tensor* side_input_backprop = nullptr;
if (has_side_input_) {
OP_REQUIRES_OK(context, context->allocate_output(5, alloc_shape,
&side_input_backprop));
}
if (x.shape().num_elements() == 0) {
functor::SetZeroFunctor<Device, U> f;
f(context->eigen_device<Device>(), scale_backprop->flat<U>());
f(context->eigen_device<Device>(), offset_backprop->flat<U>());
return;
}
if (is_training_) {
functor::FusedBatchNormGrad<Device, T, U>()(
context, y_backprop, x, scale, offset, saved_mean_or_pop_mean,
saved_maybe_inv_var_or_pop_var, y, epsilon_, activation_mode_,
x_backprop, scale_backprop, offset_backprop, side_input_backprop,
use_reserved_space, tensor_format_);
} else {
OP_REQUIRES(
context,
activation_mode_ == FbnActivationMode::kIdentity && !has_side_input_,
errors::InvalidArgument(
"FusedBatchNormGrad with activation is only supported "
"when is_training=True."));
OP_REQUIRES(context, tensor_format_ == FORMAT_NHWC,
errors::InvalidArgument(
"The implementation of "
"FusedBatchNormGrad with is_training=False only support "
"NHWC tensor format for now."));
functor::FusedBatchNormFreezeGrad<Device, T, U>()(
context, y_backprop, x, scale, saved_mean_or_pop_mean,
saved_maybe_inv_var_or_pop_var, epsilon_, x_backprop, scale_backprop,
offset_backprop);
}
if (use_reshape) {
OP_REQUIRES(context, x_backprop->CopyFrom(*x_backprop, x_shape),
errors::InvalidArgument("Error during tensor copy."));
}
}
private:
U epsilon_;
TensorFormat tensor_format_;
bool is_training_;
bool has_side_input_;
FbnActivationMode activation_mode_;
};
// Gradient kernel for the original FusedBatchNorm op (V1): no reserved-space
// side output is produced, so the shared implementation is invoked with
// `use_reserved_space == false`.
template <typename Device, typename T, typename U>
class FusedBatchNormGradOp : public FusedBatchNormGradOpBase<Device, T, U> {
  using Base = FusedBatchNormGradOpBase<Device, T, U>;

 public:
  explicit FusedBatchNormGradOp(OpKernelConstruction* context)
      : Base(context) {}

  // Delegates to the shared grad implementation without reserved space.
  void Compute(OpKernelContext* context) override {
    constexpr bool kUseReservedSpace = false;
    Base::ComputeWithReservedSpace(context, kUseReservedSpace);
  }
};
// Gradient kernel for FusedBatchNormV3: consumes the reserved-space tensor
// produced by the forward pass, so the shared implementation is invoked with
// `use_reserved_space == true`.
template <typename Device, typename T, typename U>
class FusedBatchNormGradOpV3 : public FusedBatchNormGradOpBase<Device, T, U> {
  using Base = FusedBatchNormGradOpBase<Device, T, U>;

 public:
  explicit FusedBatchNormGradOpV3(OpKernelConstruction* context)
      : Base(context) {}

  // Delegates to the shared grad implementation with reserved space enabled.
  void Compute(OpKernelContext* context) override {
    constexpr bool kUseReservedSpace = true;
    Base::ComputeWithReservedSpace(context, kUseReservedSpace);
  }
};
// Gradient kernel for the fused _FusedBatchNormGradEx op: like V3 (reserved
// space is consumed) but the base class is additionally configured to expect
// a side input and an activation.
template <typename Device, typename T, typename U>
class FusedBatchNormGradOpEx : public FusedBatchNormGradOpBase<Device, T, U> {
  using Base = FusedBatchNormGradOpBase<Device, T, U>;
  static constexpr bool kWithSideInputAndActivation = true;

 public:
  explicit FusedBatchNormGradOpEx(OpKernelConstruction* context)
      : Base(context, kWithSideInputAndActivation) {}

  // Delegates to the shared grad implementation with reserved space enabled.
  void Compute(OpKernelContext* context) override {
    constexpr bool kUseReservedSpace = true;
    Base::ComputeWithReservedSpace(context, kUseReservedSpace);
  }
};
// ---------------------------------------------------------------------------
// Kernel registrations.
//
// V1 ops take a single dtype T; V2/V3 ops split the data type T from the
// compute type U (always float here, so half/bfloat16 inputs accumulate in
// float). The _FusedBatchNormEx / _FusedBatchNormGradEx variants (GPU only)
// additionally fuse a side input and an activation.
// ---------------------------------------------------------------------------

// ----- CPU registrations -----
REGISTER_KERNEL_BUILDER(
    Name("FusedBatchNorm").Device(DEVICE_CPU).TypeConstraint<float>("T"),
    FusedBatchNormOp<CPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(
    Name("FusedBatchNormGrad").Device(DEVICE_CPU).TypeConstraint<float>("T"),
    FusedBatchNormGradOp<CPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOp<CPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOp<CPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOp<CPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOp<CPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOp<CPUDevice, bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOp<CPUDevice, bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpV3<CPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpV3<CPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpV3<CPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpV3<CPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpV3<CPUDevice, bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpV3<CPUDevice, bfloat16, float>);
// ----- GPU registrations (CUDA and ROCm builds only) -----
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
REGISTER_KERNEL_BUILDER(
    Name("FusedBatchNorm").Device(DEVICE_GPU).TypeConstraint<float>("T"),
    FusedBatchNormOp<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(
    Name("FusedBatchNormGrad").Device(DEVICE_GPU).TypeConstraint<float>("T"),
    FusedBatchNormGradOp<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOp<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOp<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOp<GPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV2")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOp<GPUDevice, Eigen::bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV2")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOp<GPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(
    Name("FusedBatchNormGradV2")
        .Device(DEVICE_GPU)
        .TypeConstraint<Eigen::bfloat16>("T")
        .TypeConstraint<float>("U"),
    FusedBatchNormGradOp<GPUDevice, Eigen::bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpV3<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpEx<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpV3<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormGradEx")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<float>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpEx<GPUDevice, float, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpV3<GPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormV3")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpV3<GPUDevice, Eigen::bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpEx<GPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::bfloat16>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormOpEx<GPUDevice, Eigen::bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("FusedBatchNormGradV3")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpV3<GPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(
    Name("FusedBatchNormGradV3")
        .Device(DEVICE_GPU)
        .TypeConstraint<Eigen::bfloat16>("T")
        .TypeConstraint<float>("U"),
    FusedBatchNormGradOpV3<GPUDevice, Eigen::bfloat16, float>);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormGradEx")
                            .Device(DEVICE_GPU)
                            .TypeConstraint<Eigen::half>("T")
                            .TypeConstraint<float>("U"),
                        FusedBatchNormGradOpEx<GPUDevice, Eigen::half, float>);
REGISTER_KERNEL_BUILDER(
    Name("_FusedBatchNormGradEx")
        .Device(DEVICE_GPU)
        .TypeConstraint<Eigen::bfloat16>("T")
        .TypeConstraint<float>("U"),
    FusedBatchNormGradOpEx<GPUDevice, Eigen::bfloat16, float>);
#endif
} | #include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
// Test fixture for the forward FusedBatchNorm kernels.
class FusedBatchNormOpTest : public OpsTestBase {};
// Training mode with exponential_avg_factor == 1.0: the outputs are the
// normalized input plus the freshly computed batch mean/variance.
TEST_F(FusedBatchNormOpTest, Training) {
  TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("exponential_avg_factor", 1.0)
                   .Attr("epsilon", 0.001)
                   .Attr("is_training", true)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  // Inputs: x (NHWC 1x1x6x2), scale, offset; mean/variance are empty because
  // they are computed from the batch in training mode.
  AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
                           {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
  AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
  AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
  test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
                                      3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
  Tensor expected_mean(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected_mean, {10, 10});
  test::ExpectTensorNear<float>(expected_mean, *GetOutput(1), 0.01);
  Tensor expected_variance(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected_variance, {14.00, 14.00});
  test::ExpectTensorNear<float>(expected_variance, *GetOutput(2), 0.01);
}
// Training mode with exponential_avg_factor == 0.5: the returned mean and
// variance are a running average of the supplied estimates and the batch
// statistics (e.g. mean = 0.5 * 6 + 0.5 * 10 = 8).
TEST_F(FusedBatchNormOpTest, TrainingRunningMean) {
  TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("exponential_avg_factor", 0.5)
                   .Attr("epsilon", 0.001)
                   .Attr("is_training", true)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp())
  ;
  AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
                           {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
  AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
  AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
  AddInputFromArray<float>(TensorShape({2}), {6.0, 6.0});
  AddInputFromArray<float>(TensorShape({2}), {16.0, 16.0});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
  test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
                                      3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
  Tensor expected_mean(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected_mean, {8, 8});
  test::ExpectTensorNear<float>(expected_mean, *GetOutput(1), 0.01);
  Tensor expected_variance(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected_variance, {15.00, 15.00});
  test::ExpectTensorNear<float>(expected_variance, *GetOutput(2), 0.01);
}
// Inference mode: normalization uses the provided population mean/variance
// rather than batch statistics.
TEST_F(FusedBatchNormOpTest, Inference) {
  TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("epsilon", 0.001)
                   .Attr("is_training", false)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
                           {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
  AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
  AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
  AddInputFromArray<float>(TensorShape({2}), {10, 10});
  AddInputFromArray<float>(TensorShape({2}), {11.67f, 11.67f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
  test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
                                      3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
}
// Same as Inference, but verifies that exponential_avg_factor is ignored when
// is_training is false: the output is identical to the plain inference case.
TEST_F(FusedBatchNormOpTest, InferenceIgnoreAvgFactor) {
  TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("exponential_avg_factor", 0.5)
                   .Attr("epsilon", 0.001)
                   .Attr("is_training", false)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
                           {5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15});
  AddInputFromArray<float>(TensorShape({2}), {4.0, 4.0});
  AddInputFromArray<float>(TensorShape({2}), {2.0, 2.0});
  AddInputFromArray<float>(TensorShape({2}), {10, 10});
  AddInputFromArray<float>(TensorShape({2}), {11.67f, 11.67f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
  test::FillValues<float>(&expected, {-3.86, -3.86, -1.51, -1.51, 0.83, 0.83,
                                      3.17, 3.17, 5.51, 5.51, 7.86, 7.86});
  test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.01);
}
// Zero-element input must not crash and must produce an output of the same
// (empty) shape.
TEST_F(FusedBatchNormOpTest, EmptyInput) {
  TF_EXPECT_OK(NodeDefBuilder("batch_norm_op", "FusedBatchNorm")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("epsilon", 0.001)
                   .Attr("is_training", true)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({1, 1, 0, 0}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  AddInputFromArray<float>(TensorShape({0}), {});
  TF_ASSERT_OK(RunOpKernel());
  EXPECT_EQ(GetOutput(0)->shape(), TensorShape({1, 1, 0, 0}));
}
// Test fixture for the FusedBatchNorm gradient kernels.
class FusedBatchNormGradOpTest : public OpsTestBase {};
// Gradient kernel smoke test: feeds y_backprop, x, scale and the saved batch
// mean/variance, and checks all three gradient outputs against precomputed
// reference values.
TEST_F(FusedBatchNormGradOpTest, Simple) {
  TF_EXPECT_OK(NodeDefBuilder("batch_norm_grad_op", "FusedBatchNormGrad")
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Input(FakeInput(DT_FLOAT))
                   .Attr("epsilon", 0.001)
                   .Finalize(node_def()));
  TF_EXPECT_OK(InitOp());
  AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
                           {2, 2, 9, 9, -4, -4, 5, 5, 8, 8, 7, 7});
  AddInputFromArray<float>(TensorShape({1, 1, 6, 2}),
                           {1, 1, 7, 7, 4, 4, -3, -3, -11, -11, 13, 13});
  AddInputFromArray<float>(TensorShape({2}), {4, 4});
  AddInputFromArray<float>(TensorShape({2}), {1.833f, 1.833f});
  AddInputFromArray<float>(TensorShape({2}), {57.472f, 57.472f});
  TF_ASSERT_OK(RunOpKernel());
  Tensor expected_x(allocator(), DT_FLOAT, TensorShape({1, 1, 6, 2}));
  test::FillValues<float>(&expected_x, {-1.34, -1.34, 2.47, 2.47, -4.44, -4.44,
                                        0.17, 0.17, 1.60, 1.60, 1.53, 1.53});
  test::ExpectTensorNear<float>(expected_x, *GetOutput(0), 0.01);
  Tensor expected_scale(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected_scale, {-1.6488, -1.6488});
  test::ExpectTensorNear<float>(expected_scale, *GetOutput(1), 0.01);
  Tensor expected_offset(allocator(), DT_FLOAT, TensorShape({2}));
  test::FillValues<float>(&expected_offset, {27, 27});
  test::ExpectTensorNear<float>(expected_offset, *GetOutput(2), 0.01);
}
// Short dtype aliases used to build readable benchmark names below.
using fp32 = float;
using fp16 = Eigen::half;
using bf16 = bfloat16;
// Builds a benchmark graph containing a single FusedBatchNormV3 node with
// random inputs of the given shape/format. In training mode, the mean and
// variance inputs are empty tensors (they are computed from the batch).
template <typename T>
static Graph* FusedBatchNormInference(int n, int h, int w, int c,
                                      bool is_training,
                                      TensorFormat data_format) {
  Graph* g = new Graph(OpRegistry::Global());
  DataType dtype = DataTypeToEnum<T>::value;
  Tensor x_t(dtype, data_format == FORMAT_NHWC ? TensorShape({n, h, w, c})
                                               : TensorShape({n, c, h, w}));
  x_t.flat<T>().setRandom();
  // `other_t` doubles as scale, offset, and (in inference) mean/variance.
  Tensor other_t(DT_FLOAT, TensorShape({c}));
  other_t.flat<float>().setRandom();
  Tensor empty_t(DT_FLOAT, TensorShape({0}));
  Node* x = test::graph::Constant(g, x_t, "x");
  Node* other = test::graph::Constant(g, other_t, "other");
  Node* empty = test::graph::Constant(g, empty_t, "empty");
  Node* fused_batch_norm;
  TF_CHECK_OK(NodeBuilder(g->NewName("fused_batch_norm"), "FusedBatchNormV3")
                  .Input(x)
                  .Input(other)
                  .Input(other)
                  .Input(is_training ? empty : other)
                  .Input(is_training ? empty : other)
                  .Attr("T", dtype)
                  .Attr("U", DT_FLOAT)
                  .Attr("epsilon", 0.001)
                  .Attr("is_training", is_training)
                  .Attr("data_format", ToString(data_format))
                  .Finalize(g, &fused_batch_norm));
  return g;
}
// Builds a benchmark graph containing a single FusedBatchNormGradV3 node with
// random y_backprop/x inputs; `other_t` is reused for scale and the four
// reserve-space inputs.
template <typename T>
static Graph* FusedBatchNormGrad(int n, int h, int w, int c, bool is_training,
                                 TensorFormat data_format) {
  Graph* g = new Graph(OpRegistry::Global());
  DataType dtype = DataTypeToEnum<T>::value;
  TensorShape shape = data_format == FORMAT_NHWC ? TensorShape({n, h, w, c})
                                                 : TensorShape({n, c, h, w});
  Tensor y_backprop_t(dtype, shape);
  y_backprop_t.flat<T>().setRandom();
  Tensor x_t(dtype, shape);
  x_t.flat<T>().setRandom();
  Tensor other_t(DT_FLOAT, TensorShape({c}));
  other_t.flat<float>().setRandom();
  Node* y_backprop = test::graph::Constant(g, y_backprop_t, "y_backprop");
  Node* x = test::graph::Constant(g, x_t, "x");
  Node* other = test::graph::Constant(g, other_t, "other");
  Node* fused_batch_norm;
  TF_CHECK_OK(
      NodeBuilder(g->NewName("fused_batch_norm_grad"), "FusedBatchNormGradV3")
          .Input(y_backprop)
          .Input(x)
          .Input(other)
          .Input(other)
          .Input(other)
          .Input(other)
          .Attr("T", dtype)
          .Attr("U", DT_FLOAT)
          .Attr("epsilon", 0.001)
          .Attr("is_training", is_training)
          .Attr("data_format", ToString(data_format))
          .Finalize(g, &fused_batch_norm));
  return g;
}
// Expands to a unique benchmark symbol name encoding all parameters.
// NOTE: comments must stay OUTSIDE the macro bodies; a `//` before a trailing
// backslash would swallow the continuation line.
#define BM_NAME(NAME, N, H, W, C, T, IT, FORMAT, DEVICE) \
  BM_##NAME##_##N##_##H##_##W##_##C##_##IT##_##FORMAT##_##T##_##DEVICE
// Defines and registers one forward-pass benchmark for the given shape,
// dtype, training flag, tensor format, and device.
#define BM_FusedBatchNorm(N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)          \
  static void BM_NAME(FusedBatchNorm, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)(::testing::benchmark::State & state) { \
    test::Benchmark(                                                           \
        #DEVICE,                                                               \
        FusedBatchNormInference<T>(N, H, W, C, IS_TRAINING, FORMAT_##FORMAT),  \
        false)                                                                 \
        .Run(state);                                                           \
    state.SetItemsProcessed(state.iterations() * N * H * W * C);               \
  }                                                                            \
  BENCHMARK(                                                                   \
      BM_NAME(FusedBatchNorm, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE))     \
      ->UseRealTime();
// CPU forward benchmarks (inference and training) for fp32/fp16/bf16.
BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, bf16, false, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NHWC, cpu);
BM_FusedBatchNorm(64, 14, 14, 256, bf16, true, NHWC, cpu);
// GPU forward benchmarks (CUDA builds only).
#ifdef GOOGLE_CUDA
BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, false, NCHW, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, false, NCHW, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NHWC, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp32, true, NCHW, gpu);
BM_FusedBatchNorm(64, 14, 14, 256, fp16, true, NCHW, gpu);
#endif
// Defines and registers one backward-pass benchmark for the given shape,
// dtype, training flag, tensor format, and device. Comments are kept outside
// the macro bodies (a `//` before a trailing backslash would break expansion).
#define BM_FusedBatchNormGrad(N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)     \
  static void BM_NAME(FusedBatchNormGrad, N, H, W, C, T, IS_TRAINING, FORMAT, \
                      DEVICE)(::testing::benchmark::State & state) {          \
    test::Benchmark(                                                          \
        #DEVICE,                                                              \
        FusedBatchNormGrad<T>(N, H, W, C, IS_TRAINING, FORMAT_##FORMAT),      \
        false)                                                                \
        .Run(state);                                                          \
    state.SetItemsProcessed(state.iterations() * N * H * W * C);              \
  }                                                                           \
  BENCHMARK(                                                                  \
      BM_NAME(FusedBatchNormGrad, N, H, W, C, T, IS_TRAINING, FORMAT, DEVICE)) \
      ->UseRealTime();
// Registers backward benchmarks across the layer shapes of a ResNet-style
// model (56x56, 28x28, 14x14 feature maps at increasing channel counts).
#define BM_FusedBatchNormGradResnetShapes(T, IS_TRAINING, FORMAT, DEVICE)    \
  BM_FusedBatchNormGrad(64, 56, 56, 64, T, IS_TRAINING, FORMAT, DEVICE);     \
  BM_FusedBatchNormGrad(64, 56, 56, 128, T, IS_TRAINING, FORMAT, DEVICE);    \
  BM_FusedBatchNormGrad(64, 56, 56, 256, T, IS_TRAINING, FORMAT, DEVICE);    \
                                                                             \
  BM_FusedBatchNormGrad(64, 28, 28, 128, T, IS_TRAINING, FORMAT, DEVICE);    \
  BM_FusedBatchNormGrad(64, 28, 28, 256, T, IS_TRAINING, FORMAT, DEVICE);    \
  BM_FusedBatchNormGrad(64, 28, 28, 512, T, IS_TRAINING, FORMAT, DEVICE);    \
                                                                             \
  BM_FusedBatchNormGrad(64, 14, 14, 128, T, IS_TRAINING, FORMAT, DEVICE);    \
  BM_FusedBatchNormGrad(64, 14, 14, 256, T, IS_TRAINING, FORMAT, DEVICE);    \
  BM_FusedBatchNormGrad(64, 14, 14, 1024, T, IS_TRAINING, FORMAT, DEVICE)
// CPU backward benchmarks.
BM_FusedBatchNormGradResnetShapes(fp32, true, NHWC, cpu);
BM_FusedBatchNormGradResnetShapes(fp32, false, NHWC, cpu);
BM_FusedBatchNormGradResnetShapes(bf16, true, NHWC, cpu);
BM_FusedBatchNormGradResnetShapes(bf16, false, NHWC, cpu);
// GPU backward benchmarks (CUDA builds only).
#ifdef GOOGLE_CUDA
BM_FusedBatchNormGradResnetShapes(fp32, true, NHWC, gpu);
BM_FusedBatchNormGradResnetShapes(fp16, true, NHWC, gpu);
BM_FusedBatchNormGradResnetShapes(fp32, true, NCHW, gpu);
BM_FusedBatchNormGradResnetShapes(fp16, true, NCHW, gpu);
BM_FusedBatchNormGradResnetShapes(fp32, false, NHWC, gpu);
BM_FusedBatchNormGradResnetShapes(fp16, false, NHWC, gpu);
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fused_batch_norm_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/fused_batch_norm_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b67db28c-e40b-4668-92e4-3270dc801d78 | cpp | tensorflow/tensorflow | graph_transform_wrapper | tensorflow/core/transforms/graph_transform_wrapper.cc | tensorflow/core/transforms/graph_transform_wrapper_test.cc | #include "tensorflow/core/transforms/graph_transform_wrapper.h"
#include <initializer_list>
#include "absl/memory/memory.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/ir/importexport/graphdef_export.h"
#include "tensorflow/core/ir/importexport/graphdef_import.h"
#include "tensorflow/core/platform/statusor.h"
namespace mlir {
namespace tfg {
// Round-trips a tensorflow::Graph through TFG MLIR, running the given passes
// in order, and rewrites `graph` in place with the transformed result.
//
// Each element of `passes` is a factory invoked once to build the pass that
// is added to the pipeline. On pass failure, diagnostics collected by the
// scoped handler are folded into the returned InvalidArgument status.
tensorflow::Status RunTransformOnGraph(
    tensorflow::Graph* graph,
    const std::initializer_list<
        llvm::function_ref<std::unique_ptr<mlir::Pass>()>>& passes,
    const tensorflow::GraphDebugInfo& debug_info) {
  // Single-threaded context: the module is short-lived and local.
  MLIRContext context(MLIRContext::Threading::DISABLED);
  TF_ASSIGN_OR_RETURN(OwningOpRef<ModuleOp> module,
                      ImportGraphAndFunctionsToMlir(&context, debug_info,
                                                    *graph, graph->flib_def()));
  PassManager pm((*module)->getName(), mlir::PassManager::Nesting::Explicit);
  for (auto& pass : passes) pm.addPass(pass());
  mlir::StatusScopedDiagnosticHandler error_handler(&context);
  if (failed(pm.run(*module)))
    return error_handler.Combine(
        tensorflow::errors::InvalidArgument("MLIR Graph Optimizer failed: "));
  tensorflow::GraphDef graphdef;
  TF_RETURN_WITH_CONTEXT_IF_ERROR(ConvertToGraphDef(*module, &graphdef),
                                  "when exporting MLIR module to GraphDef");
  // Replace the input graph wholesale (including its function library) with
  // the transformed GraphDef.
  graph->Clear();
  graph->mutable_flib_def()->Clear();
  tensorflow::GraphConstructorOptions opts;
  return ConvertGraphDefToGraph(opts, graphdef, graph);
}
}
} | #include "tensorflow/core/transforms/graph_transform_wrapper.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace {
// Test-only pass: finds the first TestInput op and erases the first user of
// its result 0 (one of the TestRelu consumers in the test graph).
struct TestPass : public PassWrapper<TestPass, OperationPass<ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPass)
  TestPass() = default;
  StringRef getArgument() const final { return "test"; }
  void runOnOperation() override {
    // Fix: `del` was previously uninitialized; if the walk found no
    // "tfg.TestInput" op, the unconditional `del->erase()` dereferenced an
    // indeterminate pointer (undefined behavior). Initialize and guard.
    Operation* del = nullptr;
    getOperation()->walk([&](Operation* op) {
      if (op->getName().getStringRef() != "tfg.TestInput") return;
      // Assumes result 0 has at least one user in the test graph.
      del = *op->getResult(0).getUsers().begin();
    });
    if (del != nullptr) del->erase();
  }
};
}
}
// Minimal op registrations for the test graph built below.
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestRelu").Input("i: float").Output("o: float");
REGISTER_OP("NoOp");
// End-to-end: builds in -> {n1, n2}, runs TestPass (which erases the first
// user of `in`, i.e. n1), and checks the rewritten graph keeps n2.
TEST(GraphTransformWrapper, ReplacedGraph) {
  tensorflow::Graph graph(tensorflow::OpRegistry::Global());
  {
    tensorflow::GraphDefBuilder b(
        tensorflow::GraphDefBuilder::kFailImmediately);
    tensorflow::Node* input =
        tensorflow::ops::SourceOp("TestInput", b.opts().WithName("in"));
    tensorflow::ops::UnaryOp("TestRelu", tensorflow::ops::NodeOut(input, 0),
                             b.opts().WithName("n1"));
    tensorflow::ops::UnaryOp("TestRelu", tensorflow::ops::NodeOut(input, 1),
                             b.opts().WithName("n2"));
    TF_EXPECT_OK(tensorflow::GraphDefBuilderToGraph(b, &graph));
  }
  mlir::MLIRContext context;
  context.getOrLoadDialect<mlir::tfg::TFGraphDialect>();
  auto create_pass = [&]() { return std::make_unique<mlir::TestPass>(); };
  TF_QCHECK_OK(mlir::tfg::RunTransformOnGraph(&graph, {create_pass}));
  // 4 nodes remain: source, sink, in, n2 (n1 was erased by the pass).
  EXPECT_EQ(4, graph.num_nodes());
  EXPECT_TRUE(
      absl::StrContains(graph.ToGraphDefDebug().ShortDebugString(), "\"n2\""));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/transforms/graph_transform_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/transforms/graph_transform_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
43526799-73a2-492f-aa82-ab1df36fdb29 | cpp | google/tensorstore | result | tensorstore/serialization/result.h | tensorstore/serialization/result_test.cc | #ifndef TENSORSTORE_SERIALIZATION_RESULT_H_
#define TENSORSTORE_SERIALIZATION_RESULT_H_
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/status.h"
#include "tensorstore/util/result.h"
namespace tensorstore {
namespace serialization {
/// Serializer specialization for `tensorstore::Result<T>`.
///
/// Wire format: a boolean "ok" tag, followed by either the contained value
/// (tag is true) or the error status (tag is false).
template <typename T>
struct Serializer<Result<T>> {
  [[nodiscard]] static bool Encode(EncodeSink& sink, const Result<T>& value) {
    const bool ok = value.ok();
    if (!serialization::Encode(sink, ok)) return false;
    if (ok) {
      return serialization::Encode(sink, *value);
    }
    return serialization::Encode(sink, value.status());
  }
  [[nodiscard]] static bool Decode(DecodeSource& source, Result<T>& value) {
    bool has_value;
    if (!serialization::Decode(source, has_value)) return false;
    if (!has_value) {
      // Error case: decode the status and store it in the Result.
      absl::Status status;
      if (!ErrorStatusSerializer::Decode(source, status)) return false;
      value = std::move(status);
      return true;
    }
    // Value case: default-construct in place, then decode into it.
    return serialization::Decode(source, value.emplace());
  }
};
}
}
#endif | #include "tensorstore/serialization/result.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::serialization::TestSerializationRoundTrip;
// Results holding values must serialize and deserialize losslessly.
TEST(ResultTest, OkRoundTrip) {
  TestSerializationRoundTrip(tensorstore::Result<int>(3));
  TestSerializationRoundTrip(tensorstore::Result<int>(4));
}
// Results holding an error status must round-trip the status as well.
TEST(StatusTest, ErrorRoundTrip) {
  TestSerializationRoundTrip(
      tensorstore::Result<int>(absl::InternalError("abc")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/result.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/result_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4a03d074-abd5-4eff-b24a-ddc438954def | cpp | tensorflow/tensorflow | context_map | third_party/xla/xla/stream_executor/gpu/context_map.h | third_party/xla/xla/stream_executor/gpu/context_map_test.cc | #ifndef XLA_STREAM_EXECUTOR_GPU_CONTEXT_MAP_H_
#define XLA_STREAM_EXECUTOR_GPU_CONTEXT_MAP_H_
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/any_invocable.h"
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
namespace stream_executor::gpu {
// Thread-safe map from driver context handles (GpuContext) to their
// ContextType wrappers, with a secondary index from device ordinal to the
// contexts created for that device.
template <class GpuContext, class ContextType>
class ContextMap {
 public:
  // `find_device_ordinal` maps a device memory pointer to its device ordinal;
  // used by GetAnyContext.
  explicit ContextMap(absl::AnyInvocable<int(void* ptr)> find_device_ordinal)
      : find_device_ordinal_(std::move(find_device_ordinal)) {}

  // Returns whether `context` is currently registered in the map.
  bool Has(GpuContext context) {
    absl::ReaderMutexLock lock(&mutex_);
    return gpu_context_to_context_type_map_.find(context) !=
           gpu_context_to_context_type_map_.end();
  }

  // Registers `context` under `device_ordinal` and returns its wrapper.
  // Idempotent: re-adding an existing context returns the existing wrapper.
  ContextType* Add(GpuContext context, int device_ordinal) {
    CHECK_NE(context, nullptr);
    absl::MutexLock lock(&mutex_);
    auto insert_result = gpu_context_to_context_type_map_.insert(
        std::make_pair(context, nullptr));
    auto it = insert_result.first;
    if (insert_result.second) {
      it->second = std::make_unique<ContextType>(context, device_ordinal);
      ordinal_to_type_map_[device_ordinal].push_back(context);
    }
    return it->second.get();
  }

  // Unregisters `context`, removing it from both indices. CHECK-fails if the
  // context is not present.
  void Remove(GpuContext context) {
    absl::MutexLock lock(&mutex_);
    CHECK_NE(context, nullptr);
    auto it = gpu_context_to_context_type_map_.find(context);
    CHECK(it != gpu_context_to_context_type_map_.end()) << context;
    gpu_context_to_context_type_map_.erase(it);
    // Fix: iterate by reference. The previous `for (auto p : ...)` copied
    // each map entry, so erasing from `p.second` mutated the copy and the
    // removed context was left behind in ordinal_to_type_map_ whenever its
    // device still had other contexts (GetAnyContext could then return a
    // removed context).
    for (auto& [ordinal, contexts] : ordinal_to_type_map_) {
      auto it2 = std::find(contexts.begin(), contexts.end(), context);
      if (it2 != contexts.end()) {
        contexts.erase(it2);
        if (contexts.empty()) {
          // Copy the key: erase(key) must not be handed a reference into the
          // node being destroyed.
          const int ordinal_copy = ordinal;
          ordinal_to_type_map_.erase(ordinal_copy);
        }
        break;
      }
    }
  }

  // Returns some context registered for the device owning `ptr`. CHECK-fails
  // if that device has no registered contexts.
  GpuContext GetAnyContext(void* ptr) {
    absl::ReaderMutexLock lock(&mutex_);
    int device_ordinal = find_device_ordinal_(ptr);
    CHECK_EQ(ordinal_to_type_map_.count(device_ordinal), 1);
    CHECK(!ordinal_to_type_map_.at(device_ordinal).empty())
        << "Need at least one context.";
    return ordinal_to_type_map_.at(device_ordinal)[0];
  }

 private:
  absl::Mutex mutex_;
  // Primary index: context handle -> owned wrapper.
  absl::flat_hash_map<GpuContext, std::unique_ptr<ContextType>>
      gpu_context_to_context_type_map_ ABSL_GUARDED_BY(mutex_);
  // Secondary index: device ordinal -> contexts created for that device.
  absl::flat_hash_map<int, std::vector<GpuContext>> ordinal_to_type_map_
      ABSL_GUARDED_BY(mutex_);
  absl::AnyInvocable<int(void* ptr)> find_device_ordinal_;
};
}
#endif | #include "xla/stream_executor/gpu/context_map.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
// Minimal stand-in for a real context wrapper: it simply records the raw
// context pointer and device ordinal it was constructed with and hands
// them back unchanged.
class TestContext {
 public:
  TestContext(void *context, int device_ordinal)
      : ctx_(context), ordinal_(device_ordinal) {}

  // Accessors for the values captured at construction time.
  void *context() const { return ctx_; }
  int device_ordinal() const { return ordinal_; }

 private:
  void *ctx_;
  int ordinal_;
};
// Verifies the basic Add/Has/Remove lifecycle for a single context.
TEST(ContextMapTest, AddRemoveAndHasWorks) {
  int device_ordinal = 1;
  void *context = &device_ordinal;
  auto ordinal_finder = [device_ordinal](void *ptr) { return device_ordinal; };
  ContextMap<void *, TestContext> map(ordinal_finder);
  auto *test_context = map.Add(context, device_ordinal);
  // The wrapper must echo the values it was registered with.
  EXPECT_EQ(test_context->context(), context);
  EXPECT_EQ(test_context->device_ordinal(), device_ordinal);
  EXPECT_TRUE(map.Has(context));
  map.Remove(context);
  EXPECT_FALSE(map.Has(context));
}
// Adding the same context twice must be idempotent and return the same
// wrapper object both times.
TEST(ContextMapTest, AddTwiceReturnsSameContext) {
  void *context = reinterpret_cast<void *>(2);
  constexpr int device_ordinal = 1;
  auto ordinal_finder = [](void *ptr) { return device_ordinal; };
  ContextMap<void *, TestContext> map(ordinal_finder);
  auto *test_context1 = map.Add(context, device_ordinal);
  auto *test_context2 = map.Add(context, device_ordinal);
  EXPECT_EQ(test_context1, test_context2);
}
// GetAnyContext must return a context registered on whichever device the
// ordinal finder reports for the queried pointer.
TEST(ContextMapTest, GetAnyContextReturnsCorrectContext) {
  void *context1 = reinterpret_cast<void *>(2);
  void *context2 = reinterpret_cast<void *>(3);
  constexpr int device_ordinal1 = 1;
  constexpr int device_ordinal2 = 2;
  // Stateful finder: reports device 1 for the first call and device 2 for
  // every call after that.
  auto ordinal_finder = [](void *ptr) {
    static int calls = 0;
    ++calls;
    if (calls <= 1) {
      return device_ordinal1;
    } else {
      return device_ordinal2;
    }
  };
  ContextMap<void *, TestContext> map(ordinal_finder);
  auto *test_context1 = map.Add(context1, device_ordinal1);
  auto *test_context2 = map.Add(context2, device_ordinal2);
  EXPECT_NE(test_context1, test_context2);
  auto first_context = map.GetAnyContext(context1);
  EXPECT_EQ(first_context, context1);
  auto second_context = map.GetAnyContext(context2);
  EXPECT_EQ(second_context, context2);
}
// After a context is removed, asking for a context on its device must
// CHECK-fail (there is nothing left registered for that ordinal).
TEST(ContextMapTest, GetAnyContextShouldDieWithBadInput) {
  void *context1 = reinterpret_cast<void *>(2);
  void *context2 = reinterpret_cast<void *>(3);
  constexpr int device_ordinal1 = 1;
  constexpr int device_ordinal2 = 2;
  // Stateful finder: device 1 for the first call, device 2 afterwards.
  auto ordinal_finder = [](void *ptr) {
    static int calls = 0;
    ++calls;
    if (calls <= 1) {
      return device_ordinal1;
    } else {
      return device_ordinal2;
    }
  };
  ContextMap<void *, TestContext> map(ordinal_finder);
  auto *test_context1 = map.Add(context1, device_ordinal1);
  auto *test_context2 = map.Add(context2, device_ordinal2);
  EXPECT_NE(test_context1, test_context2);
  auto first_context = map.GetAnyContext(context1);
  EXPECT_EQ(first_context, context1);
  auto second_context = map.GetAnyContext(context2);
  EXPECT_EQ(second_context, context2);
  // Removing the only context on device 2 makes lookups for it fatal.
  map.Remove(context2);
  EXPECT_DEATH(map.GetAnyContext(context2), "Check failed");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/context_map.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/context_map_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e3460a20-4301-4749-99c9-483fa6ad0a48 | cpp | tensorflow/tensorflow | master | tensorflow/core/distributed_runtime/master.cc | tensorflow/core/distributed_runtime/master_test.cc | #include "tensorflow/core/distributed_runtime/master.h"
#include <memory>
#include <unordered_set>
#include <vector>
#include "xla/tsl/protobuf/rpc_options.pb.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/regexp.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/cluster.pb.h"
#include "tensorflow/core/protobuf/master.pb.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
// Matches a grpc-scheme prefix (e.g. "grpc://") on master target strings so
// it can be stripped before comparing against ClusterDef task addresses.
// NOTE: the original literal was truncated to "^grpc.*:" (unterminated);
// restored to the full scheme-matching pattern.
constexpr char kGrpcPrefixRegex[] = "^grpc.*://";
}  // namespace
// Constructs the master and, when session_gc_seconds > 0, starts the
// background thread that garbage-collects idle sessions; a non-positive
// value disables GC entirely (gc_thread_ stays null).
Master::Master(MasterEnv* env, double session_gc_seconds)
    : env_(env),
      last_1000_steps_(1000),
      step_count_(0),
      session_gc_seconds_(session_gc_seconds),
      // Tracks the last 10000 request ids for duplicate detection, sharded
      // by env_->experimental_num_shards.
      recent_request_ids_(10000, env_->experimental_num_shards) {
  CHECK(!env->local_devices.empty());
  DCHECK_GT(env_->experimental_num_shards, 0);
  if (session_gc_seconds_ > 0.0) {
    gc_thread_ = env_->env->StartThread(ThreadOptions(), "TF_master_GC",
                                        [this]() { GC(); });
  } else {
    gc_thread_ = nullptr;
  }
}
// Shuts down the GC thread (if any) and joins it.
Master::~Master() {
  if (gc_thread_) {
    // Signal shutdown, then release mu_ *before* joining: the GC loop
    // reacquires mu_ when it wakes from the condition-variable wait, so
    // joining the thread while still holding the lock can deadlock.
    {
      mutex_lock l(mu_);
      shutdown_ = true;
      shutdown_cv_.notify_all();
    }
    delete gc_thread_;  // Blocks until the GC thread exits.
  }
}
// Background loop that closes sessions idle for longer than
// session_gc_seconds_.  Wakes every 10 seconds (or when shutdown_cv_ is
// signaled) and exits once shutdown_ is set by the destructor.
void Master::GC() {
  Env* env = Env::Default();
  while (true) {
    mutex_lock l(mu_);
    const int kTimeoutMilliseconds = 10 * 1000;  // 10 seconds.
    // Releases mu_ while waiting; reacquires it before returning.
    WaitForMilliseconds(&l, &shutdown_cv_, kTimeoutMilliseconds);
    if (shutdown_) {
      break;
    }
    std::vector<string> handles;
    const int64_t num_micros =
        static_cast<int64_t>(session_gc_seconds_ * 1000000);
    for (const auto& entry : sessions_) {
      int64_t lat = entry.second->last_access_time_usec();
      if (static_cast<int64_t>(env->NowMicros()) - lat > num_micros) {
        handles.push_back(entry.first);
        auto* sess = entry.second;
        // Close on the closure threadpool; collection may be slow and must
        // not run under mu_.
        SchedClosure([this, sess]() {
          LOG(WARNING) << "GC session " << sess->handle() << " after "
                       << session_gc_seconds_ << " seconds. "
                       << "Note that if you are starting multiple replicas "
                       << "on a staggered delay, session_gc_seconds may need "
                       << "to be raised.";
          sess->GarbageCollect();
        });
      }
    }
    // Drop the collected sessions from the map while still holding mu_.
    for (const auto& handle : handles) sessions_.erase(handle);
  }
}
// Looks up the session registered under `handle`.  On a hit the session's
// refcount is bumped while mu_ is held, so the caller receives an owned
// reference and must Unref() it when done.  Returns nullptr on a miss.
MasterSession* Master::FindMasterSession(const string& handle) {
  mutex_lock l(mu_);
  MasterSession* found = gtl::FindPtrOrNull(sessions_, handle);
  if (found != nullptr) {
    found->Ref();
  }
  return found;
}
// Collects the set of remote devices (and worker names) visible to this
// master, subject to the session's device filters.  Workers are queried in
// parallel; Wait() blocks until every targeted worker has responded or
// failed.
class DeviceFinder {
 public:
  // Queries all matching workers and moves the discovered devices into
  // `out_remote`.  Blocks until every worker has replied.
  static Status GetRemoteDevices(
      const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
      WorkerCacheInterface* worker_cache,
      std::vector<std::unique_ptr<Device>>* out_remote) {
    DeviceFinder finder(device_filters, env, worker_cache);
    finder.Start();
    TF_RETURN_IF_ERROR(finder.Wait());
    finder.GetRemoteDevices(env->local_devices, out_remote);
    return absl::OkStatus();
  }

  // Returns the names of the workers that match the filters, without
  // contacting any worker.
  static void GetRemoteWorkers(
      const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
      WorkerCacheInterface* worker_cache, std::vector<string>* workers) {
    DeviceFinder finder(device_filters, env, worker_cache);
    *workers = finder.targets_;
  }

 private:
  // Parses the device filters and computes `targets_`, the list of workers
  // to query.
  explicit DeviceFinder(
      const protobuf::RepeatedPtrField<string>& device_filters, MasterEnv* env,
      WorkerCacheInterface* worker_cache)
      : env_(env), worker_cache_(worker_cache) {
    CHECK(worker_cache) << "Worker cache was null!";
    // Malformed filters are fatal.
    auto process_filter = [this](const string& filter) {
      DeviceNameUtils::ParsedName parsed;
      if (DeviceNameUtils::ParseFullName(filter, &parsed)) {
        filters_.push_back(parsed);
      } else {
        LOG(FATAL) << "Skipping invalid filter: " << filter;
      }
    };
    for (const string& filter : device_filters) {
      process_filter(filter);
    }
    if (filters_.empty()) {
      // No filters: every known worker is a target.
      std::vector<string> workers;
      worker_cache->ListWorkers(&workers);
      std::swap(workers, targets_);
    } else {
      CHECK_GT(env_->local_devices.size(), 0) << "No local devices provided.";
      const string& local_device_name = env_->local_devices[0]->name();
      DeviceNameUtils::ParsedName local_parsed_name;
      CHECK(DeviceNameUtils::ParseFullName(local_device_name,
                                           &local_parsed_name));
      // When every filter names a job, only workers in those jobs (plus the
      // master's own job) need to be listed; otherwise list all workers.
      bool all_filters_have_job = true;
      std::unordered_set<string> filter_job_names({local_parsed_name.job});
      for (const DeviceNameUtils::ParsedName& filter : filters_) {
        all_filters_have_job = all_filters_have_job && filter.has_job;
        if (filter.has_job) {
          filter_job_names.insert(filter.job);
        }
      }
      std::vector<string> workers;
      if (all_filters_have_job) {
        for (const string& job_name : filter_job_names) {
          VLOG(2) << "Selectively listing workers in job: " << job_name;
          std::vector<string> workers_in_job;
          worker_cache->ListWorkersInJob(job_name, &workers_in_job);
          workers.insert(workers.end(), workers_in_job.begin(),
                         workers_in_job.end());
        }
      } else {
        VLOG(2) << "Listing workers in all jobs because some device "
                << "filter has no job specified. Filters were:";
        if (device_filters.empty()) {
          VLOG(2) << "- <NO FILTERS>";
        } else {
          for (const string& filter : device_filters) {
            VLOG(2) << "- " << filter;
          }
        }
        worker_cache->ListWorkers(&workers);
      }
      // Keep workers that match a filter or share the master's address
      // space (the master's own worker is always included).
      for (const string& name : workers) {
        if (MatchFilters(name) ||
            DeviceNameUtils::IsSameAddressSpace(name, local_device_name)) {
          targets_.push_back(name);
        }
      }
    }
    seen_targets_.assign(targets_.size(), false);
  }

  // Any devices still in found_ were never handed off; delete them.
  ~DeviceFinder() {
    for (Device* dev : found_) delete dev;
  }

  // Issues one asynchronous NewRemoteDevices RPC per target worker.
  void Start() {
    LOG(INFO) << "Scanning workers for devices: " << targets_.size()
              << " total workers";
    {
      mutex_lock l(mu_);
      num_pending_ = targets_.size();
      if (num_pending_ == 0) {
        pending_zero_.notify_all();
      }
    }
    for (size_t i = 0; i < targets_.size(); ++i) {
      // The i-th target's response is recorded via WhenFound(i, ...).
      NewRemoteDevices(
          env_->env, worker_cache_, targets_[i],
          [this, i](const Status& s, std::vector<Device*>* devices) {
            WhenFound(i, s, devices);
          });
    }
  }

  // Interval between "still waiting" log messages in Wait().
  const int32 kLoggingPeriodMs = 10 * 1000;

  // Blocks until all targets have responded, periodically logging which
  // workers have not yet answered.  Returns the combined response status.
  Status Wait() {
    mutex_lock l(mu_);
    // TODO(mrry): Propagate a timeout here, since `num_pending_` may
    // never become zero.
    while (num_pending_ != 0) {
      pending_zero_.wait_for(l, std::chrono::milliseconds(kLoggingPeriodMs));
      if (num_pending_ != 0) {
        for (size_t i = 0; i < targets_.size(); ++i) {
          if (!seen_targets_[i]) {
            LOG(INFO)
                << "CreateSession still waiting for response from worker: "
                << targets_[i];
          }
        }
      }
    }
    return status_;
  }

  // Moves ownership of the discovered devices into `remote`, dropping any
  // device that duplicates a local device name or fails the filters.
  void GetRemoteDevices(const std::vector<Device*>& local,
                        std::vector<std::unique_ptr<Device>>* remote) {
    std::unordered_set<string> names(local.size());
    for (Device* dev : local) names.insert(dev->name());
    mutex_lock l(mu_);
    for (Device* dev : found_) {
      const string& name = dev->name();
      if (names.insert(name).second && MatchFilters(name)) {
        remote->push_back(std::unique_ptr<Device>(dev));
      } else {
        delete dev;
      }
    }
    found_.clear();
  }

  typedef DeviceFinder ME;
  const MasterEnv* env_;
  WorkerCacheInterface* worker_cache_;
  std::vector<DeviceNameUtils::ParsedName> filters_;

  mutex mu_;
  // Number of targets that have not responded yet.
  int num_pending_ TF_GUARDED_BY(mu_);
  condition_variable pending_zero_;
  // Raw devices received so far; ownership transfers in GetRemoteDevices.
  std::vector<Device*> found_ TF_GUARDED_BY(mu_);
  // List of targets to be contacted by this DeviceFinder. The
  // respective `bool` in `seen_targets_` indicates whether we have
  // heard from this target or not.
  std::vector<string> targets_;
  std::vector<bool> seen_targets_ TF_GUARDED_BY(mu_);
  Status status_;

  // Completion callback for target `target_index`; accumulates devices (or
  // the first error) and signals Wait() when the last response arrives.
  void WhenFound(int target_index, const Status& s,
                 std::vector<Device*>* devices) {
    mutex_lock l(mu_);
    seen_targets_[target_index] = true;
    if (!s.ok()) {
      LOG(ERROR) << "CreateSession failed because worker "
                 << targets_[target_index] << " returned error: " << s;
      status_.Update(s);
    } else {
      found_.insert(found_.end(), devices->begin(), devices->end());
      devices->clear();
    }
    --num_pending_;
    if (num_pending_ == 0) {
      pending_zero_.notify_all();
    }
  }

  // Returns true iff the set of devices allowed by 'x' intersects
  // with the set of devices allowed by 'y' (a field matches when either
  // side leaves it unspecified).
  bool Intersects(const DeviceNameUtils::ParsedName& x,
                  const DeviceNameUtils::ParsedName& y) {
    return (!x.has_job || !y.has_job || x.job == y.job) &&
           (!x.has_replica || !y.has_replica || x.replica == y.replica) &&
           (!x.has_task || !y.has_task || x.task == y.task) &&
           (!x.has_type || !y.has_type || x.type == y.type) &&
           (!x.has_id || !y.has_id || x.id == y.id);
  }

  // Returns true if the device name matches one of the filters,
  // or if there are no filters at all.
  bool MatchFilters(const string& name) {
    if (filters_.empty()) return true;
    DeviceNameUtils::ParsedName x;
    if (DeviceNameUtils::ParseFullName(name, &x)) {
      for (const auto& filter : filters_) {
        if (Intersects(x, filter)) return true;
      }
    }
    return false;
  }

  DeviceFinder(const DeviceFinder&) = delete;
  void operator=(const DeviceFinder&) = delete;
};
// Creates a new MasterSession.  When the request carries a ClusterDef, a
// session-private worker cache is built for that cluster; otherwise the
// master's shared worker cache and local devices are used.  Runs
// asynchronously on the closure threadpool; `done` receives the status.
void Master::CreateSession(const CreateSessionRequest* req,
                           CreateSessionResponse* resp, MyClosure done) {
  SchedClosure([this, req, resp, done]() {
    Status status;
    WorkerCacheFactoryOptions worker_cache_factory_options;
    // Guarantees `done(status)` fires on every exit path below.
    auto call_done = gtl::MakeCleanup([&status, &done] { done(status); });
    status = ValidateExternalGraphDefSyntax(req->graph_def());
    if (!status.ok()) return;
    // The following variables are set differently depending on whether the
    // session uses a client-provided ClusterDef or the master's defaults.
    WorkerCacheInterface* worker_cache = nullptr;
    // Used to hold a session-private worker cache (so it is destroyed with
    // the session); null when the shared env_->worker_cache is used.
    std::unique_ptr<WorkerCacheInterface> worker_cache_ptr;
    std::unique_ptr<DeviceSet> device_set;
    std::unique_ptr<std::vector<std::unique_ptr<Device>>> remote_devices(
        new std::vector<std::unique_ptr<Device>>());
    const ClusterDef& cluster_def = req->config().cluster_def();
    if (!cluster_def.job().empty()) {
      worker_cache_factory_options.cluster_def = cluster_def;
      // Identify which task in the cluster corresponds to this master by
      // matching the scheme-stripped target address.
      string normalized_string(req->target());
      RE2::Replace(&normalized_string, kGrpcPrefixRegex, "");
      for (auto&& job : cluster_def.job()) {
        for (auto&& task : job.tasks()) {
          if (task.second == normalized_string) {
            if (!worker_cache_factory_options.job_name.empty()) {
              status = errors::InvalidArgument(
                  "Found multiple matching tasks that correspond to "
                  "to the master. Master target: '",
                  req->target(),
                  "'. ClusterDef: ", cluster_def.ShortDebugString());
              LOG(ERROR) << status;
              return;
            }
            if (env_->local_devices[0]->parsed_name().job == job.name() &&
                env_->local_devices[0]->parsed_name().task == task.first) {
              status = errors::InvalidArgument(
                  "The ClusterSpec names the job and task index to be the same "
                  "names that were provided when the server booted. This is "
                  "currently not allowed. Job: ",
                  job.name(), ", task index: ", task.first);
              return;
            }
            worker_cache_factory_options.job_name = job.name();
            worker_cache_factory_options.task_index = task.first;
          }
        }
      }
      worker_cache_factory_options.rpc_options = req->config().rpc_options();
      // Create the session-private worker cache for this cluster.
      status = env_->worker_cache_factory(worker_cache_factory_options,
                                          &worker_cache);
      if (!status.ok()) return;
      worker_cache_ptr = std::unique_ptr<WorkerCacheInterface>(worker_cache);
      // Ping all workers and build the device list the session will use.
      status =
          DeviceFinder::GetRemoteDevices(req->config().device_filters(), env_,
                                         worker_cache, remote_devices.get());
      if (!status.ok()) return;
      device_set = std::make_unique<DeviceSet>();
      for (auto&& d : *remote_devices) {
        device_set->AddDevice(d.get());
        DeviceNameUtils::ParsedName name = d->parsed_name();
        // The master task's own CPU:0 device doubles as the client device.
        if (name.job == worker_cache_factory_options.job_name &&
            name.task == worker_cache_factory_options.task_index &&
            name.type == "CPU" && name.id == 0) {
          device_set->set_client_device(d.get());
        }
      }
    } else {
      worker_cache = env_->worker_cache;
      // Ping all workers and build the device list the session will use.
      status =
          DeviceFinder::GetRemoteDevices(req->config().device_filters(), env_,
                                         worker_cache, remote_devices.get());
      if (!status.ok()) return;
      device_set = std::make_unique<DeviceSet>();
      for (auto&& d : *remote_devices) {
        device_set->AddDevice(d.get());
      }
      int num_local_devices = 0;
      for (Device* d : env_->local_devices) {
        device_set->AddDevice(d);
        if (num_local_devices == 0) {
          // The first local device is the client device.
          device_set->set_client_device(d);
        }
        num_local_devices++;
      }
    }
    CHECK(device_set->client_device()) << "No client device found. Missing "
                                       << "CPU:0 device?";
    SessionOptions options;
    options.target = req->target();
    options.config = req->config();
    // Sessions created here may later be extended; disable the static-graph
    // optimization that would forbid extension.
    options.config.mutable_experimental()
        ->set_disable_optimize_for_static_graph(true);
    std::vector<string> filtered_worker_list;
    DeviceFinder::GetRemoteWorkers(req->config().device_filters(), env_,
                                   worker_cache, &filtered_worker_list);
    MasterSession* session = env_->master_session_factory(
        options, env_, std::move(remote_devices), std::move(worker_cache_ptr),
        std::move(device_set), std::move(filtered_worker_list));
    GraphDef* gdef =
        const_cast<CreateSessionRequest*>(req)->mutable_graph_def();
    status = session->Create(std::move(*gdef), cluster_def);
    if (!status.ok()) {
      // On failure, close and release the session instead of registering it.
      session->Close().IgnoreError();
      session->Unref();
      return;
    }
    resp->set_session_handle(session->handle());
    // Insert into the session map, which takes ownership of the session.
    {
      mutex_lock l(mu_);
      CHECK(sessions_.insert({session->handle(), session}).second);
    }
  });
}
void Master::ExtendSession(const ExtendSessionRequest* req,
ExtendSessionResponse* resp, MyClosure done) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done]() {
Status status = ValidateExternalGraphDefSyntax(req->graph_def());
if (status.ok()) {
status = session->Extend(req, resp);
}
session->Unref();
done(status);
});
}
// Sets up a partial run for an existing session.  Rejects duplicate
// request ids before doing any work.
void Master::PartialRunSetup(const PartialRunSetupRequest* req,
                             PartialRunSetupResponse* resp, MyClosure done) {
  Status s = recent_request_ids_.TrackUnique(req->request_id(),
                                             "PartialRunSetup (Master)", *req);
  if (!s.ok()) {
    done(s);
    return;
  }
  auto session = FindMasterSession(req->session_handle());
  if (session == nullptr) {
    done(errors::Aborted("Session ", req->session_handle(), " is not found."));
    return;
  }
  SchedClosure([session, req, resp, done]() {
    Status s = session->PartialRunSetup(req, resp);
    // Release the reference taken by FindMasterSession.
    session->Unref();
    done(s);
  });
}
// Runs one step of the session's graph and records per-step latency stats.
// Rejects duplicate request ids before doing any work.
void Master::RunStep(CallOptions* opts, const RunStepRequestWrapper* req,
                     MutableRunStepResponseWrapper* resp, MyClosure done) {
  Status s = recent_request_ids_.TrackUnique(req->request_id(),
                                             "RunStep (Master)", req);
  if (!s.ok()) {
    done(s);
    return;
  }
  auto start_time = env_->env->NowMicros();
  auto session = FindMasterSession(req->session_handle());
  if (session == nullptr) {
    done(errors::Aborted("Session ", req->session_handle(), " is not found."));
    return;
  }
  SchedClosure([this, start_time, session, opts, req, resp, done]() {
    Status status = session->Run(opts, *req, resp);
    session->Unref();
    uint64 done_time = env_->env->NowMicros();
    done(status);
    mutex_lock l(mu_);
    // NOTE(review): NowMicros() returns microseconds but the divisor is
    // 1e9 (nanoseconds->seconds); the recorded value looks off by 1e3 --
    // confirm the intended unit before changing.
    last_1000_steps_.AddValue((done_time - start_time) / 1e9);
    ++step_count_;
  });
}
void Master::CloseSession(const CloseSessionRequest* req,
CloseSessionResponse* resp, MyClosure done) {
MasterSession* session = nullptr;
{
mu_.lock();
auto iter = sessions_.find(req->session_handle());
if (iter == sessions_.end()) {
mu_.unlock();
done(errors::Aborted(
"Session ", req->session_handle(),
" is not found. Possibly, this master has restarted."));
return;
}
session = iter->second;
sessions_.erase(iter);
mu_.unlock();
}
SchedClosure([session, done]() {
Status s = session->Close();
session->Unref();
done(s);
});
}
// Lists devices.  With a session handle, reports that session's devices;
// otherwise queries all workers plus the master's local devices.
void Master::ListDevices(const ListDevicesRequest* req,
                         ListDevicesResponse* resp, MyClosure done) {
  SchedClosure([this, req, resp, done]() {
    if (!req->session_handle().empty()) {
      auto session = FindMasterSession(req->session_handle());
      if (session == nullptr) {
        done(errors::InvalidArgument(
            "Session ", req->session_handle(),
            " is not found. Possibly, this master has restarted."));
        return;
      }
      // Unrefs the session when this scope exits.
      core::ScopedUnref ref(session);
      Status s = session->ListDevices(resp);
      done(s);
      return;
    }
    std::vector<std::unique_ptr<Device>> remote_devices;
    // No device filters: query every known worker.
    Status s = DeviceFinder::GetRemoteDevices({}, env_, env_->worker_cache,
                                              &remote_devices);
    if (s.ok()) {
      for (Device* dev : env_->local_devices) {
        *(resp->add_local_device()) = dev->attributes();
      }
      for (auto&& dev : remote_devices) {
        *(resp->add_remote_device()) = dev->attributes();
      }
    }
    done(s);
  });
}
// Sends CleanupAll to every worker matching the reset's device filters and
// blocks until all have replied (RPC failures are logged, not propagated).
void Master::CleanupWorkers(const ResetRequest& reset) {
  std::vector<string> worker_names;
  DeviceFinder::GetRemoteWorkers(reset.device_filters(), env_,
                                 env_->worker_cache, &worker_names);
  if (!worker_names.empty()) {
    const int num_workers = worker_names.size();
    // One notification per worker; waited on below before returning so the
    // request/response storage outlives the async callbacks.
    std::vector<Notification> n(num_workers);
    CleanupAllRequest req;
    (*req.mutable_container()) = reset.container();
    std::vector<CleanupAllResponse> resp(num_workers);
    // `c` mirrors `i`; it is captured by value so each callback notifies
    // its own slot in `n`.
    int c = 0;
    for (int i = 0; i < num_workers; ++i) {
      const string& worker_name = worker_names[i];
      auto worker = env_->worker_cache->GetOrCreateWorker(worker_name);
      if (worker) {
        worker->CleanupAllAsync(
            &req, &resp[i], [this, &n, worker_name, worker, c](Status s) {
              if (!s.ok()) {
                LOG(ERROR) << "Worker CleanupAll failed: " << s;
              }
              env_->worker_cache->ReleaseWorker(worker_name, worker);
              n[c].Notify();
            });
      } else {
        // Worker could not be created; nothing to clean up.
        n[c].Notify();
      }
      ++c;
    }
    for (size_t i = 0; i < n.size(); ++i) {
      n[i].WaitForNotification();
    }
  }
}
// Resets the master: drops every session from the map, tells the workers to
// clean up their containers, then closes the sessions asynchronously.
void Master::Reset(const ResetRequest* req, ResetResponse* resp,
                   MyClosure done) {
  // Vector to hold the session pointers present in the master, released
  // from the map (and their map reference) under mu_.
  std::vector<MasterSession*> sessions_to_close;
  {
    mutex_lock l(mu_);
    for (const auto& entry : sessions_) {
      sessions_to_close.push_back(entry.second);
    }
    sessions_.clear();
  }
  CleanupWorkers(*req);
  SchedClosure([sessions_to_close, done]() {
    Status s;
    for (MasterSession* session : sessions_to_close) {
      // Accumulate the first error across all closes.
      s.Update(session->Close());
      session->Unref();
    }
    done(s);
  });
}
// Creates a callable in an existing session.  Rejects duplicate request
// ids before doing any work.
void Master::MakeCallable(const MakeCallableRequest* req,
                          MakeCallableResponse* resp, MyClosure done) {
  Status s = recent_request_ids_.TrackUnique(req->request_id(),
                                             "MakeCallable (Master)", *req);
  if (!s.ok()) {
    done(s);
    return;
  }
  auto session = FindMasterSession(req->session_handle());
  if (session == nullptr) {
    done(errors::Aborted("Session ", req->session_handle(), " is not found."));
    return;
  }
  SchedClosure([session, req, resp, done = std::move(done)]() {
    Status s = session->MakeCallable(*req, resp);
    // Release the reference taken by FindMasterSession.
    session->Unref();
    done(s);
  });
}
// Runs a previously-created callable.  Rejects duplicate request ids
// before doing any work.
void Master::RunCallable(CallOptions* opts, const RunCallableRequest* req,
                         RunCallableResponse* resp, MyClosure done) {
  Status s = recent_request_ids_.TrackUnique(req->request_id(),
                                             "RunCallable (Master)", *req);
  if (!s.ok()) {
    done(s);
    return;
  }
  auto session = FindMasterSession(req->session_handle());
  if (session == nullptr) {
    done(errors::Aborted("Session ", req->session_handle(), " is not found."));
    return;
  }
  SchedClosure([session, opts, req, resp, done = std::move(done)]() {
    Status s = session->RunCallable(opts, *req, resp);
    // Release the reference taken by FindMasterSession.
    session->Unref();
    done(s);
  });
}
void Master::ReleaseCallable(const ReleaseCallableRequest* req,
ReleaseCallableResponse* resp, MyClosure done) {
auto session = FindMasterSession(req->session_handle());
if (session == nullptr) {
done(errors::Aborted("Session ", req->session_handle(), " is not found."));
return;
}
SchedClosure([session, req, resp, done = std::move(done)]() {
Status s = session->ReleaseCallable(*req, resp);
session->Unref();
done(s);
});
}
} | #include "tensorflow/core/distributed_runtime/master.h"
#include <map>
#include <memory>
#include "grpcpp/grpcpp.h"
#include "Eigen/Core"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_master_service_impl.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/master.pb.h"
namespace tensorflow {
// Test fixture that boots an in-process 2-worker cluster and exposes thin
// synchronous wrappers around the MasterService gRPC stubs.
class MasterTest : public ::testing::Test {
 protected:
  MasterTest() {
    std::vector<string> targets;
    SessionOptions options;
    (*options.config.mutable_device_count())["CPU"] = 1;
    (*options.config.mutable_device_count())["GPU"] = 0;
    TF_CHECK_OK(test::TestCluster::MakeTestCluster(
        test::TestClusterConfig().Options(options).Jobs(
            {test::TestJob{"localhost", 2}}),
        &cluster_));
    SharedGrpcChannelPtr channel_ptr;
    // Connect the stub to the first task in the cluster.
    TF_CHECK_OK(NewHostPortGrpcChannel(
        cluster_->targets()[0], &options.config.rpc_options(), &channel_ptr));
    master_ = grpc::MasterService::NewStub(channel_ptr);
  }

  std::unique_ptr<test::TestCluster> cluster_;
  std::unique_ptr<grpc::MasterService::Stub> master_;

  // Calls CreateSession; on success fills `handle` and `initial_version`.
  Status CreateSession(const GraphDef& def, string* handle,
                       int64_t* initial_version) {
    ::grpc::ClientContext ctx;
    CreateSessionRequest req;
    *(req.mutable_graph_def()) = def;
    req.mutable_config()->set_placement_period(1);
    CreateSessionResponse resp;
    const Status s = FromGrpcStatus(master_->CreateSession(&ctx, req, &resp));
    if (s.ok()) {
      *handle = resp.session_handle();
      *initial_version = resp.graph_version();
    }
    return s;
  }

  // Extends the session's graph with `def`; on success fills `new_version`.
  Status ExtendSession(const string& handle, const GraphDef& def,
                       int64_t current_version, int64_t* new_version) {
    ::grpc::ClientContext ctx;
    ExtendSessionRequest req;
    req.set_session_handle(handle);
    *(req.mutable_graph_def()) = def;
    req.set_current_graph_version(current_version);
    ExtendSessionResponse resp;
    const Status s = FromGrpcStatus(master_->ExtendSession(&ctx, req, &resp));
    if (s.ok()) {
      *new_version = resp.new_graph_version();
    }
    return s;
  }

  // Runs one step, feeding the tensors in `feed` and writing each fetched
  // tensor into the corresponding entry of `fetch`.
  Status RunStep(const string& handle,
                 const std::vector<std::pair<string, const Tensor*> >& feed,
                 const std::map<string, Tensor*>& fetch) {
    ::grpc::ClientContext ctx;
    RunStepRequest req;
    req.set_session_handle(handle);
    for (const auto& p : feed) {
      const string& feed_name = p.first;
      const Tensor* feed_tensor = p.second;
      auto f = req.add_feed();
      f->set_name(feed_name);
      feed_tensor->AsProtoTensorContent(f->mutable_tensor());
    }
    for (const auto& p : fetch) {
      const string& fetch_name = p.first;
      req.add_fetch(fetch_name);
    }
    RunStepResponse resp;
    const Status s = FromGrpcStatus(master_->RunStep(&ctx, req, &resp));
    if (s.ok()) {
      // Copy each returned tensor into the caller-provided output slot.
      for (const auto& fetch_resp : resp.tensor()) {
        auto it = fetch.find(fetch_resp.name());
        CHECK(it != fetch.end());
        CHECK(it->second->FromProto(fetch_resp.tensor()));
      }
    }
    return s;
  }

  // Closes the session identified by `handle`.
  Status CloseSession(const string& handle) {
    ::grpc::ClientContext ctx;
    CloseSessionRequest req;
    req.set_session_handle(handle);
    CloseSessionResponse resp;
    return FromGrpcStatus(master_->CloseSession(&ctx, req, &resp));
  }

  // Resets the master, aborting all open sessions.
  Status Reset() {
    ::grpc::ClientContext ctx;
    ResetRequest req;
    ResetResponse resp;
    return FromGrpcStatus(master_->Reset(&ctx, req, &resp));
  }
};
// Creating a session and closing it succeeds; closing an unknown handle
// is Aborted.
TEST_F(MasterTest, CreateClose) {
  GraphDef def;  // Empty.
  string handle;
  int64_t initial_version;
  TF_ASSERT_OK(CreateSession(def, &handle, &initial_version));
  EXPECT_TRUE(errors::IsAborted(CloseSession("randombits")));
  EXPECT_TRUE(CloseSession(handle).ok());
}
// ListDevices with no session reports the single local CPU configured in
// the fixture.
TEST_F(MasterTest, ListDevices) {
  ::grpc::ClientContext ctx;
  ListDevicesRequest req;
  ListDevicesResponse resp;
  const Status s = FromGrpcStatus(master_->ListDevices(&ctx, req, &resp));
  TF_EXPECT_OK(s);
  EXPECT_EQ(1, resp.local_device_size());
  EXPECT_EQ("CPU", resp.local_device(0).device_type());
}
// Reset aborts every open session: subsequent closes report Aborted.
TEST_F(MasterTest, Reset) {
  GraphDef def;  // Empty.
  string s1, s2;
  int64_t initial_version1, initial_version2;
  TF_ASSERT_OK(CreateSession(def, &s1, &initial_version1));
  TF_ASSERT_OK(CreateSession(def, &s2, &initial_version2));
  EXPECT_TRUE(Reset().ok());
  EXPECT_TRUE(errors::IsAborted(CloseSession(s1)));
  EXPECT_TRUE(errors::IsAborted(CloseSession(s2)));
}
// Extending a session makes the added nodes runnable; each extension bumps
// the graph version, and an unknown handle is Aborted.
TEST_F(MasterTest, Extend) {
  GraphDef def_0;  // Empty.
  string handle;
  int64_t initial_version;
  TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));

  Tensor A_expected(DT_FLOAT, TensorShape({2, 2}));
  test::FillValues<float>(&A_expected, {3.0, 2.0, -1.0, 0.0});

  Tensor x_expected(DT_FLOAT, TensorShape({2, 1}));
  test::FillValues<float>(&x_expected, {2.0, 2.0});

  // First extension adds constant A.
  Graph graph_1(OpRegistry::Global());
  test::graph::Constant(&graph_1, A_expected, "A");
  GraphDef def_1;
  test::graph::ToGraphDef(&graph_1, &def_1);
  int64_t version_1;
  TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
  EXPECT_GT(version_1, initial_version);
  Tensor A(DT_FLOAT, TensorShape({2, 2}));
  TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
  test::ExpectTensorEqual<float>(A, A_expected);

  // Second extension adds constant x; both A and x are then fetchable.
  Graph graph_2(OpRegistry::Global());
  test::graph::Constant(&graph_2, x_expected, "x");
  GraphDef def_2;
  test::graph::ToGraphDef(&graph_2, &def_2);
  int64_t version_2;
  EXPECT_TRUE(errors::IsAborted(
      ExtendSession("randombits", def_2, version_1, &version_2)));
  TF_ASSERT_OK(ExtendSession(handle, def_2, version_1, &version_2));
  EXPECT_GT(version_2, version_1);

  Tensor x(DT_FLOAT, TensorShape({2, 1}));
  TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}, {"x:0", &x}}));
  test::ExpectTensorEqual<float>(A, A_expected);
  test::ExpectTensorEqual<float>(x, x_expected);

  TF_ASSERT_OK(CloseSession(handle));
}
// Re-adding an existing stateful node (a Variable) via Extend is
// InvalidArgument.
TEST_F(MasterTest, ExtendUpdateStatefulFails) {
  GraphDef def_0;  // Empty.
  string handle;
  int64_t initial_version;
  TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));

  Graph graph_1(OpRegistry::Global());
  test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
  GraphDef def_1;
  test::graph::ToGraphDef(&graph_1, &def_1);

  int64_t version_1, version_2;
  TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
  EXPECT_GT(version_1, initial_version);
  // Same stateful node again: rejected.
  EXPECT_TRUE(errors::IsInvalidArgument(
      ExtendSession(handle, def_1, version_1, &version_2)));
  TF_ASSERT_OK(CloseSession(handle));
}
// Extending with a stale graph version is Aborted.
TEST_F(MasterTest, ExtendTwiceFails) {
  GraphDef def_0;  // Empty.
  string handle;
  int64_t initial_version;
  TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));

  Graph graph_1(OpRegistry::Global());
  test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
  GraphDef def_1;
  test::graph::ToGraphDef(&graph_1, &def_1);

  int64_t version_1;
  TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
  EXPECT_GT(version_1, initial_version);
  // Reusing `initial_version` after a successful extend: rejected.
  EXPECT_TRUE(errors::IsAborted(
      ExtendSession(handle, def_1, initial_version, &version_1)));
  TF_ASSERT_OK(CloseSession(handle));
}
// 100 threads race to extend from the same base version; exactly one must
// win, all others see Aborted.
TEST_F(MasterTest, ConcurrentExtendOnlyOneSucceeds) {
  GraphDef def_0;  // Empty.
  string handle;
  int64_t initial_version;
  TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));

  Graph graph_1(OpRegistry::Global());
  test::graph::Var(&graph_1, DT_FLOAT, TensorShape({512}));
  GraphDef def_1;
  test::graph::ToGraphDef(&graph_1, &def_1);

  Notification n;
  mutex mu;
  int succeeded = 0;
  int failed = 0;
  auto extend_fn = [this, handle, def_1, initial_version, &n, &mu, &succeeded,
                    &failed]() {
    // All threads block here until Notify() fires, maximizing contention.
    n.WaitForNotification();
    int64_t new_version;
    Status s = ExtendSession(handle, def_1, initial_version, &new_version);
    EXPECT_TRUE(s.ok() || errors::IsAborted(s));
    {
      mutex_lock l(mu);
      if (s.ok()) {
        ++succeeded;
      } else {
        ++failed;
      }
    }
  };

  // Run 100 concurrent extend operations; the pool's destructor joins them.
  {
    thread::ThreadPool thread_pool(Env::Default(), "extend_pool", 100);
    for (int i = 0; i < 100; ++i) {
      thread_pool.Schedule(extend_fn);
    }
    n.Notify();
  }

  EXPECT_EQ(failed, 99);
  EXPECT_EQ(succeeded, 1);
  TF_ASSERT_OK(CloseSession(handle));
}
// Runs steps concurrently with an Extend: fetching pre-existing nodes must
// always work, and fetching the new node flips from NotFound to OK once
// the extension lands.
TEST_F(MasterTest, ConcurrentExtendAndRun) {
  Graph graph_0(OpRegistry::Global());
  Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
  test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
  test::graph::Constant(&graph_0, a_tensor, "A");
  GraphDef def_0;
  test::graph::ToGraphDef(&graph_0, &def_0);

  string handle;
  int64_t initial_version;
  TF_ASSERT_OK(CreateSession(def_0, &handle, &initial_version));

  // The extension adds constant B.
  Graph graph_1(OpRegistry::Global());
  Tensor b_tensor(DT_FLOAT, TensorShape({2, 2}));
  test::FillValues<float>(&b_tensor, {1, 0, 0, 1});
  test::graph::Constant(&graph_1, b_tensor, "B");
  GraphDef def_1;
  test::graph::ToGraphDef(&graph_1, &def_1);

  Notification extend_done;
  Notification extend_can_start;

  // Fetches only the original node A; must succeed throughout.
  auto get_a_fn = [this, handle, &extend_done]() {
    Tensor A(DT_FLOAT, TensorShape({2, 2}));
    while (!extend_done.HasBeenNotified()) {
      TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
    }
    // Run at least once after the extend has completed.
    TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}}));
  };

  // Fetches A and the not-yet-added B; NotFound until the extend lands.
  auto get_a_and_b_fn = [this, handle, &extend_done, &extend_can_start]() {
    Tensor A(DT_FLOAT, TensorShape({2, 2}));
    Tensor B(DT_FLOAT, TensorShape({2, 2}));
    EXPECT_TRUE(
        errors::IsNotFound(RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}})));
    extend_can_start.Notify();
    while (!extend_done.HasBeenNotified()) {
      Status s = RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}});
      EXPECT_TRUE(errors::IsNotFound(s) || s.ok());
    }
    // Run at least once after the extend has completed.
    TF_ASSERT_OK(RunStep(handle, {}, {{"A:0", &A}, {"B:0", &B}}));
  };

  auto extend_fn = [this, handle, def_1, initial_version, &extend_done,
                    &extend_can_start]() {
    // Wait until the first NotFound has been observed before extending.
    extend_can_start.WaitForNotification();
    int64_t version_1;
    TF_ASSERT_OK(ExtendSession(handle, def_1, initial_version, &version_1));
    extend_done.Notify();
  };

  // The pool's destructor joins all three threads.
  {
    thread::ThreadPool thread_pool(Env::Default(), "extend_pool", 3);
    thread_pool.Schedule(get_a_fn);
    thread_pool.Schedule(get_a_and_b_fn);
    thread_pool.Schedule(extend_fn);
  }

  TF_ASSERT_OK(CloseSession(handle));
}
// Power-iteration test: repeatedly evaluates y = A * x through the master
// session and normalizes, converging to the dominant eigenvector of
// A = [[3, 2], [-1, 0]]. The dominant eigenvalue is 2, which the Rayleigh
// quotient estimate below must approach.
TEST_F(MasterTest, EigenProblem) {
  Graph graph(OpRegistry::Global());
  Tensor a_tensor(DT_FLOAT, TensorShape({2, 2}));
  test::FillValues<float>(&a_tensor, {3, 2, -1, 0});
  Node* a_node = test::graph::Constant(&graph, a_tensor);
  Tensor x_tensor(DT_FLOAT, TensorShape({2, 1}));
  test::FillValues<float>(&x_tensor, {0, 0});
  Node* x_node = test::graph::Constant(&graph, x_tensor);
  Node* y_node = test::graph::Matmul(&graph, a_node, x_node, false, false);
  GraphDef def;
  test::graph::ToGraphDef(&graph, &def);
  string handle;
  int64_t initial_version;
  TF_CHECK_OK(CreateSession(def, &handle, &initial_version));
  const Eigen::array<Eigen::DenseIndex, 1> sum_along_dim{0};
  const Eigen::array<Eigen::DenseIndex, 2> matrix_transpose{1, 0};
  Tensor x(DT_FLOAT, TensorShape({2, 1}));
  Tensor y(DT_FLOAT, TensorShape({2, 1}));
  Eigen::Tensor<float, 1, Eigen::RowMajor> y_square_sum;
  // Start from a random vector; iteration drives it to the eigenvector.
  Eigen::Tensor<float, 2, Eigen::RowMajor> y_normalized(2, 1);
  y_normalized.setRandom();
  Eigen::Tensor<float, 1, Eigen::RowMajor> error_square_sum;
  float lambda;
  bool converged = false;
  while (!converged) {
    auto x_matrix = x.matrix<float>();
    x_matrix = y_normalized;
    // One matrix-vector product is evaluated remotely per iteration.
    TF_EXPECT_OK(
        RunStep(handle, {{x_node->name(), &x}}, {{y_node->name() + ":0", &y}}));
    auto y_matrix = y.matrix<float>();
    {
      // Eigenvalue estimate from the first component ratio.
      lambda = y_matrix(0, 0) / x_matrix(0, 0);
      y_square_sum = y.matrix<float>().square().sum(sum_along_dim);
      const float norm = static_cast<float>(sqrt(y_square_sum(0)));
      y_normalized = y_matrix * (1 / norm);
      error_square_sum = (x_matrix - y_normalized).square().sum(sum_along_dim);
      VLOG(1) << "x = [" << x_matrix.shuffle(matrix_transpose) << "] y = ["
              << y_matrix.shuffle(matrix_transpose) << "] lambda = " << lambda;
      converged = sqrt(error_square_sum(0)) < 1e-10;
    }
  }
  EXPECT_NEAR(lambda, 2.0, 0.01);
  TF_EXPECT_OK(CloseSession(handle));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/master.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/master_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f4bef952-252d-42c3-bf1d-7f04f232b2ad | cpp | tensorflow/tensorflow | lower_function_call_op | tensorflow/core/common_runtime/lower_function_call_op.cc | tensorflow/core/common_runtime/lower_function_call_op_test.cc | #include "tensorflow/core/common_runtime/lower_function_call_op.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/lower_function_call_inline_policy.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
namespace tensorflow {
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
using OutputControlSrc = InlineFunctionBodyOptions::OutputControlSource;
// Lowers (inlines) a function-call node `n` into graph `g`.
//
// The node's inlining policy selects how the inlined body is placed and how
// output control dependencies are derived. SymbolicGradient nodes are never
// lowered. If validation rejects the inlining, the call node is left in place
// and OkStatus is returned — lowering is best-effort.
Status RewriteFunctionCallNode(Node* n, Graph* g,
                               const FunctionLibraryDefinition& flib_def,
                               bool keep_caller_fetchable) {
  VLOG(2) << "Lower function call node: " << SummarizeNode(*n);
  // Keep the caller node so its outputs remain fetchable (or at least
  // targetable) after the body is inlined.
  InlineFunctionBodyOptions inline_options;
  inline_options.keep_caller_node = keep_caller_fetchable
                                        ? KeepCallerNode::kFetchable
                                        : KeepCallerNode::kTargetable;
  FunctionCallInlinePolicy policy = GetFunctionCallInlinePolicy(n);
  if (policy == FunctionCallInlinePolicy::kMultiDevicePlacer) {
    inline_options.output_control_src = OutputControlSrc::kControlOutputs;
    inline_options.inlined_function_body_placer =
        InlinedFunctionBodyPlacer::MultiDevice();
  } else if (policy == FunctionCallInlinePolicy::kSingleDevicePlacer) {
    inline_options.output_control_src = OutputControlSrc::kDataOutputs;
    inline_options.inlined_function_body_placer =
        InlinedFunctionBodyPlacer::SingleDevice();
  } else {
    return errors::InvalidArgument("Unsupported function inlining policy");
  }
  // Resolve the function definition: from the "f" attr for PartitionedCall
  // nodes, otherwise by the node's op name.
  core::RefCountPtr<FunctionRecord> fdef;
  if (n->IsPartitionedCall()) {
    NameAttrList func;
    TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "f", &func));
    fdef = flib_def.FindRecord(func.name());
  } else if (n->type_string() == FunctionLibraryDefinition::kGradientOp) {
    VLOG(2) << "Skip SymbolicGradient lowering";
    return absl::OkStatus();
  } else {
    fdef = flib_def.FindRecord(n->type_string());
  }
  if (fdef == nullptr) {
    return errors::Internal("Can't find a function: node=", SummarizeNode(*n));
  }
  std::unique_ptr<FunctionBody> fbody;
  TF_RETURN_IF_ERROR(
      FunctionDefToBodyHelper(std::move(fdef), n->attrs(), &flib_def, &fbody));
  // Optionally prune unreachable nodes from the function body before
  // inlining (controlled by a global flag).
  if (flags::Global().enable_function_pruning_before_inlining.value()) {
    VLOG(2) << "Pruning enabled before inlining";
    PruneFunctionBody(
        fbody->record->fdef(), fbody->graph,
        absl::Span<Node*>(fbody->arg_nodes.data(), fbody->arg_nodes.size()));
  } else {
    VLOG(2) << "Pruning disabled before inlining";
  }
  Status can_inline_function_call =
      ValidateInlining(n, fbody.get(), inline_options);
  if (can_inline_function_call.ok()) {
    TF_RETURN_IF_ERROR(
        InlineFunctionBody(flib_def, g, n, fbody.get(), inline_options));
  } else {
    // Not an error: the call node is simply left un-lowered.
    VLOG(2) << "Failed to inline function call node: "
            << can_inline_function_call.message();
  }
  return absl::OkStatus();
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Builds an AttrValue holding a function reference with the given name.
AttrValue FuncAttr(const string& name) {
  AttrValue result;
  result.mutable_func()->set_name(name);
  return result;
}
// Builds an AttrValue holding a function reference with the given name and
// a "T" type attribute bound to `type`.
AttrValue FuncAttr(const string& name, const DataType type) {
  AttrValue result;
  auto* func = result.mutable_func();
  func->set_name(name);
  (*func->mutable_attr())["T"].set_type(type);
  return result;
}
// Returns SessionOptions with function inlining enabled in the optimizer,
// so that the lowering pass actually inlines function calls.
SessionOptions SessionOptionsWithInlining() {
  SessionOptions options;
  auto* optimizer_options =
      options.config.mutable_graph_options()->mutable_optimizer_options();
  optimizer_options->set_do_function_inlining(true);
  return options;
}
// Runs the LowerFunctionalOpsPass over `graph` with function inlining
// enabled, mimicking what a session does during graph optimization.
Status Rewrite(std::unique_ptr<Graph>* graph) {
  FunctionLibraryDefinition flib_def((*graph)->flib_def());
  GraphOptimizationPassOptions opt_options;
  SessionOptions session_options = SessionOptionsWithInlining();
  opt_options.session_options = &session_options;
  opt_options.graph = graph;
  opt_options.flib_def = &flib_def;
  LowerFunctionalOpsPass pass;
  return pass.Run(opt_options);
}
TEST(LowerFunctionCallTest, InlineFunctionCall) {
using FDH = FunctionDefHelper;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) =
FDH::Create("AddAndMul", {"i: int32"}, {"o: int32"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
Node* function_call;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("AddAndMul"))
.Finalize(root.graph(), &function_call));
TF_ASSERT_OK(root.DoShapeInference(function_call));
auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0));
root.graph()->AddControlEdge(function_call, b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
int add_count = 0;
int mul_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
if (op->type_string() == "Add") add_count++;
if (op->type_string() == "Mul") mul_count++;
}
ASSERT_EQ(partitioned_call_count, 0);
ASSERT_EQ(add_count, 1);
ASSERT_EQ(mul_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 100);
}
}
TEST(LowerFunctionCallTest, InlineFunctionCallAfterPruning) {
flags::Global().enable_function_pruning_before_inlining.reset(true);
using FDH = FunctionDefHelper;
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = FDH::Create(
"AddAndMul", {"i: int32", "j: int32", "k: int32", "r: resource"},
{"o: int32"}, {},
{{{"add"}, "Add", {"i", "i"}, {{"T", DT_INT32}}},
{{"div"}, "FloorDiv", {"i", "i"}, {{"T", DT_INT32}}},
{{"gather"},
"ResourceGather",
{"r", "i"},
{{"Tindices", DT_INT32}, {"dtype", DT_FLOAT}}},
{{"ret"}, "Mul", {"i", "i"}, {{"T", DT_INT32}}}},
{{"o", "ret:z:0"}},
{{"must_execute", "add"}});
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto x = ops::Placeholder(root.WithOpName("X"), DT_INT32);
auto y = ops::Placeholder(root.WithOpName("Y"), DT_INT32);
auto z = ops::Placeholder(root.WithOpName("Z"), DT_INT32);
auto r = ops::Placeholder(root.WithOpName("R"), DT_RESOURCE);
Node* function_call;
std::vector<NodeBuilder::NodeOut> inputs(
{NodeBuilder::NodeOut(x.node()), NodeBuilder::NodeOut(y.node()),
NodeBuilder::NodeOut(z.node()), NodeBuilder::NodeOut(r.node())});
TF_ASSERT_OK(NodeBuilder("F", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32, DT_INT32, DT_INT32, DT_RESOURCE})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("AddAndMul"))
.Finalize(root.graph(), &function_call));
TF_ASSERT_OK(root.DoShapeInference(function_call));
auto b = ops::Identity(root.WithOpName("B"), Output(function_call, 0));
root.graph()->AddControlEdge(function_call, b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
int add_count = 0;
int mul_count = 0;
int floor_div_count = 0;
int resource_gather_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
if (op->type_string() == "Add") add_count++;
if (op->type_string() == "Mul") mul_count++;
if (op->type_string() == "FloorDiv") floor_div_count++;
if (op->type_string() == "ResourceGather") resource_gather_count++;
}
ASSERT_EQ(partitioned_call_count, 0);
ASSERT_EQ(add_count, 1);
ASSERT_EQ(mul_count, 1);
ASSERT_EQ(floor_div_count, 0);
ASSERT_EQ(resource_gather_count, 0);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(x.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(b)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 100);
}
flags::Global().enable_function_pruning_before_inlining.reset(false);
}
TEST(LowerFunctionCallTest, DoNotInlineTpuOrXlaFunctions) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef tpu_func = test::function::XTimesTwo();
tpu_func.mutable_signature()->set_name("TpuXTimesTwo");
(*tpu_func.mutable_attr())["_tpu_replicate"].set_b(true);
FunctionDef xla_func = test::function::XTimesTwo();
xla_func.mutable_signature()->set_name("XlaXTimesTwo");
(*xla_func.mutable_attr())["_xla_compile_id"].set_s("cluster_0");
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* tpu_call;
TF_ASSERT_OK(NodeBuilder("B", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("XTimesTwo", DT_INT32))
.Attr("_tpu_replicate", "cluster")
.Finalize(root.graph(), &tpu_call));
Node* xla_call;
TF_ASSERT_OK(NodeBuilder("C", "PartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_INT32})
.Attr("Tout", {DT_INT32})
.Attr("f", FuncAttr("XTimesTwo", DT_INT32))
.Attr("_xla_compile_id", "cluster")
.Finalize(root.graph(), &xla_call));
TF_ASSERT_OK(root.DoShapeInference(tpu_call));
TF_ASSERT_OK(root.DoShapeInference(xla_call));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int partitioned_call_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsPartitionedCall()) partitioned_call_count++;
}
ASSERT_EQ(partitioned_call_count, 2);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(
session.Run(feeds, {Output(tpu_call), Output(xla_call)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 2);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
EXPECT_EQ(out_tensors[1].scalar<int>()(), 20);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_function_call_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_function_call_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
83794754-4a41-4ddb-9315-284eacde22be | cpp | google/tensorstore | json_change_map | tensorstore/driver/json/json_change_map.cc | tensorstore/driver/json/json_change_map_test.cc | #include "tensorstore/driver/json/json_change_map.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/btree_map.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_pointer.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_json_driver {
Result<::nlohmann::json> JsonChangeMap::Apply(
const ::nlohmann::json& existing,
std::string_view sub_value_pointer) const {
Map::const_iterator changes_it = map_.lower_bound(sub_value_pointer),
changes_end = map_.end();
if (changes_it != changes_end && changes_it->first == sub_value_pointer) {
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(existing, sub_value_pointer,
json_pointer::kSimulateCreate),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
return {std::in_place, changes_it->second};
}
if (changes_it != map_.begin()) {
auto prev_it = std::prev(changes_it);
if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto* modified_value,
json_pointer::Dereference(
prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
json_pointer::kMustExist));
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Dereference(existing, prev_it->first,
json_pointer::kSimulateCreate),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
return {std::in_place, *modified_value};
}
}
::nlohmann::json new_value;
{
TENSORSTORE_ASSIGN_OR_RETURN(
const ::nlohmann::json* restricted_existing,
json_pointer::Dereference(existing, sub_value_pointer,
json_pointer::kSimulateCreate));
if (restricted_existing) {
new_value = *restricted_existing;
} else {
new_value = ::nlohmann::json(::nlohmann::json::value_t::discarded);
}
}
for (; changes_it != changes_end &&
json_pointer::Compare(changes_it->first, sub_value_pointer) ==
json_pointer::kContainedIn;
++changes_it) {
TENSORSTORE_RETURN_IF_ERROR(
json_pointer::Replace(new_value,
std::string_view(changes_it->first)
.substr(sub_value_pointer.size()),
changes_it->second),
internal::ConvertInvalidArgumentToFailedPrecondition(_));
}
return new_value;
}
bool JsonChangeMap::CanApplyUnconditionally(
std::string_view sub_value_pointer) const {
Map::const_iterator changes_it;
if (sub_value_pointer.empty()) {
changes_it = map_.begin();
} else {
changes_it = map_.lower_bound(sub_value_pointer);
}
if (changes_it != map_.end()) {
if (changes_it->first == sub_value_pointer) {
return true;
}
}
if (changes_it != map_.begin()) {
auto prev_it = std::prev(changes_it);
return json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains;
}
return false;
}
absl::Status JsonChangeMap::AddChange(std::string_view sub_value_pointer,
::nlohmann::json sub_value) {
auto it = map_.lower_bound(sub_value_pointer);
if (it != map_.end()) {
auto compare_result = json_pointer::Compare(sub_value_pointer, it->first);
assert(compare_result <= json_pointer::kEqual);
if (compare_result == json_pointer::kEqual) {
it->second = std::move(sub_value);
return absl::OkStatus();
}
while (compare_result == json_pointer::kContains) {
it = map_.erase(it);
if (it == map_.end()) break;
compare_result = json_pointer::Compare(sub_value_pointer, it->first);
}
}
if (it != map_.begin()) {
auto prev_it = std::prev(it);
if (json_pointer::Compare(prev_it->first, sub_value_pointer) ==
json_pointer::kContains) {
return json_pointer::Replace(
prev_it->second, sub_value_pointer.substr(prev_it->first.size()),
std::move(sub_value));
}
}
map_.try_emplace(it, std::string(sub_value_pointer), std::move(sub_value));
return absl::OkStatus();
}
}
} | #include "tensorstore/driver/json/json_change_map.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_json_driver::JsonChangeMap;
using ::testing::ElementsAre;
using ::testing::Optional;
using ::testing::Pair;
TEST(JsonChangeMapTest, AddChangeValid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(false)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", true));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b/a", MatchesJson(true)),
Pair("/a/b/c", MatchesJson(42))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
EXPECT_THAT(changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}}))));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/b", MatchesJson(::nlohmann::json{
{"d", "xyz"}, {"c", 42}, {"a", false}}))));
}
TEST(JsonChangeMapTest, AddChangeValidIndependent) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/c", 42));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/e", "xx"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/a", "yy"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b/a", false));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"d", "xyz"}}));
EXPECT_THAT(
changes.underlying_map(),
ElementsAre(Pair("/a/a", MatchesJson("yy")),
Pair("/a/b", MatchesJson(::nlohmann::json{{"d", "xyz"}})),
Pair("/a/e", MatchesJson("xx"))));
}
TEST(JsonChangeMapTest, AddChangeInvalid) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.AddChange("/a/b", 43),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyEmptyChangeMap) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}),
Optional(MatchesJson(::nlohmann::json{{"x", "y"}, {"z", "w"}})));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"z", "w"}}, "/x"),
Optional(MatchesJson(::nlohmann::json("y"))));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", {{"b", {{"c", 42}}}}}}));
EXPECT_THAT(changes.Apply("old", "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyInvalidContainingChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply(false, "/a/b/c"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyChangeMapPriorNonContaining) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 10));
EXPECT_THAT(changes.Apply({{"b", 42}}, "/b"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyContainingChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
EXPECT_THAT(changes.Apply({{"e", "f"}}, "/a/b/c"), Optional(MatchesJson(42)));
}
TEST(JsonChangeMapTest, ApplyChangeMap) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", {{"b", {{"c", 42}}}}));
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply({{"x", "y"}, {"e", "f"}}),
Optional(MatchesJson(::nlohmann::json{
{"a", {{"b", {{"c", 42}}}}}, {"e", 42}, {"x", "y"}})));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap1) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/e", 42));
EXPECT_THAT(changes.Apply(42),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyInvalidChangeMap2) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/4", 42));
EXPECT_THAT(changes.Apply({1, 2, 3}),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointer) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestInvalidJsonPointerNoChanges) {
JsonChangeMap changes;
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, ApplyRequestNewMember) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", 42));
EXPECT_THAT(changes.Apply(::nlohmann::json::object_t{}, "/a"),
Optional(MatchesJson(::nlohmann::json{{"b", 42}})));
}
TEST(JsonChangeMapTest, ApplyIncompatibleChangeExactRequest) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("/a", 42));
EXPECT_THAT(changes.Apply(false, "/a"),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(JsonChangeMapTest, AddIncompatibleChanges) {
JsonChangeMap changes;
TENSORSTORE_EXPECT_OK(changes.AddChange("", 42));
EXPECT_THAT(changes.AddChange("/a", 50),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"JSON Pointer reference \"/a\" cannot be applied "
"to number value: 42"));
}
TEST(JsonChangeMapTest, CanApplyUnconditionally) {
JsonChangeMap changes;
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/b/c"));
TENSORSTORE_EXPECT_OK(changes.AddChange("/a/b", {{"c", 42}}));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/c"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b"));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a/b/d"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a"));
EXPECT_FALSE(changes.CanApplyUnconditionally("/a/x"));
EXPECT_FALSE(changes.CanApplyUnconditionally(""));
TENSORSTORE_EXPECT_OK(changes.AddChange("", {{"a", false}}));
EXPECT_TRUE(changes.CanApplyUnconditionally(""));
EXPECT_TRUE(changes.CanApplyUnconditionally("/a"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/json/json_change_map.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/json/json_change_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
46a3c061-5a2a-484e-80e6-f9a69d4d72b6 | cpp | tensorflow/tensorflow | split_utils | tensorflow/core/data/split_utils.cc | tensorflow/core/data/split_utils_test.cc | #include "tensorflow/core/data/split_utils.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNumToSkip[] = "num_to_skip";
constexpr char kSplitProvider[] = "split_provider";
constexpr char kSlash[] = "/";
constexpr char kIndex[] = "index";
}
IndexSplitProvider::IndexSplitProvider(int64_t n) : i_(0), n_(n) {
VLOG(3) << "Created index split provider with " << n << " splits.";
}
absl::Status IndexSplitProvider::GetNext(Tensor* split, bool* end_of_splits) {
tsl::mutex_lock l(mu_);
if (i_ >= n_) {
*end_of_splits = true;
return absl::OkStatus();
}
*end_of_splits = false;
*split = Tensor(DT_INT64, TensorShape{});
split->scalar<int64_t>()() = i_++;
return absl::OkStatus();
}
absl::Status IndexSplitProvider::Reset() {
tsl::mutex_lock l(mu_);
i_ = 0;
return absl::OkStatus();
}
absl::Status IndexSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
tsl::mutex_lock l(mu_);
return writer->WriteScalar(full_name(kIndex), i_);
}
absl::Status IndexSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
tsl::mutex_lock l(mu_);
return reader->ReadScalar(full_name(kIndex), &i_);
}
int64_t IndexSplitProvider::Cardinality() const {
if (n_ == tsl::kint64max) {
return kInfiniteCardinality;
}
return n_;
}
ShardingSplitProvider::ShardingSplitProvider(
int64_t num_shards, int64_t shard_index,
std::shared_ptr<SplitProvider> split_provider)
: num_shards_(num_shards),
shard_index_(shard_index),
split_provider_(split_provider),
num_to_skip_(shard_index_) {}
absl::Status ShardingSplitProvider::GetNext(Tensor* split,
bool* end_of_splits) {
tsl::mutex_lock l(mu_);
while (num_to_skip_ > 0) {
TF_RETURN_IF_ERROR(split_provider_->GetNext(split, end_of_splits));
if (*end_of_splits) {
return absl::OkStatus();
}
num_to_skip_--;
}
num_to_skip_ = num_shards_ - 1;
TF_RETURN_IF_ERROR(split_provider_->GetNext(split, end_of_splits));
return absl::OkStatus();
}
absl::Status ShardingSplitProvider::Reset() {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Reset());
num_to_skip_ = shard_index_;
return absl::OkStatus();
}
absl::Status ShardingSplitProvider::Save(
std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Save(
[&](const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
},
writer));
return writer->WriteScalar(full_name(kNumToSkip), num_to_skip_);
}
absl::Status ShardingSplitProvider::Restore(
std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) {
tsl::mutex_lock l(mu_);
TF_RETURN_IF_ERROR(split_provider_->Restore(
[&](const std::string& key) {
return full_name(absl::StrCat(kSplitProvider, kSlash, key));
},
reader));
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kNumToSkip), &num_to_skip_));
return absl::OkStatus();
}
absl::StatusOr<std::shared_ptr<SplitProvider>> GetSingleSplitProvider(
IteratorContext* ctx, const DatasetBase* dataset) {
if (ctx->split_providers().size() != 1) {
return absl::FailedPreconditionError(
absl::StrCat("Failed to get single split provider for dataset ",
dataset->DebugString(), ". Found ",
ctx->split_providers().size(), " split providers"));
}
return ctx->split_providers()[0];
}
absl::StatusOr<std::vector<std::unique_ptr<SplitProvider>>> GetSplitProviders(
const DatasetBase* dataset) {
std::vector<std::unique_ptr<SplitProvider>> result;
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset->InputDatasets(&inputs));
for (const auto& input : inputs) {
std::vector<std::unique_ptr<SplitProvider>> providers;
TF_RETURN_IF_ERROR(input->MakeSplitProviders(&providers));
for (auto& provider : providers) {
result.push_back(std::move(provider));
}
}
return result;
}
absl::StatusOr<std::vector<IteratorContext>> CreateInputIteratorContexts(
IteratorContext* ctx, const DatasetBase* dataset) {
std::vector<const DatasetBase*> inputs;
TF_RETURN_IF_ERROR(dataset->InputDatasets(&inputs));
std::vector<IteratorContext> result;
if (ctx->split_providers().empty()) {
for (int i = 0; i < inputs.size(); ++i) {
result.emplace_back(ctx);
}
return result;
}
int64_t num_sources = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
if (inputs[i]->num_sources() < 0) {
return absl::FailedPreconditionError(absl::StrCat(
"Failed to determine the number of sources for dataset of type ",
inputs[i]->type_string()));
}
num_sources += inputs[i]->num_sources();
}
if (num_sources != ctx->split_providers().size()) {
return absl::FailedPreconditionError(absl::StrCat(
"Attempted to feed ", ctx->split_providers().size(),
" split providers into a dataset with ", num_sources, " sources"));
}
int64_t split_provider_index = 0;
for (size_t i = 0; i < inputs.size(); ++i) {
IteratorContext::Params params(ctx);
params.split_providers.clear();
for (int j = 0; j < inputs[i]->num_sources(); ++j) {
params.split_providers.push_back(
ctx->split_providers()[split_provider_index + j]);
}
split_provider_index += inputs[i]->num_sources();
result.emplace_back(std::move(params));
}
return result;
}
}
} | #include "tensorflow/core/data/split_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
std::string full_name(const std::string& name) {
return FullName("test", name);
}
Status SaveAndRestore(SplitProvider* split_provider) {
VariantTensorDataWriter writer;
TF_RETURN_IF_ERROR(split_provider->Save(full_name, &writer));
std::vector<const VariantTensorData*> variants;
writer.GetData(&variants);
VariantTensorDataReader reader(variants);
TF_RETURN_IF_ERROR(split_provider->Restore(full_name, &reader));
return absl::OkStatus();
}
Status CheckOutput(SplitProvider* split_provider,
std::vector<Tensor> expected) {
int64_t next = 0;
bool end_of_splits = false;
while (!end_of_splits) {
Tensor split;
TF_RETURN_IF_ERROR(split_provider->GetNext(&split, &end_of_splits));
if (!end_of_splits) {
test::ExpectEqual(split, expected[next++]);
}
}
EXPECT_EQ(next, expected.size());
return absl::OkStatus();
}
TEST(IndexSplitProviderTest, Empty) {
IndexSplitProvider split_provider(0);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {})));
}
TEST(IndexSplitProviderTest, One) {
IndexSplitProvider split_provider(1);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {{0}})));
}
TEST(IndexSplitProviderTest, Three) {
IndexSplitProvider split_provider(3);
TF_EXPECT_OK(
CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}})));
}
TEST(IndexSplitProviderTest, SaveAndRestore) {
IndexSplitProvider split_provider(4);
std::vector<Tensor> expected =
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}});
for (int i = 0; i < expected.size(); ++i) {
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = true;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_FALSE(end_of_splits);
test::ExpectEqual(split, expected[i]);
}
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
TEST(ShardingSplitProviderTest, TwoWayShardZero) {
auto base = std::make_shared<IndexSplitProvider>(4);
ShardingSplitProvider split_provider(2, 0, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{0}, {2}})));
}
TEST(ShardingSplitProviderTest, TwoWayShardOne) {
auto base = std::make_shared<IndexSplitProvider>(4);
ShardingSplitProvider split_provider(2, 1, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{1}, {3}})));
}
TEST(ShardingSplitProviderTest, ThreeWayShardOne) {
auto base = std::make_shared<IndexSplitProvider>(6);
ShardingSplitProvider split_provider(3, 1, base);
TF_EXPECT_OK(CheckOutput(
&split_provider, CreateTensors<int64_t>(TensorShape({}), {{1}, {4}})));
}
TEST(ShardingSplitProviderTest, Empty) {
auto base = std::make_shared<IndexSplitProvider>(1);
ShardingSplitProvider split_provider(2, 1, base);
TF_EXPECT_OK(CheckOutput(&split_provider,
CreateTensors<int64_t>(TensorShape({}), {})));
}
TEST(ShardingSplitProviderTest, SaveAndRestore) {
auto base = std::make_shared<IndexSplitProvider>(6);
std::vector<Tensor> expected =
CreateTensors<int64_t>(TensorShape({}), {{1}, {4}});
ShardingSplitProvider split_provider(3, 1, base);
for (int i = 0; i < expected.size(); ++i) {
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = true;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_FALSE(end_of_splits);
test::ExpectEqual(split, expected[i]);
}
TF_ASSERT_OK(SaveAndRestore(&split_provider));
Tensor split;
bool end_of_splits = false;
TF_ASSERT_OK(split_provider.GetNext(&split, &end_of_splits));
EXPECT_TRUE(end_of_splits);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/split_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/split_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28d9d3a2-ed77-490f-a92c-b11cef165408 | cpp | google/libaddressinput | address_field | cpp/src/address_field.cc | cpp/test/address_field_test.cc | #include <libaddressinput/address_field.h>
#include <cstddef>
#include <ostream>
#include "util/size.h"
using i18n::addressinput::AddressField;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::RECIPIENT;
using i18n::addressinput::size;
std::ostream& operator<<(std::ostream& o, AddressField field) {
static const char* const kFieldNames[] = {
"COUNTRY",
"ADMIN_AREA",
"LOCALITY",
"DEPENDENT_LOCALITY",
"SORTING_CODE",
"POSTAL_CODE",
"STREET_ADDRESS",
"ORGANIZATION",
"RECIPIENT",
};
static_assert(COUNTRY == 0, "bad_base");
static_assert(RECIPIENT == size(kFieldNames) - 1, "bad_length");
if (field < 0 || static_cast<size_t>(field) >= size(kFieldNames)) {
o << "[INVALID ENUM VALUE " << static_cast<int>(field) << "]";
} else {
o << kFieldNames[field];
}
return o;
} | #include <libaddressinput/address_field.h>
#include <sstream>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::SORTING_CODE;
TEST(AddressFieldTest, ValidEnumValue) {
std::ostringstream oss;
oss << SORTING_CODE;
EXPECT_EQ("SORTING_CODE", oss.str());
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_field.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_field_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
a8876eb4-6b08-4e2c-954c-ccf51ebc946e | cpp | tensorflow/tensorflow | topk_splitter | third_party/xla/xla/service/gpu/transforms/topk_splitter.cc | third_party/xla/xla/service/gpu/transforms/topk_splitter_test.cc | #include "xla/service/gpu/transforms/topk_splitter.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr size_t kRequiredAlignment = 1024;
constexpr size_t kMaximumBatchSize = 1024;
class TopkSplitterVisitor : public DfsHloRewriteVisitor {
public:
explicit TopkSplitterVisitor(size_t split_threshold)
: split_threshold_(split_threshold) {}
absl::Status HandleCustomCall(HloInstruction* inst) override {
HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst);
if (topk == nullptr || topk->custom_call_target() != "TopK") {
return absl::OkStatus();
}
HloComputation* comp = inst->parent();
Shape data_shape = topk->operand(0)->shape();
bool has_batch = data_shape.dimensions_size() == 2;
if (has_batch && data_shape.dimensions(0) != 1) {
return absl::OkStatus();
}
size_t n = data_shape.dimensions(has_batch ? 1 : 0);
int64_t k = topk->shape().tuple_shapes(0).dimensions(has_batch ? 1 : 0);
if (k > sqrt(n)) {
return absl::OkStatus();
}
if (n % kRequiredAlignment != 0) {
return absl::OkStatus();
}
if (n < split_threshold_) return absl::OkStatus();
int new_batch =
std::min(absl::bit_floor(n / split_threshold_), kMaximumBatchSize);
int new_n = n / new_batch;
Shape split_input_shape =
ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, new_n});
TF_ASSIGN_OR_RETURN(
HloInstruction * reshaped,
MakeReshapeHlo(split_input_shape, topk->mutable_operand(0)));
Shape batch_topk_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, k}),
ShapeUtil::MakeShape(S32, {new_batch, k})});
HloInstruction* batch_topk =
comp->AddInstruction(HloInstruction::CreateCustomCall(
batch_topk_shape, {reshaped}, topk->to_apply(), "TopK",
""));
TF_ASSIGN_OR_RETURN(HloInstruction * indices,
MakeGetTupleElementHlo(batch_topk, 1));
TF_ASSIGN_OR_RETURN(HloInstruction * values,
MakeGetTupleElementHlo(batch_topk, 0));
Shape iota_shape = ShapeUtil::MakeShape(S32, {new_batch});
TF_ASSIGN_OR_RETURN(
HloInstruction * fix,
MakeBinaryHlo(
HloOpcode::kMultiply, MakeIotaHlo(comp, iota_shape, 0),
MakeBroadcastHlo(MakeR0ConstantHlo<int32_t>(comp, new_n),
{}, iota_shape)));
TF_ASSIGN_OR_RETURN(
indices, MakeBinaryHlo(HloOpcode::kAdd, indices,
MakeBroadcastHlo(fix, {0}, indices->shape())));
Shape linear_index_shape = ShapeUtil::MakeShape(S32, {k * new_batch});
Shape linear_shape = ShapeUtil::ChangeElementType(
linear_index_shape, data_shape.element_type());
Shape linear_sort_shape =
ShapeUtil::MakeTupleShape({linear_shape, linear_index_shape});
HloInstruction* aggregated_sort =
comp->AddInstruction(HloInstruction::CreateSort(
linear_sort_shape, 0,
{*MakeReshapeHlo(linear_shape, values),
*MakeReshapeHlo(linear_index_shape, indices)},
topk->to_apply(), true));
auto slice_tuple = [&](HloInstruction* sort, const size_t index) {
return *MakeReshapeHlo(
topk->shape().tuple_shapes(index),
*MakeSliceHlo(*MakeGetTupleElementHlo(sort, index), {0}, {k}, {1}));
};
return ReplaceInstruction(topk,
comp->AddInstruction(HloInstruction::CreateTuple({
slice_tuple(aggregated_sort, 0),
slice_tuple(aggregated_sort, 1),
})));
}
private:
size_t split_threshold_;
};
}
absl::StatusOr<bool> TopKSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return TopkSplitterVisitor(split_threshold_)
.RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/transforms/topk_splitter.h"
#include <stdint.h>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/topk_rewriter.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
using TopkSplitterTest = HloTestBase;
constexpr absl::string_view kComparator = R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
})";
TEST_F(TopkSplitterTest, SplitsTopK) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,1073741824] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true));
auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0)));
auto slice_result = [&](auto input, size_t i) {
return m::Reshape(m::Slice(m::GetTupleElement(input, i)));
};
auto index_correction =
m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant())));
auto sorted = m::Sort(
m::Reshape(m::GetTupleElement(first_topk, 0)),
m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction)));
EXPECT_TRUE(
Match(module->entry_computation()->root_instruction(),
m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1))));
}
TEST_F(TopkSplitterTest, SplitsTopKNoBatchDimension) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1073741824] parameter(0)
ROOT %cc.2 = (f32[5], s32[5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true));
auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0)));
auto slice_result = [&](auto input, size_t i) {
return m::Reshape(m::Slice(m::GetTupleElement(input, i)));
};
auto index_correction =
m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant())));
auto sorted = m::Sort(
m::Reshape(m::GetTupleElement(first_topk, 0)),
m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction)));
EXPECT_TRUE(
Match(module->entry_computation()->root_instruction(),
m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1))));
}
TEST_F(TopkSplitterTest, SplitFailsUnderThreshold) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524288] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(
RunHloPass(TopKSplitter(1048576), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, SplitFailsUnaligned) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524289] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, SplitFailsLargeK) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524288] parameter(0)
ROOT %cc.2 = (f32[1,1024], s32[1,1024]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, Equivalent) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,16384] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
EXPECT_TRUE(HloDCE().Run(module).status().ok());
};
EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip));
}
TEST_F(TopkSplitterTest, StableSorts) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%constant.1 = f32[] constant(42)
%broadcast.2= f32[1,16384] broadcast(f32[] %constant.1), dimensions={}
ROOT %cc.3 = (f32[1,5], s32[1,5]) custom-call(%broadcast.2), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
EXPECT_TRUE(HloDCE().Run(module).status().ok());
};
EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
99e0ea1b-a681-4414-8060-f0272fd44c2a | cpp | abseil/abseil-cpp | extension | absl/strings/internal/str_format/extension.cc | absl/strings/internal/str_format/extension_test.cc | #include "absl/strings/internal/str_format/extension.h"
#include <errno.h>
#include <algorithm>
#include <string>
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace str_format_internal {
std::string FlagsToString(Flags v) {
std::string s;
s.append(FlagsContains(v, Flags::kLeft) ? "-" : "");
s.append(FlagsContains(v, Flags::kShowPos) ? "+" : "");
s.append(FlagsContains(v, Flags::kSignCol) ? " " : "");
s.append(FlagsContains(v, Flags::kAlt) ? "#" : "");
s.append(FlagsContains(v, Flags::kZero) ? "0" : "");
return s;
}
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
#define ABSL_INTERNAL_X_VAL(id) \
constexpr absl::FormatConversionChar FormatConversionCharInternal::id;
ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_X_VAL, )
#undef ABSL_INTERNAL_X_VAL
constexpr absl::FormatConversionChar FormatConversionCharInternal::kNone;
#define ABSL_INTERNAL_CHAR_SET_CASE(c) \
constexpr FormatConversionCharSet FormatConversionCharSetInternal::c;
ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(ABSL_INTERNAL_CHAR_SET_CASE, )
#undef ABSL_INTERNAL_CHAR_SET_CASE
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kStar;
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kIntegral;
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kFloating;
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kNumeric;
constexpr FormatConversionCharSet FormatConversionCharSetInternal::kPointer;
#endif
bool FormatSinkImpl::PutPaddedString(string_view value, int width,
int precision, bool left) {
size_t space_remaining = 0;
if (width >= 0)
space_remaining = static_cast<size_t>(width);
size_t n = value.size();
if (precision >= 0) n = std::min(n, static_cast<size_t>(precision));
string_view shown(value.data(), n);
space_remaining = Excess(shown.size(), space_remaining);
if (!left) Append(space_remaining, ' ');
Append(shown);
if (left) Append(space_remaining, ' ');
return true;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/str_format/extension.h"
#include <random>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
namespace my_namespace {
class UserDefinedType {
public:
UserDefinedType() = default;
void Append(absl::string_view str) { value_.append(str.data(), str.size()); }
const std::string& Value() const { return value_; }
friend void AbslFormatFlush(UserDefinedType* x, absl::string_view str) {
x->Append(str);
}
private:
std::string value_;
};
}
namespace {
std::string MakeRandomString(size_t len) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis('a', 'z');
std::string s(len, '0');
for (char& c : s) {
c = dis(gen);
}
return s;
}
TEST(FormatExtensionTest, SinkAppendSubstring) {
for (size_t chunk_size : {1, 10, 100, 1000, 10000}) {
std::string expected, actual;
absl::str_format_internal::FormatSinkImpl sink(&actual);
for (size_t chunks = 0; chunks < 10; ++chunks) {
std::string rand = MakeRandomString(chunk_size);
expected += rand;
sink.Append(rand);
}
sink.Flush();
EXPECT_EQ(actual, expected);
}
}
TEST(FormatExtensionTest, SinkAppendChars) {
for (size_t chunk_size : {1, 10, 100, 1000, 10000}) {
std::string expected, actual;
absl::str_format_internal::FormatSinkImpl sink(&actual);
for (size_t chunks = 0; chunks < 10; ++chunks) {
std::string rand = MakeRandomString(1);
expected.append(chunk_size, rand[0]);
sink.Append(chunk_size, rand[0]);
}
sink.Flush();
EXPECT_EQ(actual, expected);
}
}
TEST(FormatExtensionTest, VerifyEnumEquality) {
#define X_VAL(id) \
EXPECT_EQ(absl::FormatConversionChar::id, \
absl::str_format_internal::FormatConversionCharInternal::id);
ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(X_VAL, );
#undef X_VAL
#define X_VAL(id) \
EXPECT_EQ(absl::FormatConversionCharSet::id, \
absl::str_format_internal::FormatConversionCharSetInternal::id);
ABSL_INTERNAL_CONVERSION_CHARS_EXPAND_(X_VAL, );
#undef X_VAL
}
TEST(FormatExtensionTest, SetConversionChar) {
absl::str_format_internal::FormatConversionSpecImpl spec;
EXPECT_EQ(spec.conversion_char(),
absl::str_format_internal::FormatConversionCharInternal::kNone);
spec.set_conversion_char(
absl::str_format_internal::FormatConversionCharInternal::d);
EXPECT_EQ(spec.conversion_char(),
absl::str_format_internal::FormatConversionCharInternal::d);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/str_format/extension.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/str_format/extension_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
703e8d85-24a9-416f-9372-b1a016d13026 | cpp | tensorflow/tensorflow | parallel_filter_dataset_op | tensorflow/core/kernels/data/parallel_filter_dataset_op.cc | tensorflow/core/kernels/data/parallel_filter_dataset_op_test.cc | #include "tensorflow/core/kernels/data/parallel_filter_dataset_op.h"
#include <deque>
#include <utility>
#include "absl/status/status.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
namespace tensorflow {
namespace data {
constexpr const char* const ParallelFilterDatasetOp::kDatasetType;
constexpr const char* const ParallelFilterDatasetOp::kInputDataset;
constexpr const char* const
ParallelFilterDatasetOp::kOtherArguments;
constexpr const char* const
ParallelFilterDatasetOp::kNumParallelCalls;
constexpr const char* const ParallelFilterDatasetOp::kPredicate;
constexpr const char* const
ParallelFilterDatasetOp::kDeterministic;
constexpr const char* const ParallelFilterDatasetOp::kTarguments;
constexpr const char* const ParallelFilterDatasetOp::kOutputTypes;
constexpr const char* const ParallelFilterDatasetOp::kOutputShapes;
constexpr char kComponent[] = "component";
constexpr char kReturnValues[] = "return_values";
constexpr char kPredicateValues[] = "predicate_values";
constexpr char kInvocationResults[] = "invocation_results";
constexpr char kSize[] = "size";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kErrorCode[] = "code";
constexpr char kErrorMessage[] = "error_message";
class ParallelFilterDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
int64_t num_parallel_calls, DeterminismPolicy deterministic,
std::unique_ptr<CapturedFunction> captured_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
num_parallel_calls_(num_parallel_calls),
deterministic_(deterministic),
captured_func_(std::move(captured_func)) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
Node* num_parallel_calls = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(static_cast<int32>(num_parallel_calls_),
&num_parallel_calls));
AttrValue deterministic_attr;
b->BuildAttrValue(deterministic_.String(), &deterministic_attr);
AttrValue predicate_attr;
b->BuildAttrValue(captured_func_->func(), &predicate_attr);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {{0, input_graph_node}}, {{1, other_arguments}},
{{kDeterministic, deterministic_attr},
{kPredicate, predicate_attr},
{kTarguments, other_arguments_types_attr}},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
num_parallel_calls_(std::make_shared<model::SharedState>(
params.dataset->num_parallel_calls_, mu_, cond_var_)),
deterministic_(params.dataset->deterministic_.IsDeterministic() ||
params.dataset->deterministic_.IsDefault()),
autotune_(params.dataset->num_parallel_calls_ == model::kAutotune) {}
~Iterator() override {
CancelThreads(true);
input_impl_.reset();
if (deregister_fn_) deregister_fn_();
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
interleave_depth_ = ctx->interleave_depth();
if (num_parallel_calls_->value == model::kAutotune) {
num_parallel_calls_->value = GetAutotuneDefaultParallelism(ctx);
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(),
[this]() { CancelThreads(false); }, &deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
IteratorContext(params), this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
std::shared_ptr<InvocationResult> result;
{
mutex_lock l(*mu_);
EnsureThreadsStarted(ctx);
while (ShouldWait(ctx, &result)) {
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (cancelled_) {
return errors::Cancelled("Iterator was cancelled");
}
}
tsl::profiler::TraceMe traceme([&] {
return tsl::profiler::TraceMeEncode("ParallelFilterConsume",
{{"element_id", result->uid}});
});
return ProcessResult(ctx, result, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeAsyncUnknownRatioNode(
std::move(args),
{model::MakeParameter("parallelism", num_parallel_calls_, 1,
ctx->runner_threadpool_size())});
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(*mu_);
while (num_calls_ > 0) {
cond_var_->wait(l);
}
if (num_calls_ != 0) {
return errors::FailedPrecondition(
"Unexpected outstanding calls encountered.");
}
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(absl::StrCat(prefix(), "::", kInvocationResults),
kSize, invocation_results_.size()));
for (size_t i = 0; i < invocation_results_.size(); i++) {
const auto& result = *(invocation_results_[i]);
std::string element_prefix =
absl::StrCat(prefix(), "::", kInvocationResults, "::", i);
TF_RETURN_IF_ERROR(
WriteStatusLocked(writer, element_prefix, result.status));
TF_RETURN_IF_ERROR(WriteComponentsLocked(
writer, absl::StrCat(element_prefix, "::", kReturnValues),
result.return_values));
TF_RETURN_IF_ERROR(WriteComponentsLocked(
writer, absl::StrCat(element_prefix, "::", kPredicateValues),
result.predicate_values));
if (result.end_of_input) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(element_prefix, kEndOfInput, ""));
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
int64_t invocation_results_size;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", kInvocationResults),
kSize, &invocation_results_size));
DCHECK(invocation_results_.empty());
for (size_t i = 0; i < invocation_results_size; i++) {
invocation_results_.push_back(std::make_shared<InvocationResult>());
auto& result = *invocation_results_.back();
std::string element_prefix =
absl::StrCat(prefix(), "::", kInvocationResults, "::", i);
TF_RETURN_IF_ERROR(
ReadStatusLocked(reader, element_prefix, &result.status));
TF_RETURN_IF_ERROR(ReadComponentsLocked(
ctx, reader, absl::StrCat(element_prefix, "::", kReturnValues),
&result.return_values));
TF_RETURN_IF_ERROR(ReadComponentsLocked(
ctx, reader, absl::StrCat(element_prefix, "::", kPredicateValues),
&result.predicate_values));
result.end_of_input = reader->Contains(element_prefix, kEndOfInput);
RecordBufferEnqueue(ctx, result.return_values);
result.notification.Notify();
}
return absl::OkStatus();
}
TraceMeMetadata GetTraceMeMetadata() const override {
int64_t parallelism = -1;
if (mu_->try_lock()) {
parallelism = num_parallel_calls_->value;
mu_->unlock();
}
data::TraceMeMetadata result;
result.push_back(
std::make_pair("autotune", autotune_ ? "true" : "false"));
result.push_back(
std::make_pair("deterministic", deterministic_ ? "true" : "false"));
result.push_back(std::make_pair(
"parallelism",
parallelism == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(parallelism))));
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
struct InvocationResult {
InvocationResult() : uid(tensorflow::EnvTime::NowNanos()) {}
Notification notification;
Status status;
std::vector<Tensor> return_values;
std::vector<Tensor> predicate_values;
bool end_of_input = false;
const int64_t uid;
};
void CancelThreads(bool wait) TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
while (wait && num_calls_ > 0) {
cond_var_->wait(l);
}
}
void EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!runner_thread_) {
auto ctx_copy = std::make_shared<IteratorContext>(*ctx);
runner_thread_ = ctx->StartThread(
"tf_data_parallel_filter",
std::bind(&Iterator::RunnerThread, this, ctx_copy));
}
}
void CallCompleted(const std::shared_ptr<IteratorContext>& ctx,
const std::shared_ptr<InvocationResult>& result)
TF_LOCKS_EXCLUDED(*mu_) {
mutex_lock l(*mu_);
num_calls_--;
result->notification.Notify();
cond_var_->notify_all();
}
// Pulls the next element from the input iterator and invokes the predicate on
// it asynchronously. The element itself is stashed in `result->return_values`
// so it can be emitted downstream if the predicate accepts it. Completion
// (success, error, or end-of-input) is always signaled via CallCompleted.
void CallFunction(const std::shared_ptr<IteratorContext>& ctx,
                  const std::shared_ptr<InvocationResult>& result)
    TF_LOCKS_EXCLUDED(*mu_) {
  tsl::profiler::TraceMe traceme([&] {
    return tsl::profiler::TraceMeEncode("ParallelFilterProduce",
                                        {{"element_id", result->uid}});
  });
  // Fetch the next input element; bail out early on error or end-of-input.
  std::vector<Tensor> input_element;
  result->status = input_impl_->GetNext(ctx.get(), &input_element,
                                        &result->end_of_input);
  if (result->end_of_input || !result->status.ok()) {
    CallCompleted(ctx, result);
    return;
  }
  // Keep a copy: if the predicate returns true, these tensors are exactly
  // what GetNext hands to the consumer.
  result->return_values = input_element;
  // Runs after the predicate finishes; validates that it produced exactly
  // one scalar bool before publishing the result.
  auto done = [this, ctx, result](Status status) {
    result->status.Update(status);
    if (status.ok() && (result->predicate_values.size() != 1 ||
                        result->predicate_values[0].dtype() != DT_BOOL ||
                        result->predicate_values[0].NumElements() != 1)) {
      result->status.Update(errors::InvalidArgument(
          "Filter predicate `predicate` must return a scalar bool."));
    }
    RecordBufferEnqueue(ctx.get(), result->return_values);
    CallCompleted(ctx, result);
  };
  if (dataset()->captured_func_->use_inter_op_parallelism()) {
    // The captured function manages its own inter-op parallelism.
    instantiated_captured_func_->RunAsync(
        ctx.get(), std::move(input_element), &result->predicate_values,
        std::move(done), model_node());
  } else {
    // Synchronous function: schedule it on the context's runner so it does
    // not block the runner thread that issued this call.
    auto fn = std::bind(
        [this, ctx, result](std::vector<Tensor> input_element) {
          return instantiated_captured_func_->Run(
              ctx.get(), std::move(input_element),
              &result->predicate_values, model_node());
        },
        std::move(input_element));
    (*ctx->runner())(
        [this, ctx, fn = std::move(fn), done = std::move(done)]() {
          Status s;
          if (IsRecording(ctx.get())) {
            s = fn();
          } else {
            // Attribute time spent in the user function to this iterator
            // for the autotuning model.
            RecordStart(ctx.get());
            s = fn();
            RecordStop(ctx.get());
          }
          done(s);
        });
  }
}
// Converts a finished InvocationResult into GetNext outputs. An OutOfRange
// error raised by the user function is rewritten to InvalidArgument so it is
// not mistaken for a legitimate end-of-input signal, which would prematurely
// terminate this iterator.
Status ProcessResult(IteratorContext* ctx,
                     const std::shared_ptr<InvocationResult>& result,
                     std::vector<Tensor>* out_tensors,
                     bool* end_of_sequence) TF_LOCKS_EXCLUDED(*mu_) {
  if (!result->end_of_input && result->status.ok()) {
    *out_tensors = std::move(result->return_values);
    *end_of_sequence = false;
    return absl::OkStatus();
  }
  if (errors::IsOutOfRange(result->status)) {
    return errors::InvalidArgument(
        "Function invocation produced OutOfRangeError: ",
        result->status.message());
  }
  *end_of_sequence = result->end_of_input;
  return result->status;
}
// Body of the single background thread that keeps up to num_parallel_calls_
// predicate invocations in flight. Sleeps on cond_var_ whenever at capacity
// (or the result buffer is full) and exits only on cancellation.
void RunnerThread(const std::shared_ptr<IteratorContext>& ctx)
    TF_LOCKS_EXCLUDED(*mu_) {
  RecordStart(ctx.get());
  auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
  std::vector<std::shared_ptr<InvocationResult>> new_calls;
  {
    tf_shared_lock l(*mu_);
    new_calls.reserve(num_parallel_calls_->value);
  }
  // True while no additional call may be scheduled: either enough calls are
  // already in flight or enough results are buffered and unconsumed.
  auto busy = [this]() TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) -> bool {
    int64_t num_parallel_calls = num_parallel_calls_->value;
    return num_calls_ >= num_parallel_calls ||
           invocation_results_.size() >= num_parallel_calls;
  };
  while (true) {
    {
      mutex_lock l(*mu_);
      while (!cancelled_ && busy()) {
        // Stop/start recording around the wait so blocked time is not
        // attributed to this iterator.
        RecordStop(ctx.get());
        cond_var_->wait(l);
        RecordStart(ctx.get());
      }
      if (cancelled_) {
        return;
      }
      // Reserve invocation slots under the lock; the calls themselves are
      // issued below without holding it.
      while (!busy()) {
        invocation_results_.push_back(std::make_shared<InvocationResult>());
        new_calls.push_back(invocation_results_.back());
        num_calls_++;
      }
      cond_var_->notify_all();
    }
    for (const auto& call : new_calls) {
      CallFunction(ctx, call);
    }
    new_calls.clear();
  }
}
// Decides whether GetNext must keep waiting. First drops already-rejected
// elements from the front of the queue; then either hands back the next
// ready result via `*result` (returning false) or reports that the caller
// should block (returning true). Caller must hold *mu_.
bool ShouldWait(IteratorContext* ctx,
                std::shared_ptr<InvocationResult>* result)
    TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
  if (cancelled_) {
    return false;
  }
  // A predicate value is only meaningful if the invocation succeeded and
  // did not hit end-of-input.
  auto PredicateReady = [](const InvocationResult* result) -> bool {
    return result->status.ok() && !result->end_of_input;
  };
  auto GetPredicateValue = [](const InvocationResult* result) -> bool {
    return result->predicate_values[0].scalar<bool>()();
  };
  // Discard leading results whose predicate evaluated to false; they will
  // never be surfaced to the caller regardless of determinism mode.
  while (!invocation_results_.empty() &&
         invocation_results_.front()->notification.HasBeenNotified() &&
         PredicateReady(invocation_results_.front().get()) &&
         !GetPredicateValue(invocation_results_.front().get())) {
    RecordBufferDequeue(ctx, invocation_results_.front()->return_values);
    invocation_results_.pop_front();
    cond_var_->notify_all();
  }
  if (!deterministic_) {
    // Nondeterministic: return the first available accepted result. Errors
    // and end-of-input are only consumed from the head of the queue (the
    // `it == begin()` case), so they are still surfaced in order.
    for (auto it = invocation_results_.begin();
         it != invocation_results_.end(); ++it) {
      if ((*it)->notification.HasBeenNotified() &&
          (it == invocation_results_.begin() ||
           (PredicateReady(it->get()) && GetPredicateValue(it->get())))) {
        std::swap(*result, *it);
        invocation_results_.erase(it);
        cond_var_->notify_all();
        return false;
      }
    }
  } else {
    // Deterministic: only ever consume the head of the queue.
    if (!invocation_results_.empty() &&
        invocation_results_.front()->notification.HasBeenNotified()) {
      std::swap(*result, invocation_results_.front());
      invocation_results_.pop_front();
      if (!(*result)->end_of_input) {
        RecordBufferDequeue(ctx, (*result)->return_values);
      }
      cond_var_->notify_all();
      return false;
    }
  }
  return true;
}
// Checkpoints `values` under `prefix` as a size scalar plus one tensor per
// component ("component[j]"). Inverse of ReadComponentsLocked.
Status WriteComponentsLocked(IteratorStateWriter* writer,
                             const std::string& prefix,
                             const std::vector<Tensor>& values)
    TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
  TF_RETURN_IF_ERROR(writer->WriteScalar(prefix, kSize, values.size()));
  for (size_t j = 0; j < values.size(); j++) {
    TF_RETURN_IF_ERROR(writer->WriteTensor(
        prefix, absl::StrCat(kComponent, "[", j, "]"), values[j]));
  }
  return absl::OkStatus();
}
// Restores a tensor list written by WriteComponentsLocked: reads the size
// scalar, then one tensor per component into `values`.
Status ReadComponentsLocked(IteratorContext* ctx,
                            IteratorStateReader* reader,
                            const std::string& prefix,
                            std::vector<Tensor>* values)
    TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
  int64_t size;
  TF_RETURN_IF_ERROR(reader->ReadScalar(prefix, kSize, &size));
  size_t num_return_values = static_cast<size_t>(size);
  // Round-trip check rejects negative (or otherwise unrepresentable)
  // sizes coming from a corrupt checkpoint.
  if (num_return_values != size) {
    return errors::InvalidArgument(prefix, ",", kSize, ": ", size,
                                   " is not a valid value of type size_t.");
  }
  values->reserve(num_return_values);
  for (size_t j = 0; j < num_return_values; j++) {
    values->emplace_back();
    TF_RETURN_IF_ERROR(reader->ReadTensor(
        ctx->flr(), prefix, absl::StrCat(kComponent, "[", j, "]"),
        &values->back()));
  }
  return absl::OkStatus();
}
// Checkpoints `status` under `key`: always the numeric error code, plus the
// message only when the status is not OK. Inverse of ReadStatusLocked.
Status WriteStatusLocked(IteratorStateWriter* writer,
                         const std::string& key, const Status& status)
    TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
  TF_RETURN_IF_ERROR(writer->WriteScalar(
      key, kErrorCode, static_cast<int64_t>(status.code())));
  if (!status.ok()) {
    TF_RETURN_IF_ERROR(writer->WriteScalar(key, kErrorMessage,
                                           std::string(status.message())));
  }
  return absl::OkStatus();
}
// Restores a Status written by WriteStatusLocked: the message is only
// present (and only read) when the stored code is not kOk.
Status ReadStatusLocked(IteratorStateReader* reader, const std::string& key,
                        Status* status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
  int64_t code_int;
  TF_RETURN_IF_ERROR(reader->ReadScalar(key, kErrorCode, &code_int));
  absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
  if (code != absl::StatusCode::kOk) {
    tstring error_message;
    TF_RETURN_IF_ERROR(
        reader->ReadScalar(key, kErrorMessage, &error_message));
    *status = Status(code, error_message);
  } else {
    *status = absl::OkStatus();
  }
  return absl::OkStatus();
}
// Guards the mutable iterator state below; shared (via shared_ptr) so the
// autotuning machinery can also synchronize on it.
const std::shared_ptr<mutex> mu_;
// Wakes the runner thread and consumers blocked in GetNext.
const std::shared_ptr<condition_variable> cond_var_;
// Parallelism knob; value may be adjusted by autotuning at runtime.
const std::shared_ptr<model::SharedState> num_parallel_calls_;
const bool deterministic_;  // Preserve input order in the output stream.
const bool autotune_;
// Number of predicate invocations currently in flight.
int64_t num_calls_ TF_GUARDED_BY(*mu_) = 0;
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
std::unique_ptr<IteratorBase> input_impl_;
// FIFO of pending/finished invocations, in input order.
std::deque<std::shared_ptr<InvocationResult>> invocation_results_
    TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
// Deregisters the cancellation callback; invoked on destruction.
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> runner_thread_ TF_GUARDED_BY(*mu_);
};
// Dataset members (immutable after construction).
const DatasetBase* const input_;
const int64_t num_parallel_calls_;
const DeterminismPolicy deterministic_;
const std::unique_ptr<CapturedFunction> captured_func_;
};
// Parses op attributes at kernel-construction time: the predicate function
// metadata (which must have at most one short-circuit output) and the
// determinism policy string.
ParallelFilterDatasetOp::ParallelFilterDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx) {
  OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kPredicate, {},
                                               &func_metadata_));
  OP_REQUIRES(ctx, func_metadata_->short_circuit_info().indices.size() <= 1,
              errors::InvalidArgument(
                  "predicate function has more than one return value."));
  std::string deterministic;
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kDeterministic, &deterministic));
  OP_REQUIRES_OK(ctx,
                 DeterminismPolicy::FromString(deterministic, &deterministic_));
}
// Builds the dataset from runtime inputs: reads the num_parallel_calls
// scalar, captures the predicate's closure arguments, and records an
// autotune metric when parallelism is left to the model.
void ParallelFilterDatasetOp::MakeDataset(OpKernelContext* ctx,
                                          DatasetBase* input,
                                          DatasetBase** output) {
  int64_t num_parallel_calls;
  OP_REQUIRES_OK(
      ctx, ParseScalarArgument(ctx, kNumParallelCalls, &num_parallel_calls));
  std::unique_ptr<CapturedFunction> captured_func;
  OP_REQUIRES_OK(ctx,
                 CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
                                          &captured_func));
  if (num_parallel_calls == model::kAutotune) {
    metrics::RecordTFDataAutotune(kDatasetType);
  }
  *output = new Dataset(ctx, input, num_parallel_calls, deterministic_,
                        std::move(captured_func));
}
namespace {
// CPU kernel registration. NOTE(review): the colocation exemption presumably
// exists because the captured function's arguments may live on other
// devices -- confirm against other tf.data parallel op registrations.
REGISTER_KERNEL_BUILDER(Name("ParallelFilterDataset").Device(DEVICE_CPU),
                        ParallelFilterDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("ParallelFilterDataset");
}
}
} | #include "tensorflow/core/kernels/data/parallel_filter_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "parallel_map_dataset";
// Test parameterization for ParallelFilterDataset: bundles the input dataset
// with the inputs/attrs needed to construct the op node under test.
class ParallelFilterDatasetParams : public DatasetParams {
 public:
  template <typename T>
  ParallelFilterDatasetParams(
      T input_dataset_params, std::vector<Tensor> other_arguments,
      int num_parallel_calls, const std::string& deterministic,
      FunctionDefHelper::AttrValueWrapper pred_func,
      std::vector<FunctionDef> func_lib, DataTypeVector type_arguments,
      DataTypeVector output_dtypes,
      std::vector<PartialTensorShape> output_shapes, string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        other_arguments_(std::move(other_arguments)),
        num_parallel_calls_(num_parallel_calls),
        deterministic_(deterministic),
        pred_func_(std::move(pred_func)),
        func_lib_(std::move(func_lib)),
        type_arguments_(std::move(type_arguments)) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // Op inputs: the captured-function arguments followed by the
  // num_parallel_calls scalar.
  std::vector<Tensor> GetInputTensors() const override {
    auto input_tensors = other_arguments_;
    input_tensors.emplace_back(
        CreateTensor<int64_t>(TensorShape({}), {num_parallel_calls_}));
    return input_tensors;
  }
  // Input names must match the op def: input_dataset, other_arguments_i...,
  // num_parallel_calls.
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->clear();
    input_names->reserve(input_dataset_params_.size() +
                         other_arguments_.size());
    input_names->emplace_back(ParallelFilterDatasetOp::kInputDataset);
    for (int i = 0; i < other_arguments_.size(); ++i) {
      input_names->emplace_back(
          absl::StrCat(ParallelFilterDatasetOp::kOtherArguments, "_", i));
    }
    input_names->emplace_back(ParallelFilterDatasetOp::kNumParallelCalls);
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {
        {"predicate", pred_func_},        {"Targuments", type_arguments_},
        {"output_shapes", output_shapes_}, {"output_types", output_dtypes_},
        {"deterministic", deterministic_}, {"metadata", ""}};
    return absl::OkStatus();
  }
  string dataset_type() const override {
    return ParallelFilterDatasetOp::kDatasetType;
  }
  std::vector<FunctionDef> func_lib() const override { return func_lib_; }
 private:
  std::vector<Tensor> other_arguments_;
  int num_parallel_calls_;
  std::string deterministic_;  // "true"/"false"/"default" policy string.
  FunctionDefHelper::AttrValueWrapper pred_func_;
  std::vector<FunctionDef> func_lib_;
  DataTypeVector type_arguments_;
};
class ParallelFilterDatasetOpTest : public DatasetOpsTestBase {};
// Common builder for the factory functions below: wraps `components` in a
// TensorSliceDataset and layers a ParallelFilterDataset on top. All test
// variants share empty `other_arguments`/`type_arguments` and a single
// DT_INT64 output component, so only the genuinely varying pieces are
// parameters. Extracted to remove nine near-identical copies.
ParallelFilterDatasetParams MakeParallelFilterDatasetParams(
    std::vector<Tensor> components, int num_parallel_calls,
    const std::string& deterministic,
    FunctionDefHelper::AttrValueWrapper pred_func,
    std::vector<FunctionDef> func_lib,
    std::vector<PartialTensorShape> output_shapes) {
  auto tensor_slice_dataset_params =
      TensorSliceDatasetParams(std::move(components), "tensor_slice_dataset");
  return ParallelFilterDatasetParams(
      std::move(tensor_slice_dataset_params),
      /*other_arguments=*/{}, num_parallel_calls, deterministic,
      std::move(pred_func), std::move(func_lib),
      /*type_arguments=*/{},
      /*output_dtypes=*/{DT_INT64}, std::move(output_shapes), kNodeName);
}
// num_parallel_calls = 1, deterministic.
ParallelFilterDatasetParams ParallelFilterDatasetParams1() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/1, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({1})});
}
// num_parallel_calls = 1, nondeterministic.
ParallelFilterDatasetParams ParallelFilterDatasetParams2() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/1, DeterminismPolicy::kNondeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({1})});
}
// num_parallel_calls = 2, deterministic.
ParallelFilterDatasetParams ParallelFilterDatasetParams3() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/2, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({1})});
}
// num_parallel_calls = 4, deterministic.
ParallelFilterDatasetParams ParallelFilterDatasetParams4() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/4, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({1})});
}
// num_parallel_calls = 4, nondeterministic.
ParallelFilterDatasetParams ParallelFilterDatasetParams5() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/4, DeterminismPolicy::kNondeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({1})});
}
// Parallelism chosen by autotuning, deterministic.
ParallelFilterDatasetParams ParallelFilterDatasetParams6() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      model::kAutotune, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({1})});
}
// Empty input dataset: the filter should produce no elements.
ParallelFilterDatasetParams InputHasNoElementParams() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{0}, {})},
      /*num_parallel_calls=*/1, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({})});
}
// Predicate returns two outputs instead of a single scalar bool.
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams1() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/1, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("GetUnique",
                                     {{"T", DT_INT64}, {"out_idx", DT_INT32}}),
      {test::function::Unique()}, {PartialTensorShape({3, 1})});
}
// Predicate output is not a scalar (rank-2 input element).
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams2() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/1, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
      {test::function::IsZero()}, {PartialTensorShape({3, 1})});
}
// Predicate returns a non-bool value.
ParallelFilterDatasetParams InvalidPredFuncFilterDatasetParams3() {
  return MakeParallelFilterDatasetParams(
      {CreateTensor<int64_t>(TensorShape{9}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
      /*num_parallel_calls=*/1, DeterminismPolicy::kDeterministic,
      FunctionDefHelper::FunctionRef("NonZero", {{"T", DT_INT64}}),
      {test::function::NonZero()}, {PartialTensorShape({})});
}
// GetNext expectations: the IsZero predicate keeps the three leading zeros
// of the 9-element input; the empty dataset yields nothing.
std::vector<GetNextTestCase<ParallelFilterDatasetParams>> GetNextTestCases() {
  return {{ParallelFilterDatasetParams1(),
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {ParallelFilterDatasetParams2(),
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {ParallelFilterDatasetParams3(),
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {ParallelFilterDatasetParams4(),
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {ParallelFilterDatasetParams5(),
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {ParallelFilterDatasetParams6(),
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {InputHasNoElementParams(),
           {}}};
}
ITERATOR_GET_NEXT_TEST_P(ParallelFilterDatasetOpTest,
                         ParallelFilterDatasetParams, GetNextTestCases())
TEST_F(ParallelFilterDatasetOpTest, DatasetNodeName) {
  auto dataset_params = ParallelFilterDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()))
}
TEST_F(ParallelFilterDatasetOpTest, DatasetTypeString) {
  auto dataset_params = ParallelFilterDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(ParallelFilterDatasetOp::kDatasetType)));
}
TEST_F(ParallelFilterDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = ParallelFilterDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<ParallelFilterDatasetParams>>
DatasetOutputShapesTestCases() {
  return {{ParallelFilterDatasetParams1(),
           {PartialTensorShape({1})}},
          {InputHasNoElementParams(),
           {PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ParallelFilterDatasetOpTest,
                             ParallelFilterDatasetParams,
                             DatasetOutputShapesTestCases())
// Cardinality is unknown for filters: it depends on the predicate.
std::vector<CardinalityTestCase<ParallelFilterDatasetParams>>
CardinalityTestCases() {
  return {{ParallelFilterDatasetParams1(),
           kUnknownCardinality},
          {InputHasNoElementParams(),
           kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(ParallelFilterDatasetOpTest,
                           ParallelFilterDatasetParams, CardinalityTestCases())
TEST_F(ParallelFilterDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = ParallelFilterDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<ParallelFilterDatasetParams>>
IteratorOutputShapesTestCases() {
  return {{ParallelFilterDatasetParams1(),
           {PartialTensorShape({1})}},
          {InputHasNoElementParams(),
           {PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ParallelFilterDatasetOpTest,
                              ParallelFilterDatasetParams,
                              IteratorOutputShapesTestCases())
TEST_F(ParallelFilterDatasetOpTest, IteratorPrefix) {
  auto dataset_params = ParallelFilterDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(
      name_utils::IteratorPrefix(ParallelFilterDatasetOp::kDatasetType,
                                 dataset_params.iterator_prefix())));
}
// Save/restore at breakpoints 0, 2, and 6 elements into the stream.
std::vector<IteratorSaveAndRestoreTestCase<ParallelFilterDatasetParams>>
IteratorSaveAndRestoreTestCases() {
  return {{ParallelFilterDatasetParams1(),
           {0, 2, 6},
           CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
          {InputHasNoElementParams(),
           {0, 2, 6},
           {}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ParallelFilterDatasetOpTest,
                                 ParallelFilterDatasetParams,
                                 IteratorSaveAndRestoreTestCases())
// Parameterized over params whose predicate violates the "single scalar
// bool" contract; GetNext must fail with InvalidArgument and emit nothing.
class ParameterizedInvalidPredicateFuncTest
    : public ParallelFilterDatasetOpTest,
      public ::testing::WithParamInterface<ParallelFilterDatasetParams> {};
TEST_P(ParameterizedInvalidPredicateFuncTest, InvalidPredicateFunc) {
  auto dataset_params = GetParam();
  TF_ASSERT_OK(Initialize(dataset_params));
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  EXPECT_EQ(
      iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
          .code(),
      absl::StatusCode::kInvalidArgument);
  EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_SUITE_P(
    ParallelFilterDatasetOpTest, ParameterizedInvalidPredicateFuncTest,
    ::testing::ValuesIn({InvalidPredFuncFilterDatasetParams1(),
                         InvalidPredFuncFilterDatasetParams2(),
                         InvalidPredFuncFilterDatasetParams3()}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_filter_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/parallel_filter_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
983decde-fe7b-4ef5-bfc6-14c31e95fc7b | cpp | tensorflow/tensorflow | pjrt_c_api_gpu | third_party/xla/xla/pjrt/c/pjrt_c_api_gpu.cc | third_party/xla/xla/pjrt/c/pjrt_c_api_gpu_test.cc | #include "xla/pjrt/c/pjrt_c_api_gpu.h"
#include "absl/base/call_once.h"
#include "absl/log/initialize.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_internal.h"
#include "tsl/platform/platform.h"
// Returns the GPU PJRT plugin's C API dispatch table. Outside Google's
// internal build, logging is initialized exactly once on first call so the
// plugin works in processes that never call absl::InitializeLog themselves.
const PJRT_Api* GetPjrtApi() {
#ifndef PLATFORM_GOOGLE
  static absl::once_flag once;
  absl::call_once(once, []() { absl::InitializeLog(); });
#endif
  return pjrt::gpu_plugin::GetGpuPjrtApi();
} | #include "xla/pjrt/c/pjrt_c_api_gpu.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <thread>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/client/client_library.h"
#include "xla/ffi/api/ffi.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/ffi_api.h"
#include "xla/ffi/type_id_registry.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/pjrt/c/pjrt_c_api_ffi_extension.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_extension.h"
#include "xla/pjrt/c/pjrt_c_api_gpu_internal.h"
#include "xla/pjrt/c/pjrt_c_api_helpers.h"
#include "xla/pjrt/c/pjrt_c_api_test.h"
#include "xla/pjrt/c/pjrt_c_api_test_base.h"
#include "xla/pjrt/c/pjrt_c_api_wrapper_impl.h"
#include "xla/pjrt/distributed/in_memory_key_value_store.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/pjrt/pjrt_future.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/tests/literal_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace pjrt {
namespace {
#ifdef TENSORFLOW_USE_ROCM
// Registers this plugin's PJRT_Api with the shared C API test suite under the
// platform name matching the build (rocm vs cuda). kUnused exists only to
// force the registration to run during static initialization.
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
                                                  "rocm"),
                      true);
#else
const bool kUnused = (RegisterPjRtCApiTestFactory([]() { return GetPjrtApi(); },
                                                  "cuda"),
                      true);
#endif
// Fixture binding the shared PJRT C API test base to the GPU plugin's API.
class PjrtCApiGpuTest : public PjrtCApiTestBase {
 public:
  PjrtCApiGpuTest() : PjrtCApiTestBase(GetPjrtApi()) {}
};
// Creates a buffer, wraps its raw device pointer in a second "view" buffer
// via PJRT_Client_CreateViewOfDeviceBuffer, copies the view to host, and
// checks the bytes match what create_buffer wrote.
TEST_F(PjrtCApiGpuTest, CreateViewOfDeviceBuffer) {
  auto [buffer, buffer_future] = create_buffer();
  TF_CHECK_OK(buffer_future.Await());
  // Fetch the raw device pointer of the source buffer.
  PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args device_buffer_ptr_args;
  device_buffer_ptr_args.struct_size =
      PJRT_Buffer_OpaqueDeviceMemoryDataPointer_Args_STRUCT_SIZE;
  device_buffer_ptr_args.extension_start = nullptr;
  device_buffer_ptr_args.buffer = buffer.get();
  PJRT_Error* device_buffer_ptr_error =
      api_->PJRT_Buffer_OpaqueDeviceMemoryDataPointer(&device_buffer_ptr_args);
  ASSERT_EQ(device_buffer_ptr_error, nullptr);
  PJRT_Buffer_Device_Args device_args = PJRT_Buffer_Device_Args{
      PJRT_Buffer_Device_Args_STRUCT_SIZE,
      nullptr,
      buffer.get(),
  };
  PJRT_Error* device_error = api_->PJRT_Buffer_Device(&device_args);
  ASSERT_EQ(device_error, nullptr);
  // Build the view over the same device memory.
  PJRT_Client_CreateViewOfDeviceBuffer_Args create_view_args;
  create_view_args.struct_size =
      PJRT_Client_CreateViewOfDeviceBuffer_Args_STRUCT_SIZE;
  create_view_args.extension_start = nullptr;
  create_view_args.client = client_;
  create_view_args.device_buffer_ptr = device_buffer_ptr_args.device_memory_ptr;
  // NOTE(review): the view is declared S32 while the host copy below uses
  // F32; both are 4 bytes/element so sizes agree, but the element types look
  // inconsistent with the float data checked at the end -- confirm intent.
  xla::Shape shape = xla::ShapeUtil::MakeShape(xla::S32, {4});
  create_view_args.dims = shape.dimensions().data();
  create_view_args.num_dims = shape.dimensions().size();
  create_view_args.element_type =
      pjrt::ConvertToPjRtBufferType(shape.element_type());
  pjrt::BufferMemoryLayoutData c_layout_data;
  TF_ASSERT_OK_AND_ASSIGN(
      c_layout_data, pjrt::ConvertToBufferMemoryLayoutData(shape.layout()));
  create_view_args.layout = &(c_layout_data.c_layout);
  create_view_args.device = device_args.device;
  // The deleter callback owns and frees its heap-allocated std::function.
  std::function<void()> on_delete_callback = []() mutable {};
  create_view_args.on_delete_callback_arg =
      new std::function(on_delete_callback);
  create_view_args.on_delete_callback = [](void* device_buffer_ptr,
                                           void* user_arg) {
    auto c_func = reinterpret_cast<std::function<void()>*>(user_arg);
    (*c_func)();
    delete c_func;
  };
  create_view_args.stream = reinterpret_cast<intptr_t>(nullptr);
  PJRT_Error* error =
      api_->PJRT_Client_CreateViewOfDeviceBuffer(&create_view_args);
  ASSERT_EQ(error, nullptr);
  std::unique_ptr<PJRT_Buffer, ::pjrt::PJRT_BufferDeleter> view_buffer(
      create_view_args.buffer, ::pjrt::MakeBufferDeleter(api_));
  // Copy the view back to host and compare against the expected values
  // (create_buffer fills the buffer with 41, 42, 43, 44).
  PJRT_Buffer_ToHostBuffer_Args to_host_args;
  to_host_args.struct_size = PJRT_Buffer_ToHostBuffer_Args_STRUCT_SIZE;
  to_host_args.extension_start = nullptr;
  to_host_args.src = view_buffer.get();
  xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {4});
  auto literal = std::make_shared<xla::Literal>(host_shape);
  to_host_args.host_layout = nullptr;
  to_host_args.dst = literal->untyped_data();
  to_host_args.dst_size = xla::ShapeUtil::ByteSizeOfElements(host_shape);
  to_host_args.event = nullptr;
  PJRT_Error* to_host_error = api_->PJRT_Buffer_ToHostBuffer(&to_host_args);
  ASSERT_EQ(to_host_error, nullptr);
  xla::PjRtFuture<> transfer_to_host =
      ::pjrt::ConvertCEventToCppFuture(to_host_args.event, api_);
  TF_CHECK_OK(transfer_to_host.Await());
  ASSERT_EQ(literal->data<float>().size(), 4);
  std::vector<float> float_data(4);
  std::iota(float_data.begin(), float_data.end(), 41.0f);
  EXPECT_TRUE(xla::LiteralTestUtil::Equal(
      xla::LiteralUtil::CreateR1<float>(float_data), *literal));
}
// Creates an execute context, attaches user data through the FFI extension,
// verifies the data is retrievable from the C++ FFI execution context, then
// destroys the context.
TEST_F(PjrtCApiGpuTest, CreateAndDestroyExecuteContext) {
  PJRT_ExecuteContext_Create_Args create_arg;
  create_arg.struct_size = PJRT_ExecuteContext_Create_Args_STRUCT_SIZE;
  create_arg.extension_start = nullptr;
  create_arg.context = nullptr;
  EXPECT_EQ(api_->PJRT_ExecuteContext_Create(&create_arg), nullptr);
  EXPECT_NE(create_arg.context, nullptr);
  const PJRT_FFI_Extension* ffi_extension =
      pjrt::FindExtension<PJRT_FFI_Extension>(
          api_, PJRT_Extension_Type::PJRT_Extension_Type_FFI);
  ASSERT_NE(ffi_extension, nullptr);
  // Register opaque user data under type id 42; no deleter, so `string_data`
  // stays owned by this stack frame.
  std::string string_data = "string_data";
  PJRT_FFI_UserData_Add_Args add_args;
  add_args.struct_size = PJRT_FFI_UserData_Add_Args_STRUCT_SIZE;
  add_args.extension_start = nullptr;
  add_args.user_data.type_id = 42;
  add_args.user_data.data = &string_data;
  add_args.user_data.deleter = nullptr;
  add_args.context = create_arg.context;
  EXPECT_EQ(ffi_extension->user_data_add(&add_args), nullptr);
  // The same pointer must be visible through the C++ execution context.
  TF_ASSERT_OK_AND_ASSIGN(
      auto lookup_user_data,
      create_arg.context->execute_context->ffi_context().Lookup(
          xla::ffi::TypeIdRegistry::TypeId(42)));
  EXPECT_EQ(lookup_user_data, &string_data);
  PJRT_ExecuteContext_Destroy_Args destroy_args;
  // Fix: previously initialized with PJRT_Error_Destroy_Args_STRUCT_SIZE
  // (copy/paste), which reports the wrong struct size for this call.
  destroy_args.struct_size = PJRT_ExecuteContext_Destroy_Args_STRUCT_SIZE;
  destroy_args.extension_start = nullptr;
  destroy_args.context = create_arg.context;
  api_->PJRT_ExecuteContext_Destroy(&destroy_args);
}
// Assembles a PJRT_Client_Create_Args pointing at the given options and
// key-value store callbacks. The returned struct borrows `kv_callback_data`
// and `c_options`; both must outlive the PJRT_Client_Create call.
absl::StatusOr<PJRT_Client_Create_Args> BuildCreateArg(
    ::pjrt::PJRT_KeyValueCallbackData* kv_callback_data,
    std::vector<PJRT_NamedValue>& c_options) {
  PJRT_Client_Create_Args args;
  args.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
  args.extension_start = nullptr;
  args.create_options = c_options.data();
  args.num_options = c_options.size();
  args.kv_get_callback = kv_callback_data->c_kv_get;
  args.kv_get_user_arg = &kv_callback_data->kv_get_c_func;
  args.kv_put_callback = kv_callback_data->c_kv_put;
  args.kv_put_user_arg = &kv_callback_data->kv_put_c_func;
  args.client = nullptr;
  return args;
}
// Simulates a 2-node distributed setup sharing one in-memory KV store: each
// "node" (thread) creates a client that sees both devices globally but only
// its own device as addressable.
TEST(PjrtCApiGpuKVStoreTest, CreateClientWithKVCallback) {
  auto api = GetPjrtApi();
  auto kv_store = std::make_shared<xla::InMemoryKeyValueStore>();
  std::shared_ptr<::pjrt::PJRT_KeyValueCallbackData> kv_callback_data =
      ::pjrt::ConvertToCKeyValueCallbacks(kv_store);
  xla::ClientLibrary::DestroyLocalInstances();
  int num_nodes = 2;
  std::vector<std::thread> threads;
  // Both clients must be created concurrently: each blocks on KV-store
  // coordination with the other node.
  for (int i = 0; i < num_nodes; i++) {
    threads.emplace_back([api, i, num_nodes,
                          kv_callback_data = kv_callback_data,
                          kv_store = kv_store] {
      absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
          {"num_nodes", static_cast<int64_t>(num_nodes)},
          {"node_id", static_cast<int64_t>(i)},
          {"visible_devices", std::vector<int64_t>({0})}};
      TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
                              ::pjrt::ConvertToPjRtNamedValueList(options));
      TF_ASSERT_OK_AND_ASSIGN(
          PJRT_Client_Create_Args create_arg,
          BuildCreateArg(kv_callback_data.get(), c_options));
      PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
      EXPECT_EQ(error, nullptr) << error->status.message();
      // Global device count spans both nodes...
      PJRT_Client_Devices_Args device_args;
      device_args.struct_size = PJRT_Client_Devices_Args_STRUCT_SIZE;
      device_args.extension_start = nullptr;
      device_args.client = create_arg.client;
      PJRT_Error* device_error = api->PJRT_Client_Devices(&device_args);
      EXPECT_EQ(device_error, nullptr);
      EXPECT_EQ(device_args.num_devices, 2);
      // ...but only this node's single visible device is addressable.
      PJRT_Client_AddressableDevices_Args addressable_device_args;
      addressable_device_args.struct_size =
          PJRT_Client_AddressableDevices_Args_STRUCT_SIZE;
      addressable_device_args.extension_start = nullptr;
      addressable_device_args.client = create_arg.client;
      PJRT_Error* addressable_device_error =
          api->PJRT_Client_AddressableDevices(&addressable_device_args);
      EXPECT_EQ(addressable_device_error, nullptr);
      EXPECT_EQ(addressable_device_args.num_addressable_devices, 1);
      PJRT_Client_Destroy_Args destroy_args;
      destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
      destroy_args.extension_start = nullptr;
      destroy_args.client = create_arg.client;
      PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
      CHECK_EQ(destroy_error, nullptr);
    });
  }
  for (auto& t : threads) {
    t.join();
  }
}
// Verifies every supported allocator option string yields a usable client.
// "cuda_async" is skipped on ROCm builds where it does not exist.
TEST(PjrtCApiGpuAllocatorTest, ValidOptionsParsing) {
  auto api = GetPjrtApi();
  std::vector<std::string> allocator_options = {"default", "platform", "bfc",
                                                "cuda_async"};
  for (const std::string& allocator_option : allocator_options) {
#ifdef TENSORFLOW_USE_ROCM
    if (allocator_option == "cuda_async") {
      VLOG(1) << "cuda_async allocator not available on ROCm!";
      continue;
    }
#endif
    absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
        {"allocator", allocator_option},
        {"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
    };
    // Extra knobs only apply to the pooling allocators.
    if (allocator_option == "bfc" || allocator_option == "cuda_async") {
      options["memory_fraction"] = 0.5f;
    }
    if (allocator_option == "cuda_async") {
      options["preallocate"] = true;
    }
    TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
                            ::pjrt::ConvertToPjRtNamedValueList(options));
    PJRT_Client_Create_Args create_arg;
    create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
    create_arg.extension_start = nullptr;
    create_arg.client = nullptr;
    create_arg.create_options = c_options.data();
    create_arg.num_options = c_options.size();
    PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
    EXPECT_EQ(error, nullptr) << error->status.message();
    PJRT_Client_Destroy_Args destroy_args;
    destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
    destroy_args.extension_start = nullptr;
    destroy_args.client = create_arg.client;
    PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
    CHECK_EQ(destroy_error, nullptr);
  }
}
// An unknown allocator name must fail client creation with Unimplemented
// and the exact supported-options message.
TEST(PjrtCApiGpuAllocatorTest, InvalidAllocatorOptionsParsing) {
  auto api = GetPjrtApi();
  absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
      {"allocator", static_cast<std::string>("invalid_allocator")},
      {"memory_fraction", 0.5f},
      {"preallocate", true},
  };
  TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
                          ::pjrt::ConvertToPjRtNamedValueList(options));
  PJRT_Client_Create_Args create_arg;
  create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
  create_arg.extension_start = nullptr;
  create_arg.client = nullptr;
  create_arg.create_options = c_options.data();
  create_arg.num_options = c_options.size();
  PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
  EXPECT_NE(error, nullptr);
  EXPECT_THAT(error->status,
              ::tsl::testing::StatusIs(
                  absl::StatusCode::kUnimplemented,
                  "Allocator invalid_allocator not supported for PJRT GPU "
                  "plugin. Supported allocator options are: 'default', "
                  "'platform', 'bfc' and 'cuda_async'."));
  // The returned error is owned by the caller and must be destroyed.
  PJRT_Error_Destroy_Args error_destroy_args;
  error_destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
  error_destroy_args.extension_start = nullptr;
  error_destroy_args.error = error;
  api->PJRT_Error_Destroy(&error_destroy_args);
}
// Passing platform_name "gpu" should resolve to the concrete backend name
// ("cuda" or "rocm" depending on the build).
TEST(PjrtCApiPlatformNameTest, AvailablePlatformName) {
  auto api = GetPjrtApi();
  std::string expected_platform_name_for_cuda = "cuda";
  std::string expected_platform_name_for_rocm = "rocm";
  absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
      {"platform_name", static_cast<std::string>("gpu")},
      {"allocator", static_cast<std::string>("default")},
      {"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
  };
  TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
                          ::pjrt::ConvertToPjRtNamedValueList(options));
  PJRT_Client_Create_Args create_arg;
  create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
  create_arg.extension_start = nullptr;
  create_arg.client = nullptr;
  create_arg.create_options = c_options.data();
  create_arg.num_options = c_options.size();
  PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
  EXPECT_EQ(error, nullptr) << error->status.message();
  PJRT_Client_PlatformName_Args platform_name_args;
  platform_name_args.struct_size = PJRT_Client_PlatformName_Args_STRUCT_SIZE;
  platform_name_args.extension_start = nullptr;
  platform_name_args.client = create_arg.client;
  PJRT_Error* platform_name_error =
      api->PJRT_Client_PlatformName(&platform_name_args);
  EXPECT_EQ(platform_name_error, nullptr);
#if TENSORFLOW_USE_ROCM
  EXPECT_EQ(platform_name_args.platform_name, expected_platform_name_for_rocm);
#else
  EXPECT_EQ(platform_name_args.platform_name, expected_platform_name_for_cuda);
#endif
  PJRT_Client_Destroy_Args destroy_args;
  destroy_args.struct_size = PJRT_Client_Destroy_Args_STRUCT_SIZE;
  destroy_args.extension_start = nullptr;
  destroy_args.client = create_arg.client;
  PJRT_Error* destroy_error = api->PJRT_Client_Destroy(&destroy_args);
  CHECK_EQ(destroy_error, nullptr);
}
TEST(PjrtCApiPlatformNameTest, UnavailablePlatformName) {
auto api = GetPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"platform_name", static_cast<std::string>("invalid_platform_name")},
{"allocator", static_cast<std::string>("default")},
{"visible_devices", xla::PjRtValueType(std::vector<int64_t>{0, 1})},
};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_Client_Create_Args create_arg;
create_arg.struct_size = PJRT_Client_Create_Args_STRUCT_SIZE;
create_arg.extension_start = nullptr;
create_arg.client = nullptr;
create_arg.create_options = c_options.data();
create_arg.num_options = c_options.size();
PJRT_Error* error = api->PJRT_Client_Create(&create_arg);
EXPECT_NE(error, nullptr);
EXPECT_THAT(error->status,
::tsl::testing::StatusIs(
absl::StatusCode::kNotFound,
testing::StartsWith("Could not find registered platform with "
"name: \"invalid_platform_name\". "
"Available platform names are:")));
PJRT_Error_Destroy_Args error_destroy_args;
error_destroy_args.struct_size = PJRT_Error_Destroy_Args_STRUCT_SIZE;
error_destroy_args.extension_start = nullptr;
error_destroy_args.error = error;
api->PJRT_Error_Destroy(&error_destroy_args);
}
TEST(PJRTGpuDeviceTopologyTest, CreateGpuTopology) {
auto pjrt_api = gpu_plugin::GetGpuPjrtApi();
PJRT_TopologyDescription_Create_Args args;
args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = nullptr;
args.num_options = 0;
args.create_options = nullptr;
PJRT_Error* error = pjrt_api->PJRT_TopologyDescription_Create(&args);
EXPECT_EQ(error, nullptr) << error->status.message();
auto pjrt_topology =
reinterpret_cast<const PJRT_TopologyDescription*>(args.topology);
ASSERT_NE(pjrt_topology, nullptr);
#ifdef TENSORFLOW_USE_ROCM
EXPECT_EQ(pjrt_topology->topology->platform_id(), xla::RocmId());
EXPECT_EQ(pjrt_topology->topology->platform_name(), xla::RocmName());
#else
EXPECT_EQ(pjrt_topology->topology->platform_id(), xla::CudaId());
EXPECT_EQ(pjrt_topology->topology->platform_name(), xla::CudaName());
#endif
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = const_cast<PJRT_TopologyDescription*>(pjrt_topology);
PJRT_Error* destroy_error =
pjrt_api->PJRT_TopologyDescription_Destroy(&destroy_args);
EXPECT_EQ(destroy_error, nullptr) << destroy_error->status.message();
}
constexpr char const* kTargetConfigString = R"(gpu_device_info {
threads_per_block_limit: 1024
threads_per_warp: 32
shared_memory_per_block: 49152
shared_memory_per_core: 98304
threads_per_core_limit: 2048
core_count: 80
fpus_per_core: 64
block_dim_limit_x: 2147483647
block_dim_limit_y: 65535
block_dim_limit_z: 65535
memory_bandwidth: 898048000000
l2_cache_size: 6291456
clock_rate_ghz: 1.53
device_memory_size: 34072559616
shared_memory_per_block_optin: 98304
cuda_compute_capability {
major: 7
}
registers_per_core_limit: 65536
registers_per_block_limit: 65536
}
platform_name: "CUDA"
dnn_version_info {
major: 9
minor: 3
}
device_description_str: "Tesla V100-SXM2-32GB"
)";
TEST(PJRTGpuDeviceTopologyTest, CreateExplicitGpuTopologyAndTargetConfig) {
auto pjrt_api = gpu_plugin::GetGpuPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"topology", static_cast<std::string>("16 x 2 x 4")},
{"target_config", static_cast<std::string>(kTargetConfigString)}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_TopologyDescription_Create_Args args;
args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = nullptr;
args.num_options = c_options.size();
args.create_options = c_options.data();
PJRT_Error* error = pjrt_api->PJRT_TopologyDescription_Create(&args);
EXPECT_EQ(error, nullptr) << error->status.message();
auto pjrt_topology =
reinterpret_cast<const PJRT_TopologyDescription*>(args.topology);
ASSERT_NE(pjrt_topology, nullptr);
EXPECT_EQ(pjrt_topology->topology->platform_id(), xla::CudaId());
EXPECT_EQ(pjrt_topology->topology->platform_name(), xla::CudaName());
EXPECT_EQ(pjrt_topology->topology->ProcessCount().value(), 16 * 2);
EXPECT_EQ(pjrt_topology->topology->DeviceDescriptions().size(), 16 * 2 * 4);
EXPECT_EQ(pjrt_topology->topology->DeviceDescriptions()[0]->device_kind(),
"Tesla V100-SXM2-32GB");
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = const_cast<PJRT_TopologyDescription*>(pjrt_topology);
PJRT_Error* destroy_error =
pjrt_api->PJRT_TopologyDescription_Destroy(&destroy_args);
EXPECT_EQ(destroy_error, nullptr) << destroy_error->status.message();
}
TEST(PJRTGpuDeviceTopologyTest, CreateExplicitGpuTopology) {
auto pjrt_api = gpu_plugin::GetGpuPjrtApi();
absl::flat_hash_map<std::string, xla::PjRtValueType> options = {
{"topology", static_cast<std::string>("16 x 2 x 4")}};
TF_ASSERT_OK_AND_ASSIGN(std::vector<PJRT_NamedValue> c_options,
::pjrt::ConvertToPjRtNamedValueList(options));
PJRT_TopologyDescription_Create_Args args;
args.struct_size = PJRT_TopologyDescription_Create_Args_STRUCT_SIZE;
args.extension_start = nullptr;
args.topology = nullptr;
args.num_options = c_options.size();
args.create_options = c_options.data();
PJRT_Error* error = pjrt_api->PJRT_TopologyDescription_Create(&args);
EXPECT_EQ(error, nullptr) << error->status.message();
auto pjrt_topology =
reinterpret_cast<const PJRT_TopologyDescription*>(args.topology);
ASSERT_NE(pjrt_topology, nullptr);
EXPECT_EQ(pjrt_topology->topology->ProcessCount().value(), 16 * 2);
EXPECT_EQ(pjrt_topology->topology->DeviceDescriptions().size(), 16 * 2 * 4);
PJRT_TopologyDescription_Destroy_Args destroy_args;
destroy_args.struct_size = PJRT_TopologyDescription_Destroy_Args_STRUCT_SIZE;
destroy_args.extension_start = nullptr;
destroy_args.topology = const_cast<PJRT_TopologyDescription*>(pjrt_topology);
PJRT_Error* destroy_error =
pjrt_api->PJRT_TopologyDescription_Destroy(&destroy_args);
EXPECT_EQ(destroy_error, nullptr) << destroy_error->status.message();
}
void TestCustomCallV2() {}
TEST(PjrtCApiGpuExtensionTest, CustomCallUntyped) {
PJRT_Gpu_Register_Custom_Call_Args args;
args.struct_size = PJRT_Gpu_Register_Custom_Call_Args_STRUCT_SIZE;
std::string function_name = "untyped_function_name";
args.function_name = function_name.c_str();
args.function_name_size = function_name.size();
args.api_version = 0;
args.handler_instantiate = nullptr;
args.handler_prepare = nullptr;
args.handler_initialize = nullptr;
args.handler_execute = reinterpret_cast<void*>(&TestCustomCallV2);
auto api = GetPjrtApi();
const PJRT_Extension_Base* next =
reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
while (next != nullptr &&
next->type !=
PJRT_Extension_Type::PJRT_Extension_Type_Gpu_Custom_Call) {
next = next->next;
}
ASSERT_NE(next, nullptr);
PJRT_Error* error =
reinterpret_cast<const PJRT_Gpu_Custom_Call*>(next)->custom_call(&args);
CHECK_EQ(error, nullptr);
void* custom_call = xla::CustomCallTargetRegistry::Global()->Lookup(
function_name, stream_executor::GpuPlatformName());
EXPECT_EQ(custom_call, reinterpret_cast<void*>(&TestCustomCallV2));
}
TEST(PjrtCApiGpuExtensionTest, CustomCallTyped) {
static constexpr auto* noop = +[] { return xla::ffi::Error::Success(); };
XLA_FFI_DEFINE_HANDLER(kNoop, noop, xla::ffi::Ffi::Bind());
PJRT_Gpu_Register_Custom_Call_Args args;
args.struct_size = PJRT_Gpu_Register_Custom_Call_Args_STRUCT_SIZE;
std::string function_name = "typed_function_name";
args.function_name = function_name.c_str();
args.function_name_size = function_name.size();
args.api_version = 1;
args.handler_instantiate = nullptr;
args.handler_prepare = nullptr;
args.handler_initialize = nullptr;
args.handler_execute = reinterpret_cast<void*>(kNoop);
auto api = GetPjrtApi();
const PJRT_Extension_Base* next =
reinterpret_cast<const PJRT_Extension_Base*>(api->extension_start);
while (next != nullptr &&
next->type !=
PJRT_Extension_Type::PJRT_Extension_Type_Gpu_Custom_Call) {
next = next->next;
}
ASSERT_NE(next, nullptr);
PJRT_Error* error =
reinterpret_cast<const PJRT_Gpu_Custom_Call*>(next)->custom_call(&args);
CHECK_EQ(error, nullptr);
auto registration =
xla::ffi::FindHandler(function_name, stream_executor::GpuPlatformName())
.value();
EXPECT_EQ(reinterpret_cast<void*>(registration.bundle.execute), kNoop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_gpu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/c/pjrt_c_api_gpu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d860f9ce-8480-49bb-8ab4-5ebe9fdd68e0 | cpp | google/quiche | oghttp2_util | quiche/http2/adapter/oghttp2_util.cc | quiche/http2/adapter/oghttp2_util_test.cc | #include "quiche/http2/adapter/oghttp2_util.h"
namespace http2 {
namespace adapter {
quiche::HttpHeaderBlock ToHeaderBlock(absl::Span<const Header> headers) {
quiche::HttpHeaderBlock block;
for (const Header& header : headers) {
absl::string_view name = GetStringView(header.first).first;
absl::string_view value = GetStringView(header.second).first;
block.AppendValueOrAddHeader(name, value);
}
return block;
}
}
} | #include "quiche/http2/adapter/oghttp2_util.h"
#include <utility>
#include <vector>
#include "quiche/http2/adapter/http2_protocol.h"
#include "quiche/http2/adapter/test_frame_sequence.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
using HeaderPair = std::pair<absl::string_view, absl::string_view>;
TEST(ToHeaderBlock, EmptySpan) {
quiche::HttpHeaderBlock block = ToHeaderBlock({});
EXPECT_TRUE(block.empty());
}
TEST(ToHeaderBlock, ExampleRequestHeaders) {
const std::vector<HeaderPair> pairs = {{":authority", "example.com"},
{":method", "GET"},
{":path", "/example.html"},
{":scheme", "http"},
{"accept", "text/plain, text/html"}};
const std::vector<Header> headers = ToHeaders(pairs);
quiche::HttpHeaderBlock block = ToHeaderBlock(headers);
EXPECT_THAT(block, testing::ElementsAreArray(pairs));
}
TEST(ToHeaderBlock, ExampleResponseHeaders) {
const std::vector<HeaderPair> pairs = {
{":status", "403"},
{"content-length", "1023"},
{"x-extra-info", "humblest apologies"}};
const std::vector<Header> headers = ToHeaders(pairs);
quiche::HttpHeaderBlock block = ToHeaderBlock(headers);
EXPECT_THAT(block, testing::ElementsAreArray(pairs));
}
TEST(ToHeaderBlock, RepeatedRequestHeaderNames) {
const std::vector<HeaderPair> pairs = {
{":authority", "example.com"}, {":method", "GET"},
{":path", "/example.html"}, {":scheme", "http"},
{"cookie", "chocolate_chips=yes"}, {"accept", "text/plain, text/html"},
{"cookie", "raisins=no"}};
const std::vector<HeaderPair> expected = {
{":authority", "example.com"},
{":method", "GET"},
{":path", "/example.html"},
{":scheme", "http"},
{"cookie", "chocolate_chips=yes; raisins=no"},
{"accept", "text/plain, text/html"}};
const std::vector<Header> headers = ToHeaders(pairs);
quiche::HttpHeaderBlock block = ToHeaderBlock(headers);
EXPECT_THAT(block, testing::ElementsAreArray(expected));
}
TEST(ToHeaderBlock, RepeatedResponseHeaderNames) {
const std::vector<HeaderPair> pairs = {
{":status", "403"}, {"x-extra-info", "sorry"},
{"content-length", "1023"}, {"x-extra-info", "humblest apologies"},
{"content-length", "1024"}, {"set-cookie", "chocolate_chips=yes"},
{"set-cookie", "raisins=no"}};
const std::vector<HeaderPair> expected = {
{":status", "403"},
{"x-extra-info", absl::string_view("sorry\0humblest apologies", 24)},
{"content-length", absl::string_view("1023"
"\0"
"1024",
9)},
{"set-cookie", absl::string_view("chocolate_chips=yes\0raisins=no", 30)}};
const std::vector<Header> headers = ToHeaders(pairs);
quiche::HttpHeaderBlock block = ToHeaderBlock(headers);
EXPECT_THAT(block, testing::ElementsAreArray(expected));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/oghttp2_util.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/oghttp2_util_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1cc91eed-892b-4197-b92b-ad38eaf993c4 | cpp | google/arolla | lru_cache | arolla/util/lru_cache.h | arolla/util/lru_cache_test.cc | #ifndef AROLLA_UTIL_LRU_CACHE_H_
#define AROLLA_UTIL_LRU_CACHE_H_
#include <cstddef>
#include <functional>
#include <list>
#include <utility>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
namespace arolla {
template <typename Key, typename Value, typename KeyHash = absl::Hash<Key>,
typename KeyEq = std::equal_to<>>
class LruCache {
public:
explicit LruCache(size_t capacity) : capacity_(capacity) {
DCHECK_GT(capacity, 0);
index_.reserve(capacity + 1);
}
LruCache(const LruCache&) = delete;
LruCache& operator=(const LruCache&) = delete;
template <typename K>
[[nodiscard]] absl::Nullable<const Value*> LookupOrNull(K&& key) {
if (auto it = index_.find(std::forward<K>(key)); it != index_.end()) {
entries_.splice(entries_.begin(), entries_, it->entry);
return &it->entry->second;
}
return nullptr;
}
template <typename K, typename V>
[[nodiscard]] absl::Nonnull<const Value*> Put(K&& key, V&& value) {
entries_.emplace_front(std::forward<K>(key), std::forward<V>(value));
const auto& [it, ok] = index_.emplace(IndexEntry{entries_.begin()});
if (!ok) {
entries_.pop_front();
entries_.splice(entries_.begin(), entries_, it->entry);
} else if (entries_.size() > capacity_) {
index_.erase(entries_.back().first);
entries_.pop_back();
}
DCHECK_LE(entries_.size(), capacity_);
DCHECK_EQ(entries_.size(), index_.size());
return &entries_.front().second;
}
void Clear() {
entries_.clear();
index_.clear();
}
private:
using Entry = std::pair<const Key, const Value>;
struct IndexEntry {
typename std::list<Entry>::iterator entry;
};
struct IndexRecordHash {
using is_transparent = void;
size_t operator()(const IndexEntry& index_record) const {
return KeyHash()(index_record.entry->first);
}
template <typename K>
size_t operator()(K&& key) const {
return KeyHash()(std::forward<K>(key));
}
};
struct IndexRecordEq {
using is_transparent = void;
bool operator()(const IndexEntry& lhs, const IndexEntry& rhs) const {
return KeyEq()(lhs.entry->first, rhs.entry->first);
}
template <typename K>
bool operator()(const IndexEntry& lhs, K&& rhs) const {
return KeyEq()(lhs.entry->first, std::forward<K>(rhs));
}
};
using Index = absl::flat_hash_set<IndexEntry, IndexRecordHash, IndexRecordEq>;
size_t capacity_;
std::list<Entry> entries_;
Index index_;
};
}
#endif | #include "arolla/util/lru_cache.h"
#include <functional>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"
namespace arolla {
namespace {
using ::testing::IsNull;
using ::testing::Pointee;
TEST(LruCache, BasicBehaviuor) {
LruCache<int, double, std::hash<int>, std::equal_to<int>> cache(2);
ASSERT_THAT(cache.LookupOrNull(1), IsNull());
ASSERT_THAT(cache.LookupOrNull(2), IsNull());
ASSERT_THAT(cache.LookupOrNull(3), IsNull());
(void)cache.Put(1, 1.5);
ASSERT_THAT(cache.LookupOrNull(1), Pointee(1.5));
ASSERT_THAT(cache.LookupOrNull(2), IsNull());
ASSERT_THAT(cache.LookupOrNull(3), IsNull());
(void)cache.Put(2, 2.5);
ASSERT_THAT(cache.LookupOrNull(1), Pointee(1.5));
ASSERT_THAT(cache.LookupOrNull(2), Pointee(2.5));
ASSERT_THAT(cache.LookupOrNull(3), IsNull());
(void)cache.Put(3, 3.5);
ASSERT_THAT(cache.LookupOrNull(1), IsNull());
ASSERT_THAT(cache.LookupOrNull(2), Pointee(2.5));
ASSERT_THAT(cache.LookupOrNull(3), Pointee(3.5));
}
TEST(LruCache, TransparentKeyType) {
LruCache<std::string, int, absl::Hash<absl::string_view>, std::equal_to<>>
cache(3);
(void)cache.Put("1", 1);
(void)cache.Put(absl::string_view("2"), 2);
(void)cache.Put(std::string("3"), 3);
ASSERT_THAT(cache.LookupOrNull("1"), Pointee(1));
ASSERT_THAT(cache.LookupOrNull("2"), Pointee(2));
ASSERT_THAT(cache.LookupOrNull("3"), Pointee(3));
ASSERT_THAT(cache.LookupOrNull(absl::string_view("1")), Pointee(1));
ASSERT_THAT(cache.LookupOrNull(absl::string_view("2")), Pointee(2));
ASSERT_THAT(cache.LookupOrNull(absl::string_view("3")), Pointee(3));
ASSERT_THAT(cache.LookupOrNull(std::string("1")), Pointee(1));
ASSERT_THAT(cache.LookupOrNull(std::string("2")), Pointee(2));
ASSERT_THAT(cache.LookupOrNull(std::string("3")), Pointee(3));
}
TEST(LruCache, Clear) {
LruCache<int, double> cache(2);
ASSERT_THAT(cache.LookupOrNull(1), IsNull());
(void)cache.Put(1, 1.5);
ASSERT_THAT(cache.LookupOrNull(1), Pointee(1.5));
cache.Clear();
ASSERT_THAT(cache.LookupOrNull(1), IsNull());
}
TEST(LruCache, Overwrite) {
LruCache<int, double> cache(2);
(void)cache.Put(1, 1.5);
ASSERT_THAT(cache.LookupOrNull(1), Pointee(1.5));
(void)cache.Put(1, 2.5);
ASSERT_THAT(cache.LookupOrNull(1), Pointee(1.5));
}
TEST(LruCache, EvictionOrder) {
{
LruCache<int, double> cache(2);
(void)cache.Put(1, 1.0);
(void)cache.Put(2, 2.0);
(void)cache.Put(3, 3.0);
EXPECT_THAT(cache.LookupOrNull(1), IsNull());
EXPECT_THAT(cache.LookupOrNull(2), Pointee(2.0));
EXPECT_THAT(cache.LookupOrNull(3), Pointee(3.0));
}
{
LruCache<int, double> cache(2);
(void)cache.Put(1, 1.0);
(void)cache.Put(2, 2.0);
(void)cache.LookupOrNull(1);
(void)cache.Put(3, 3.0);
EXPECT_THAT(cache.LookupOrNull(1), Pointee(1.0));
EXPECT_THAT(cache.LookupOrNull(2), IsNull());
EXPECT_THAT(cache.LookupOrNull(3), Pointee(3.0));
}
{
LruCache<int, double> cache(2);
(void)cache.Put(1, 1.0);
(void)cache.Put(2, 2.0);
(void)cache.Put(1, 1.1);
(void)cache.Put(3, 3.0);
EXPECT_THAT(cache.LookupOrNull(1), Pointee(1.0));
EXPECT_THAT(cache.LookupOrNull(2), IsNull());
EXPECT_THAT(cache.LookupOrNull(3), Pointee(3.0));
}
}
TEST(LruCache, LookupPointerStability) {
LruCache<int, double> cache(3);
(void)cache.Put(1, 1.0);
(void)cache.Put(2, 2.0);
(void)cache.Put(3, 3.0);
auto* p0 = cache.LookupOrNull(0);
auto* p1 = cache.LookupOrNull(1);
auto* p2 = cache.LookupOrNull(2);
auto* q0 = cache.LookupOrNull(0);
auto* q1 = cache.LookupOrNull(1);
auto* q2 = cache.LookupOrNull(2);
EXPECT_EQ(p0, q0);
EXPECT_EQ(p1, q1);
EXPECT_EQ(p2, q2);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/lru_cache.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/lru_cache_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f24f6e3d-aa16-43d2-9f4f-0415e71257c3 | cpp | google/arolla | dict_codegen_literal | arolla/codegen/dict/dict_codegen_literal.cc | arolla/codegen/dict/dict_codegen_literal_test.cc | #include <cstdint>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/codegen/expr/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/dict/dict_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::codegen {
namespace {
template <class T>
absl::StatusOr<std::string> CppDictLiteralRepr(TypedRef dict_ref) {
ASSIGN_OR_RETURN(const KeyToRowDict<T>& dict, dict_ref.As<KeyToRowDict<T>>());
std::vector<std::pair<T, int64_t>> sorted_dict(dict.map().begin(),
dict.map().end());
std::sort(sorted_dict.begin(), sorted_dict.end());
std::ostringstream oss;
ASSIGN_OR_RETURN(std::string type_name, CppTypeName(::arolla::GetQType<T>()));
oss << "::arolla::KeyToRowDict<" << type_name << ">{";
for (const auto& [k, v] : sorted_dict) {
ASSIGN_OR_RETURN(std::string key_repr,
CppLiteralRepr(TypedRef::FromValue(k)));
ASSIGN_OR_RETURN(std::string value_repr,
CppLiteralRepr(TypedRef::FromValue(v)));
oss << "{" << key_repr << "," << value_repr << "},";
}
oss << "}";
return oss.str();
}
#define REGISTER_CPP_TYPE(NAME, CTYPE) \
{ \
auto status = []() -> absl::Status { \
ASSIGN_OR_RETURN(std::string type_name, CppTypeName(GetQType<CTYPE>())); \
return RegisterCppType( \
GetKeyToRowDictQType<CTYPE>(), \
absl::StrFormat("::arolla::KeyToRowDict<%s>", type_name), \
CppDictLiteralRepr<CTYPE>); \
}(); \
if (!status.ok()) { \
LOG(FATAL) << status.message(); \
} \
}
int Register() {
REGISTER_CPP_TYPE(INT32, int32_t);
REGISTER_CPP_TYPE(INT64, int64_t);
REGISTER_CPP_TYPE(UINT64, uint64_t);
REGISTER_CPP_TYPE(BOOLEAN, bool);
REGISTER_CPP_TYPE(BYTES, ::arolla::Bytes);
REGISTER_CPP_TYPE(TEXT, ::arolla::Text);
return 0;
}
int registered = Register();
}
} | #include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/codegen/expr/types.h"
#include "arolla/qtype/dict/dict_types.h"
#include "arolla/qtype/typed_ref.h"
namespace {
using ::absl_testing::IsOkAndHolds;
TEST(DictLiteralTest, Sanity) {
EXPECT_THAT(
arolla::codegen::CppTypeName(arolla::GetKeyToRowDictQType<int32_t>()),
IsOkAndHolds("::arolla::KeyToRowDict<int32_t>"));
EXPECT_THAT(arolla::codegen::CppLiteralRepr(arolla::TypedRef::FromValue(
::arolla::KeyToRowDict<int32_t>())),
IsOkAndHolds("::arolla::KeyToRowDict<int32_t>{}"));
EXPECT_THAT(
arolla::codegen::CppLiteralRepr(arolla::TypedRef::FromValue(
arolla::KeyToRowDict<int32_t>{{{5, 2}, {2, 3}}})),
IsOkAndHolds("::arolla::KeyToRowDict<int32_t>{{int32_t{2},int64_t{3}},{"
"int32_t{5},int64_t{2}},}"));
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/dict/dict_codegen_literal.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/dict/dict_codegen_literal_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
28c9bf17-b5d5-4830-a34f-6402572d733a | cpp | tensorflow/tensorflow | tflite_inference_stage | tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.cc | tensorflow/lite/tools/evaluation/stages/tflite_inference_stage_test.cc | #include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
#include <cstring>
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/profiling/time.h"
#include "tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {}
namespace tflite {
namespace evaluation {
namespace {
TfLiteModelInfo GetTfliteModelInfo(const Interpreter& interpreter) {
TfLiteModelInfo model_info;
for (int i : interpreter.inputs()) {
model_info.inputs.push_back(interpreter.tensor(i));
}
for (int i : interpreter.outputs()) {
model_info.outputs.push_back(interpreter.tensor(i));
}
return model_info;
}
}
void TfliteInferenceStage::UpdateModelInfo() {
model_info_ = GetTfliteModelInfo(*interpreter_);
outputs_.clear();
outputs_.reserve(interpreter_->outputs().size());
for (int i : interpreter_->outputs()) {
TfLiteTensor* tensor = interpreter_->tensor(i);
outputs_.push_back(tensor->data.raw);
}
}
TfLiteStatus TfliteInferenceStage::ResizeInputs(
const std::vector<std::vector<int>>& shapes) {
const std::vector<int>& interpreter_inputs = interpreter_->inputs();
if (interpreter_inputs.size() != shapes.size()) {
LOG(ERROR) << "New shape is not compatible";
return kTfLiteError;
}
for (int j = 0; j < shapes.size(); ++j) {
int i = interpreter_inputs[j];
TfLiteTensor* t = interpreter_->tensor(i);
if (t->type != kTfLiteString) {
TF_LITE_ENSURE_STATUS(interpreter_->ResizeInputTensor(i, shapes[j]));
}
}
TF_LITE_ENSURE_STATUS(interpreter_->AllocateTensors());
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::ApplyCustomDelegate(
Interpreter::TfLiteDelegatePtr delegate) {
if (!interpreter_) {
LOG(ERROR) << "Stage not initialized before calling ApplyCustomDelegate";
return kTfLiteError;
}
if (!delegate) {
LOG(WARNING)
<< "Tried to apply null TfLiteDelegatePtr to TfliteInferenceStage";
return kTfLiteOk;
}
delegates_.push_back(std::move(delegate));
TF_LITE_ENSURE_STATUS(
interpreter_->ModifyGraphWithDelegate(delegates_.back().get()));
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::Init(
const DelegateProviders* delegate_providers) {
if (!config_.specification().has_tflite_inference_params()) {
LOG(ERROR) << "TfliteInferenceParams not provided";
return kTfLiteError;
}
auto& params = config_.specification().tflite_inference_params();
if (!params.has_model_file_path()) {
LOG(ERROR) << "Model path not provided";
return kTfLiteError;
}
std::ifstream model_check(params.model_file_path());
if (!model_check.good()) {
LOG(ERROR) << "Model file not found";
return kTfLiteError;
}
model_ = FlatBufferModel::BuildFromFile(params.model_file_path().c_str());
bool apply_default_delegates = true;
if (delegate_providers != nullptr) {
const auto& provider_params = delegate_providers->GetAllParams();
if (provider_params.HasParam("use_xnnpack") &&
provider_params.HasValueSet<bool>("use_xnnpack") &&
!provider_params.Get<bool>("use_xnnpack")) {
apply_default_delegates = false;
}
}
if (apply_default_delegates) {
resolver_ = std::make_unique<ops::builtin::BuiltinOpResolver>();
} else {
resolver_ = std::make_unique<
ops::builtin::BuiltinOpResolverWithoutDefaultDelegates>();
}
RegisterSelectedOps(resolver_.get());
InterpreterBuilder(*model_, *resolver_)(&interpreter_);
if (!interpreter_) {
LOG(ERROR) << "Could not build interpreter";
return kTfLiteError;
}
interpreter_->SetNumThreads(params.num_threads());
if (!delegate_providers) {
std::string error_message;
auto delegate = CreateTfLiteDelegate(params, &error_message);
if (delegate) {
delegates_.push_back(std::move(delegate));
LOG(INFO) << "Successfully created "
<< params.Delegate_Name(params.delegate()) << " delegate.";
} else {
LOG(WARNING) << error_message;
}
} else {
auto delegates = delegate_providers->CreateAllDelegates(params);
for (auto& one : delegates) delegates_.push_back(std::move(one.delegate));
}
for (int i = 0; i < delegates_.size(); ++i) {
if (interpreter_->ModifyGraphWithDelegate(delegates_[i].get()) !=
kTfLiteOk) {
LOG(FATAL) << "Failed to apply delegate " << i;
}
}
interpreter_->AllocateTensors();
UpdateModelInfo();
return kTfLiteOk;
}
TfLiteStatus TfliteInferenceStage::Run() {
if (!inputs_) {
LOG(ERROR) << "Input data not set";
return kTfLiteError;
}
for (int i = 0; i < interpreter_->inputs().size(); ++i) {
TfLiteTensor* tensor = interpreter_->tensor(interpreter_->inputs()[i]);
tensor->data.raw = static_cast<char*>(inputs_->at(i));
}
auto& params = config_.specification().tflite_inference_params();
for (int i = 0; i < params.invocations_per_run(); ++i) {
int64_t start_us = profiling::time::NowMicros();
if (interpreter_->Invoke() != kTfLiteOk) {
LOG(ERROR) << "TFLite interpreter failed to invoke at run " << i;
return kTfLiteError;
}
latency_stats_.UpdateStat(profiling::time::NowMicros() - start_us);
}
return kTfLiteOk;
}
EvaluationStageMetrics TfliteInferenceStage::LatestMetrics() {
auto& params = config_.specification().tflite_inference_params();
EvaluationStageMetrics metrics;
auto* latency_metrics =
metrics.mutable_process_metrics()->mutable_total_latency();
latency_metrics->set_last_us(latency_stats_.newest());
latency_metrics->set_max_us(latency_stats_.max());
latency_metrics->set_min_us(latency_stats_.min());
latency_metrics->set_sum_us(latency_stats_.sum());
latency_metrics->set_avg_us(latency_stats_.avg());
latency_metrics->set_std_deviation_us(latency_stats_.std_deviation());
metrics.set_num_runs(
static_cast<int>(latency_stats_.count() / params.invocations_per_run()));
auto* inference_metrics =
metrics.mutable_process_metrics()->mutable_tflite_inference_metrics();
inference_metrics->set_num_inferences(latency_stats_.count());
return metrics;
}
}
} | #include "tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_config.pb.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace evaluation {
namespace {
constexpr char kTfliteInferenceStageName[] = "tflite_inference_stage";
constexpr char kModelPath[] =
"tensorflow/lite/testdata/add_quantized.bin";
constexpr int kTotalElements = 1 * 8 * 8 * 3;
template <typename T>
T* SetValues(T array[], T value) {
for (int i = 0; i < kTotalElements; i++) {
array[i] = value;
}
return array;
}
EvaluationStageConfig GetTfliteInferenceStageConfig() {
EvaluationStageConfig config;
config.set_name(kTfliteInferenceStageName);
auto* params =
config.mutable_specification()->mutable_tflite_inference_params();
params->set_model_file_path(kModelPath);
params->set_invocations_per_run(2);
return config;
}
TEST(TfliteInferenceStage, NoParams) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()->clear_tflite_inference_params();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, NoModelPath) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->clear_model_file_path();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, IncorrectModelPath) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
config.mutable_specification()
->mutable_tflite_inference_params()
->set_model_file_path("xyz.tflite");
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteError);
}
TEST(TfliteInferenceStage, NoInputData) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.Run(), kTfLiteError);
}
TEST(TfliteInferenceStage, CorrectModelInfo) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
const TfLiteModelInfo* model_info = stage.GetModelInfo();
EXPECT_EQ(model_info->inputs.size(), 1);
const TfLiteTensor* tensor = model_info->inputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, kTotalElements);
const TfLiteIntArray* input_shape = tensor->dims;
EXPECT_EQ(input_shape->data[0], 1);
EXPECT_EQ(input_shape->data[1], 8);
EXPECT_EQ(input_shape->data[2], 8);
EXPECT_EQ(input_shape->data[3], 3);
EXPECT_EQ(model_info->outputs.size(), 1);
tensor = model_info->outputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, kTotalElements);
const TfLiteIntArray* output_shape = tensor->dims;
EXPECT_EQ(output_shape->data[0], 1);
EXPECT_EQ(output_shape->data[1], 8);
EXPECT_EQ(output_shape->data[2], 8);
EXPECT_EQ(output_shape->data[3], 3);
}
TEST(TfliteInferenceStage, TestResizeModel) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.ResizeInputs({{3, 8, 8, 3}}), kTfLiteOk);
const TfLiteModelInfo* model_info = stage.GetModelInfo();
EXPECT_EQ(model_info->inputs.size(), 1);
const TfLiteTensor* tensor = model_info->inputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, 3 * kTotalElements);
const TfLiteIntArray* input_shape = tensor->dims;
EXPECT_EQ(input_shape->data[0], 3);
EXPECT_EQ(input_shape->data[1], 8);
EXPECT_EQ(input_shape->data[2], 8);
EXPECT_EQ(input_shape->data[3], 3);
EXPECT_EQ(model_info->outputs.size(), 1);
tensor = model_info->outputs[0];
EXPECT_EQ(tensor->type, kTfLiteUInt8);
EXPECT_EQ(tensor->bytes, 3 * kTotalElements);
const TfLiteIntArray* output_shape = tensor->dims;
EXPECT_EQ(output_shape->data[0], 3);
EXPECT_EQ(output_shape->data[1], 8);
EXPECT_EQ(output_shape->data[2], 8);
EXPECT_EQ(output_shape->data[3], 3);
}
TEST(TfliteInferenceStage, CorrectOutput) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
EXPECT_EQ(stage.Init(), kTfLiteOk);
uint8_t input_tensor[kTotalElements];
SetValues(input_tensor, static_cast<uint8_t>(2));
std::vector<void*> inputs;
inputs.push_back(input_tensor);
stage.SetInputs(inputs);
EXPECT_EQ(stage.Run(), kTfLiteOk);
uint8_t* output_tensor = static_cast<uint8_t*>(stage.GetOutputs()->at(0));
for (int i = 0; i < kTotalElements; i++) {
EXPECT_EQ(output_tensor[i], static_cast<uint8_t>(6));
}
EvaluationStageMetrics metrics = stage.LatestMetrics();
EXPECT_EQ(metrics.num_runs(), 1);
const auto& latency = metrics.process_metrics().total_latency();
const auto max_latency = latency.max_us();
EXPECT_GT(max_latency, 0);
EXPECT_LT(max_latency, 1e7);
EXPECT_LE(latency.last_us(), max_latency);
EXPECT_LE(latency.min_us(), max_latency);
EXPECT_GE(latency.sum_us(), max_latency);
EXPECT_LE(latency.avg_us(), max_latency);
EXPECT_TRUE(latency.has_std_deviation_us());
EXPECT_EQ(
metrics.process_metrics().tflite_inference_metrics().num_inferences(), 2);
}
TEST(TfliteInferenceStage, CustomDelegate) {
EvaluationStageConfig config = GetTfliteInferenceStageConfig();
TfliteInferenceStage stage(config);
Interpreter::TfLiteDelegatePtr test_delegate = CreateNNAPIDelegate();
EXPECT_NE(stage.ApplyCustomDelegate(std::move(test_delegate)), kTfLiteOk);
EXPECT_EQ(stage.Init(), kTfLiteOk);
EXPECT_EQ(stage.ApplyCustomDelegate(std::move(test_delegate)), kTfLiteOk);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/tflite_inference_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/evaluation/stages/tflite_inference_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a6114bb-5dc2-413d-b16f-9cc25ad67de1 | cpp | abseil/abseil-cpp | notification | absl/synchronization/notification.cc | absl/synchronization/notification_test.cc | #include "absl/synchronization/notification.h"
#include <atomic>
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/tracing.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Wakes up all current and future waiters.  Must be called at most once per
// Notification object.
void Notification::Notify() {
  base_internal::TraceSignal(this, TraceObjectKind());
  // Holding mutex_ while setting the flag is what makes the waiters'
  // Mutex::Condition-based LockWhen() calls observe the change.
  MutexLock l(&this->mutex_);
#ifndef NDEBUG
  // Debug builds crash loudly on a double Notify() instead of silently
  // accepting it.  Relaxed load is sufficient: mutex_ is held.
  if (ABSL_PREDICT_FALSE(notified_yet_.load(std::memory_order_relaxed))) {
    ABSL_RAW_LOG(
        FATAL,
        "Notify() method called more than once for Notification object %p",
        static_cast<void *>(this));
  }
#endif
  // Release store: presumably pairs with an acquire load in
  // HasBeenNotifiedInternal (declared in the header) so that writes made
  // before Notify() are visible to threads that observe the flag — confirm
  // against notification.h.
  notified_yet_.store(true, std::memory_order_release);
}
Notification::~Notification() {
  // Acquire and release mutex_ so that a thread still inside Notify() (which
  // holds mutex_) cannot race with the destruction of this object.
  MutexLock l(&this->mutex_);
}
// Blocks until Notify() has been called.  Fast path: when the flag is already
// set, no lock is taken at all.
void Notification::WaitForNotification() const {
  base_internal::TraceWait(this, TraceObjectKind());
  if (!HasBeenNotifiedInternal(&this->notified_yet_)) {
    // LockWhen() returns only once the condition holds, i.e. after Notify().
    this->mutex_.LockWhen(
        Condition(&HasBeenNotifiedInternal, &this->notified_yet_));
    this->mutex_.Unlock();
  }
  base_internal::TraceContinue(this, TraceObjectKind());
}
// Like WaitForNotification(), but gives up after `timeout`.  Returns true iff
// the notification arrived before the timeout expired.
bool Notification::WaitForNotificationWithTimeout(
    absl::Duration timeout) const {
  base_internal::TraceWait(this, TraceObjectKind());
  bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
  if (!notified) {
    notified = this->mutex_.LockWhenWithTimeout(
        Condition(&HasBeenNotifiedInternal, &this->notified_yet_), timeout);
    this->mutex_.Unlock();
  }
  // Tracing convention here: success reports `this`, timeout reports nullptr.
  base_internal::TraceContinue(notified ? this : nullptr, TraceObjectKind());
  return notified;
}
// Deadline-based variant of the timed wait.  Returns true iff the
// notification arrived before `deadline`.
bool Notification::WaitForNotificationWithDeadline(absl::Time deadline) const {
  base_internal::TraceWait(this, TraceObjectKind());
  bool notified = HasBeenNotifiedInternal(&this->notified_yet_);
  if (!notified) {
    notified = this->mutex_.LockWhenWithDeadline(
        Condition(&HasBeenNotifiedInternal, &this->notified_yet_), deadline);
    this->mutex_.Unlock();
  }
  base_internal::TraceContinue(notified ? this : nullptr, TraceObjectKind());
  return notified;
}
ABSL_NAMESPACE_END
} | #include "absl/synchronization/notification.h"
#include <thread>
#include <tuple>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/tracing.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class ThreadSafeCounter {
public:
ThreadSafeCounter() : count_(0) {}
void Increment() {
MutexLock lock(&mutex_);
++count_;
}
int Get() const {
MutexLock lock(&mutex_);
return count_;
}
void WaitUntilGreaterOrEqual(int n) {
MutexLock lock(&mutex_);
auto cond = [this, n]() { return count_ >= n; };
mutex_.Await(Condition(&cond));
}
private:
mutable Mutex mutex_;
int count_;
};
static void RunWorker(int i, ThreadSafeCounter* ready_counter,
Notification* notification,
ThreadSafeCounter* done_counter) {
ready_counter->Increment();
notification->WaitForNotification();
done_counter->Increment();
}
static void BasicTests(bool notify_before_waiting, Notification* notification) {
EXPECT_FALSE(notification->HasBeenNotified());
EXPECT_FALSE(
notification->WaitForNotificationWithTimeout(absl::Milliseconds(0)));
EXPECT_FALSE(notification->WaitForNotificationWithDeadline(absl::Now()));
const absl::Duration delay = absl::Milliseconds(50);
const absl::Time start = absl::Now();
EXPECT_FALSE(notification->WaitForNotificationWithTimeout(delay));
const absl::Duration elapsed = absl::Now() - start;
const absl::Duration slop = absl::Milliseconds(5);
EXPECT_LE(delay - slop, elapsed)
<< "WaitForNotificationWithTimeout returned " << delay - elapsed
<< " early (with " << slop << " slop), start time was " << start;
ThreadSafeCounter ready_counter;
ThreadSafeCounter done_counter;
if (notify_before_waiting) {
notification->Notify();
}
const int kNumThreads = 10;
std::vector<std::thread> workers;
for (int i = 0; i < kNumThreads; ++i) {
workers.push_back(std::thread(&RunWorker, i, &ready_counter, notification,
&done_counter));
}
if (!notify_before_waiting) {
ready_counter.WaitUntilGreaterOrEqual(kNumThreads);
EXPECT_EQ(0, done_counter.Get());
notification->Notify();
}
notification->WaitForNotification();
EXPECT_TRUE(notification->HasBeenNotified());
EXPECT_TRUE(notification->WaitForNotificationWithTimeout(absl::Seconds(0)));
EXPECT_TRUE(notification->WaitForNotificationWithDeadline(absl::Now()));
for (std::thread& worker : workers) {
worker.join();
}
EXPECT_EQ(kNumThreads, ready_counter.Get());
EXPECT_EQ(kNumThreads, done_counter.Get());
}
TEST(NotificationTest, SanityTest) {
Notification local_notification1, local_notification2;
BasicTests(false, &local_notification1);
BasicTests(true, &local_notification2);
}
#if ABSL_HAVE_ATTRIBUTE_WEAK
namespace base_internal {
namespace {
using TraceRecord = std::tuple<const void*, ObjectKind>;
thread_local TraceRecord tls_signal;
thread_local TraceRecord tls_wait;
thread_local TraceRecord tls_continue;
thread_local TraceRecord tls_observed;
}
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(const void* object,
ObjectKind kind) {
tls_wait = {object, kind};
}
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(const void* object,
ObjectKind kind) {
tls_continue = {object, kind};
}
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(const void* object,
ObjectKind kind) {
tls_signal = {object, kind};
}
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(const void* object,
ObjectKind kind) {
tls_observed = {object, kind};
}
}
TEST(NotificationTest, TracesNotify) {
Notification n;
tls_signal = {};
n.Notify();
EXPECT_EQ(tls_signal, TraceRecord(&n, ObjectKind::kNotification));
}
TEST(NotificationTest, TracesWaitForNotification) {
Notification n;
n.Notify();
tls_wait = tls_continue = {};
n.WaitForNotification();
EXPECT_EQ(tls_wait, TraceRecord(&n, ObjectKind::kNotification));
EXPECT_EQ(tls_continue, TraceRecord(&n, ObjectKind::kNotification));
}
TEST(NotificationTest, TracesWaitForNotificationWithTimeout) {
Notification n;
tls_wait = tls_continue = {};
n.WaitForNotificationWithTimeout(absl::Milliseconds(1));
EXPECT_EQ(tls_wait, TraceRecord(&n, ObjectKind::kNotification));
EXPECT_EQ(tls_continue, TraceRecord(nullptr, ObjectKind::kNotification));
n.Notify();
tls_wait = tls_continue = {};
n.WaitForNotificationWithTimeout(absl::Milliseconds(1));
EXPECT_EQ(tls_wait, TraceRecord(&n, ObjectKind::kNotification));
EXPECT_EQ(tls_continue, TraceRecord(&n, ObjectKind::kNotification));
}
TEST(NotificationTest, TracesHasBeenNotified) {
Notification n;
tls_observed = {};
ASSERT_FALSE(n.HasBeenNotified());
EXPECT_EQ(tls_observed, TraceRecord(nullptr, ObjectKind::kUnknown));
n.Notify();
tls_observed = {};
ASSERT_TRUE(n.HasBeenNotified());
EXPECT_EQ(tls_observed, TraceRecord(&n, ObjectKind::kNotification));
}
}
#endif
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/notification.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/synchronization/notification_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
fadd1566-bcfe-4089-be0e-cc0ef7650b6e | cpp | tensorflow/tensorflow | elementwise | tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc | tensorflow/lite/delegates/gpu/cl/kernels/elementwise_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/elementwise.h"
#include <any>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/node_shader.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
class ElementwiseOneArgument : public NodeShader {
public:
explicit ElementwiseOneArgument(OperationType operation_type)
: operation_type_(operation_type) {}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::string source;
switch (operation_type_) {
case OperationType::ABS:
source = "value_0 = abs(value_0);";
break;
case OperationType::COS:
source = "value_0 = cos(value_0);";
break;
case OperationType::COPY:
source = "value_0 = value_0;";
break;
case OperationType::ELU:
source = R"(
value_0.x = value_0.x < 0.0 ? exp(value_0.x) - 1.0 : value_0.x;
value_0.y = value_0.y < 0.0 ? exp(value_0.y) - 1.0 : value_0.y;
value_0.z = value_0.z < 0.0 ? exp(value_0.z) - 1.0 : value_0.z;
value_0.w = value_0.w < 0.0 ? exp(value_0.w) - 1.0 : value_0.w;
)";
break;
case OperationType::EXP:
source = "value_0 = exp(value_0);";
break;
case tflite::gpu::OperationType::FLOOR:
source = "value_0 = floor(value_0);";
break;
case tflite::gpu::OperationType::GELU:
source =
"value_0 = 0.5 * value_0 * (1.0 + tanh(0.7978845608 * (value_0 + "
"0.044715 * value_0 * value_0 * value_0)));";
break;
case OperationType::HARD_SWISH:
source =
"value_0 *= clamp(value_0 / 6.0 + vec4(0.5), vec4(0.0), "
"vec4(1.0));";
break;
case OperationType::LOG:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x > 0.0 ? log(value_0.x) : nan;
value_0.y = value_0.y > 0.0 ? log(value_0.y) : nan;
value_0.z = value_0.z > 0.0 ? log(value_0.z) : nan;
value_0.w = value_0.w > 0.0 ? log(value_0.w) : nan;
)";
break;
case OperationType::NEG:
source = "value_0 = -(value_0);";
break;
case OperationType::RSQRT:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x > 0.0 ? 1.0 / sqrt(value_0.x) : nan;
value_0.y = value_0.y > 0.0 ? 1.0 / sqrt(value_0.y) : nan;
value_0.z = value_0.z > 0.0 ? 1.0 / sqrt(value_0.z) : nan;
value_0.w = value_0.w > 0.0 ? 1.0 / sqrt(value_0.w) : nan;
)";
break;
case OperationType::SIGMOID:
source = "value_0 = 1.0 / (1.0 + exp(-1.0 * value_0));";
break;
case OperationType::SIN:
source = "value_0 = sin(value_0);";
break;
case OperationType::SQRT:
source = R"(
const float nan = normalize(vec4(0, 0, 0, 0)).x;
value_0.x = value_0.x >= 0.0 ? sqrt(value_0.x) : nan;
value_0.y = value_0.y >= 0.0 ? sqrt(value_0.y) : nan;
value_0.z = value_0.z >= 0.0 ? sqrt(value_0.z) : nan;
value_0.w = value_0.w >= 0.0 ? sqrt(value_0.w) : nan;
)";
break;
case OperationType::SQUARE:
source = "value_0 = value_0 * value_0;";
break;
case OperationType::TANH:
source = "value_0 = tanh(value_0);";
break;
default:
return absl::InvalidArgumentError(
"Incorrect elementwise operation type.");
}
*generated_code = {
{},
{},
{},
uint3(),
uint3(),
source,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
private:
OperationType operation_type_;
};
class ElementwiseTwoArguments : public NodeShader {
public:
explicit ElementwiseTwoArguments(OperationType operation_type)
: operation_type_(operation_type) {}
inline bool IsElementwiseSupported(const GenerationContext& ctx) const {
return ctx.input_shapes.size() == 2 &&
ctx.input_shapes[0] == ctx.input_shapes[1];
}
inline bool IsBroadcastSupported(const GenerationContext& ctx) const {
return ctx.input_shapes.size() == 2 && ctx.input_shapes[1][1] == 1 &&
ctx.input_shapes[1][2] == 1 &&
ctx.input_shapes[0][3] == ctx.input_shapes[1][3];
}
absl::Status GenerateCode(const GenerationContext& ctx,
GeneratedCode* generated_code) const final {
std::vector<Variable> parameters;
std::vector<std::pair<std::string, Object>> objects;
std::string argument0, argument1;
if (IsElementwiseSupported(ctx)) {
argument0 = "value_0";
argument1 = "value_1";
} else if (IsBroadcastSupported(ctx)) {
argument0 = "$input_data_0[gid.x, gid.y, gid.z]$";
argument1 = "$input_data_1[0, 0, gid.z]$";
} else {
const auto& attr =
std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
const auto* tensor =
std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
const auto* scalar = std::get_if<float>(&attr.param);
if (!tensor && !scalar) {
return absl::InvalidArgumentError(
"Couldn't read scalar of const vector data from the attributes.");
}
argument0 = "value_0";
if (tensor) {
argument1 = "$const_data[gid.z]$";
objects.push_back({"const_data", MakeReadonlyObject(tensor->data)});
} else {
argument1 = "vec4($const_data$)";
parameters.push_back({"const_data", *scalar});
}
if (attr.runtime_tensor_is_second) {
argument0 = argument1;
argument1 = "value_0";
}
}
std::string source;
switch (operation_type_) {
case OperationType::DIV: {
source = "value_0 = $0/$1;";
break;
}
case tflite::gpu::OperationType::FLOOR_DIV:
source = "value_0 = floor($0 / $1);";
break;
case tflite::gpu::OperationType::FLOOR_MOD:
source = "value_0 = $0 - floor($0 / $1) * $1;";
break;
case OperationType::MAXIMUM: {
source = "value_0 = max($0, $1);";
break;
}
case OperationType::MINIMUM: {
source = "value_0 = min($0, $1);";
break;
}
case OperationType::SQUARED_DIFF: {
source = "value_0 = ($0 - $1) * ($0 - $1);";
break;
}
case OperationType::SUB: {
source = "value_0 = $0 - $1;";
break;
}
case OperationType::POW: {
source = "value_0 = pow($0, $1);";
break;
}
default:
return absl::InvalidArgumentError(
"Incorrect elementwise with scalar operation type.");
}
source = absl::Substitute(source, argument0, argument1);
*generated_code = {
std::move(parameters),
std::move(objects),
{},
uint3(),
uint3(),
source,
IOStructure::AUTO,
IOStructure::AUTO,
};
return absl::OkStatus();
}
private:
OperationType operation_type_;
};
}
std::unique_ptr<NodeShader> NewElementwiseNodeShader(
OperationType operation_type) {
switch (operation_type) {
case OperationType::ABS:
case OperationType::COS:
case OperationType::COPY:
case OperationType::ELU:
case OperationType::EXP:
case OperationType::FLOOR:
case OperationType::GELU:
case OperationType::HARD_SWISH:
case OperationType::LOG:
case OperationType::NEG:
case OperationType::RSQRT:
case OperationType::SIGMOID:
case OperationType::SIN:
case OperationType::SQRT:
case OperationType::SQUARE:
case OperationType::TANH:
return std::make_unique<ElementwiseOneArgument>(operation_type);
case OperationType::DIV:
case OperationType::FLOOR_DIV:
case OperationType::FLOOR_MOD:
case OperationType::MAXIMUM:
case OperationType::MINIMUM:
case OperationType::POW:
case OperationType::SQUARED_DIFF:
case OperationType::SUB:
return std::make_unique<ElementwiseTwoArguments>(operation_type);
default:
return nullptr;
}
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/elementwise_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Abs) { ASSERT_OK(AbsTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Cos) { ASSERT_OK(CosTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Copy) { ASSERT_OK(CopyTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Elu) { ASSERT_OK(EluTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Exp) { ASSERT_OK(ExpTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Floor) { ASSERT_OK(FloorTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, FloorDiv) { ASSERT_OK(FloorDivTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, FloorMod) { ASSERT_OK(FloorModTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Gelu) { ASSERT_OK(GeluTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, HardSwish) { ASSERT_OK(HardSwishTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Log) { ASSERT_OK(LogTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Neg) { ASSERT_OK(NegTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Rsqrt) { ASSERT_OK(RsqrtTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sigmoid) { ASSERT_OK(SigmoidTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sin) { ASSERT_OK(SinTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sqrt) { ASSERT_OK(SqrtTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Square) { ASSERT_OK(SquareTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Tanh) { ASSERT_OK(TanhTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Sub) { ASSERT_OK(SubTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, SquaredDiff) {
ASSERT_OK(SquaredDiffTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Div) { ASSERT_OK(DivTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Pow) { ASSERT_OK(PowTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Add) { ASSERT_OK(AddTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Maximum) { ASSERT_OK(MaximumTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, MaximumWithScalar) {
ASSERT_OK(MaximumWithScalarTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumWithConstantLinearTensor) {
ASSERT_OK(MaximumWithConstantLinearTensorTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumWithConstantHWCTensor) {
ASSERT_OK(MaximumWithConstantHWCTensorTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumWithConstantHWCTensorBroadcastChannels) {
ASSERT_OK(MaximumWithConstantHWCTensorBroadcastChannelsTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Minimum) { ASSERT_OK(MinimumTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, MinimumWithScalar) {
ASSERT_OK(MinimumWithScalarTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Mul) { ASSERT_OK(MulTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, MulBroadcastHW) {
ASSERT_OK(MulBroadcastHWTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MulBroadcastChannels) {
ASSERT_OK(MulBroadcastChannelsTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, SubWithScalarAtFirstPosition) {
ASSERT_OK(SubWithScalarAtFirstPositionTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Less) { ASSERT_OK(LessTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, LessEqual) { ASSERT_OK(LessEqualTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, Greater) { ASSERT_OK(GreaterTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, GreaterEqual) {
ASSERT_OK(GreaterEqualTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, Equal) { ASSERT_OK(EqualTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, NotEqual) { ASSERT_OK(NotEqualTest(&exec_env_)); }
TEST_F(OpenCLOperationTest, CosBroadcast) {
ASSERT_OK(CosBroadcastTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MaximumScalarBroadcastInput) {
ASSERT_OK(MaximumScalarBroadcastInputTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MulLinearBroadcastInput) {
ASSERT_OK(MulLinearBroadcastInputTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, MulBroadcastBothInputs) {
ASSERT_OK(MulBroadcastBothInputsTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, LogicalAndTest) {
ASSERT_OK(LogicalAndTest(&exec_env_));
}
TEST_F(OpenCLOperationTest, LogicalAndWithConstantTest) {
ASSERT_OK(LogicalAndWithConstantTest(&exec_env_));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/elementwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dd9b6d0b-cfde-4480-9c14-d8bc39df53b2 | cpp | tensorflow/tensorflow | criticality | third_party/xla/third_party/tsl/tsl/platform/default/criticality.h | third_party/xla/third_party/tsl/tsl/platform/criticality_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
namespace tsl {
namespace criticality {
// Default implementation: unconditionally reports kCritical for every caller.
inline Criticality GetCriticality() {
  // Assume all callers are critical.
  return Criticality::kCritical;
}
}
}
#endif | #include "tsl/platform/criticality.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace criticality {
TEST(CriticalityTest, Basic) {
EXPECT_EQ(GetCriticality(), Criticality::kCritical);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/criticality.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/criticality_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1049daff-5780-4d51-8e50-1ccb85757684 | cpp | tensorflow/tensorflow | increase_dynamism_for_auto_jit_pass | tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc | tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc | #include "tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.h"
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
template <class T>
using StatusOrOptional = StatusOr<std::optional<T>>;
// If `n` is a Const op, returns its value as a Tensor; otherwise returns an
// empty optional.  Returns an error if the Const node's "value" attribute is
// missing or cannot be parsed into a Tensor.
StatusOrOptional<Tensor> TryToGetTensorFromConstOp(Node* n) {
  if (n->type_string() != "Const") {
    return {std::nullopt};
  }
  const TensorProto* proto = nullptr;
  TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "value", &proto));
  Tensor tensor(proto->dtype());
  TF_RET_CHECK(tensor.FromProto(*proto));
  return {tensor};
}
// The operands of a Slice node, pulled apart for convenience.
struct SliceInputs {
  Output slice_op;  // NOTE(review): never assigned in this file — confirm use.
  Output input;     // The tensor being sliced.
  Output begin;     // The "begin" operand of the Slice.
  Output size;      // The "size" operand of the Slice.
  // `size` as a constant vector; only populated when `size` is a 1-D Const
  // node (see GetSliceInputs).
  std::vector<int64_t> size_as_vector;
};
// Flattens an int32/int64 tensor into a std::vector<int64_t>, widening int32
// elements as needed.
std::vector<int64_t> IntTensorAsVector(const Tensor& t) {
  DCHECK(t.dtype() == DT_INT32 || t.dtype() == DT_INT64);
  const int64_t num_elements = t.NumElements();
  std::vector<int64_t> result;
  result.reserve(num_elements);
  // Branch on the dtype once, outside the copy loop.
  if (t.dtype() == DT_INT32) {
    auto values = t.flat<int32>();
    for (int64_t i = 0; i < num_elements; i++) {
      result.push_back(static_cast<int64_t>(values(i)));
    }
  } else {
    auto values = t.flat<int64_t>();
    for (int64_t i = 0; i < num_elements; i++) {
      result.push_back(values(i));
    }
  }
  return result;
}
// Extracts the operands of `slice` into a SliceInputs.  Returns an empty
// optional when the node cannot be analyzed for this rewrite: the "size"
// operand is not a Const node, or its constant value is not a 1-D vector.
StatusOrOptional<SliceInputs> GetSliceInputs(Node* slice) {
  // Operand order of the Slice op: Slice(input, begin, size).
  const int kSliceInputIndex = 0;
  const int kSliceBeginIndex = 1;
  const int kSliceSizeIndex = 2;
  const Edge* slice_input_edge;
  TF_RETURN_IF_ERROR(slice->input_edge(kSliceInputIndex, &slice_input_edge));
  const Edge* slice_size_edge;
  TF_RETURN_IF_ERROR(slice->input_edge(kSliceSizeIndex, &slice_size_edge));
  const Edge* slice_begin_edge;
  TF_RETURN_IF_ERROR(slice->input_edge(kSliceBeginIndex, &slice_begin_edge));
  SliceInputs slice_inputs;
  slice_inputs.input =
      Output(slice_input_edge->src(), slice_input_edge->src_output());
  slice_inputs.begin =
      Output(slice_begin_edge->src(), slice_begin_edge->src_output());
  slice_inputs.size =
      Output(slice_size_edge->src(), slice_size_edge->src_output());
  // The rewrite needs the slice sizes as compile-time values.
  TF_ASSIGN_OR_RETURN(std::optional<Tensor> tf_slice_size,
                      TryToGetTensorFromConstOp(slice_inputs.size.node()));
  if (!tf_slice_size.has_value()) {
    return {std::nullopt};
  }
  if (tf_slice_size->dims() != 1) {
    return {std::nullopt};
  }
  slice_inputs.size_as_vector = IntTensorAsVector(*tf_slice_size);
  return {slice_inputs};
}
// Returns `x` unchanged if it is already DT_INT64; otherwise inserts a Cast
// to DT_INT64 in `host_scope`, named by concatenating `name` with "_s64".
Output MakeInt64(const Scope& host_scope, absl::string_view name,
                 const Output& x) {
  return x.type() == DT_INT64
             ? x
             : ops::Cast(host_scope.WithOpName(name, "_s64"), x, DT_INT64);
}
// Returns a copy of `slice_inputs` whose begin and size operands are
// guaranteed to be DT_INT64, so the downstream size arithmetic operates on a
// single consistent dtype.
SliceInputs MakeSliceIndexAndSizeInt64(const Scope& host_scope,
                                       const SliceInputs& slice_inputs) {
  SliceInputs result;
  result.input = slice_inputs.input;
  result.begin = MakeInt64(host_scope, "begin", slice_inputs.begin);
  result.size = MakeInt64(host_scope, "size", slice_inputs.size);
  result.size_as_vector = slice_inputs.size_as_vector;
  return result;
}
// Builds 1-element host-placed integer constants on demand and caches them so
// each distinct scalar value is materialized only once per rewritten Slice.
// Every constant created through this cache inherits the control dependencies
// supplied at construction time, preserving the control-flow semantics of the
// original Slice's "size" operand.
class ConstantCache {
 public:
  explicit ConstantCache(const Scope& s,
                         const std::vector<const Edge*>& control_deps)
      : scope_(s), control_deps_(control_deps) {}

  // Returns a 1-element int64 host constant holding `constant`, creating and
  // caching it on first use.
  Output Get1DHostConstant(int64_t constant) {
    auto it = cache_.find(constant);
    if (it == cache_.end()) {
      Output new_const =
          ops::Const(scope_.WithOpName("const_", constant), {constant});
      it = cache_.insert({constant, new_const}).first;
      for (const Edge* e : control_deps_) {
        scope_.graph()->AddControlEdge(e->src(), new_const.node());
      }
    }
    return it->second;
  }

 private:
  Scope scope_;
  // Keyed by the full 64-bit constant value.  The previous `int` key silently
  // truncated the int64_t argument, so two distinct constants agreeing in
  // their low 32 bits would alias and return the wrong cached node.
  std::unordered_map<int64_t, Output> cache_;
  std::vector<const Edge*> control_deps_;
};
// Computes the effective slice size as a graph Output.  If every requested
// size is non-negative, the original (constant) size operand is reused as-is.
// Otherwise, each -1 entry ("slice to the end of the dimension") is replaced
// with `input_shape[i] - begin[i]`, computed on the host, and the
// per-dimension pieces are concatenated back into a single 1-D size vector.
// `control_deps` — the control inputs of the original size constant — are
// attached to every newly created constant so control-flow semantics are
// preserved.
Status ComputeSliceSize(const Scope& host_scope,
                        const SliceInputs& slice_inputs,
                        std::vector<const Edge*> control_deps, Output* size) {
  // Fast path: all sizes are static and non-negative; nothing to compute.
  if (absl::c_all_of(slice_inputs.size_as_vector,
                     [](int64_t i) { return i >= 0; })) {
    *size = slice_inputs.size;
    return absl::OkStatus();
  }
  Output input_shape =
      ops::Shape(host_scope.WithOpName("input_shape"), slice_inputs.input,
                 ops::Shape::OutType(DT_INT64));
  ConstantCache constant_pool(host_scope, control_deps);
  std::vector<Output> slice_size;
  for (int i = 0, end = slice_inputs.size_as_vector.size(); i < end; i++) {
    if (slice_inputs.size_as_vector[i] >= 0) {
      slice_size.push_back(
          constant_pool.Get1DHostConstant(slice_inputs.size_as_vector[i]));
      continue;
    }
    // Sizes below -1 were already rejected by the caller's filter (see
    // ShouldRewriteSlice), so only -1 can reach this point.
    DCHECK_EQ(slice_inputs.size_as_vector[i], -1);
    // size[i] == -1  =>  slice_size[i] = input_shape[i] - begin[i].
    Output begin_i = ops::Slice(
        host_scope.WithOpName("begin_", i), slice_inputs.begin,
        constant_pool.Get1DHostConstant(i), constant_pool.Get1DHostConstant(1));
    Output input_shape_i = ops::Slice(
        host_scope.WithOpName("input_shape_", i), input_shape,
        constant_pool.Get1DHostConstant(i), constant_pool.Get1DHostConstant(1));
    slice_size.push_back(ops::Sub(host_scope.WithOpName("slice_size_", i),
                                  input_shape_i, begin_i));
    DCHECK_EQ(slice_size.back().type(), DT_INT64);
  }
  // Avoid an unnecessary Concat when there is only a single piece.
  if (slice_size.size() == 1) {
    *size = slice_size[0];
  } else {
    auto concat_axis = ops::Const(host_scope.WithOpName("concat_axis"), 0);
    // The concat-axis constant also inherits the original control deps.
    for (const Edge* e : control_deps) {
      host_scope.graph()->AddControlEdge(e->src(), concat_axis.node());
    }
    *size = ops::Concat(host_scope.WithOpName("slice_size"), slice_size,
                        concat_axis);
  }
  return absl::OkStatus();
}
// Builds the "static shaped" replacement for `slice` without modifying the
// original node.  The slice-size computation (see ComputeSliceSize) is placed
// on the CPU host device paired with the slice's assigned device; the
// replacement Slice itself stays on the original device, and its "size" input
// is marked as a compile-time constant for the XLA bridge.
Status ConvertTensorFlowSliceToStaticShapedSlice(
    Graph* g, Node* slice, const SliceInputs& slice_inputs,
    absl::string_view cluster_name, Node** result) {
  string host_name;
  TF_RETURN_IF_ERROR(DeviceNameUtils::DeviceNameToCpuDeviceName(
      slice->assigned_device_name(), &host_name));
  // `status` is populated by the internal scope's error callback as ops are
  // added below.
  Status status;
  Scope main_scope =
      NewInternalScope(g, &status, nullptr)
          .WithXlaCluster(string(cluster_name))
          .NewSubScope(absl::StrCat(slice->name(), "/static_shaped_slice"));
  Scope host_scope = main_scope.WithAssignedDevice(host_name);
  // Normalize begin/size to int64 so the size arithmetic has one dtype.
  SliceInputs slice_inputs_int64 =
      MakeSliceIndexAndSizeInt64(host_scope, slice_inputs);
  // Collect the control dependencies of the old "size" operand so they can be
  // re-attached to the nodes that replace it.
  Node* old_size;
  std::vector<const Edge*> old_size_ctrl_deps;
  TF_RETURN_IF_ERROR(slice->input_node(2, &old_size));
  absl::c_copy_if(old_size->in_edges(), std::back_inserter(old_size_ctrl_deps),
                  [](const Edge* e) { return e->IsControlEdge(); });
  Output slice_size;
  TF_RETURN_IF_ERROR(ComputeSliceSize(host_scope, slice_inputs_int64,
                                      old_size_ctrl_deps, &slice_size));
  *result =
      ops::Slice(main_scope.WithAssignedDevice(slice->assigned_device_name())
                     .WithOpName("static_shaped_slice"),
                 slice_inputs_int64.input, slice_inputs_int64.begin, slice_size)
          .node();
  TF_RETURN_IF_ERROR(main_scope.status());
  // Tell the XLA bridge that the "size" input must be compile-time constant.
  std::vector<string> compile_time_const_inputs;
  compile_time_const_inputs.push_back("size");
  (*result)->AddAttr(kXlaCompileTimeConstantInputsAttr,
                     compile_time_const_inputs);
  return status;
}
// Redirects all outgoing data/control edges of `slice` to
// `static_shaped_slice`, copies over incoming control edges, and finally
// removes `slice` from the graph.
void ReplaceTensorFlowSliceWithStaticShapedSlice(Graph* g, Node* slice,
                                                 Node* static_shaped_slice) {
  // Copy the edge list first: RemoveEdge below mutates slice->out_edges().
  std::vector<const Edge*> slice_out_edges;
  absl::c_copy(slice->out_edges(), std::back_inserter(slice_out_edges));
  for (const Edge* e : slice_out_edges) {
    // Slice has a single data output, so the source side can only be output 0
    // or the control slot.
    DCHECK(e->src_output() == 0 || e->src_output() == Graph::kControlSlot);
    int src_output = e->src_output();
    int dst_input = e->dst_input();
    Node* dst = e->dst();
    g->RemoveEdge(e);
    g->AddEdge(static_shaped_slice, src_output, dst, dst_input);
  }
  // Preserve incoming control dependencies; data inputs were already wired
  // into the replacement when it was constructed.
  for (const Edge* e : slice->in_edges()) {
    if (e->IsControlEdge()) {
      g->AddControlEdge(e->src(), static_shaped_slice);
    }
  }
  g->RemoveNode(slice);
}
Status RewriteSlice(Graph* g, Node* slice, const SliceInputs& slice_inputs,
absl::string_view cluster_name) {
VLOG(3) << "Rewriting slice " << slice->name()
<< " to a \"static shaped\" Slice";
Node* static_shaped_slice;
TF_RETURN_IF_ERROR(ConvertTensorFlowSliceToStaticShapedSlice(
g, slice, slice_inputs, cluster_name, &static_shaped_slice));
ReplaceTensorFlowSliceWithStaticShapedSlice(g, slice, static_shaped_slice);
return absl::OkStatus();
}
absl::StatusOr<bool> ShouldRewriteSlice(Node* n) {
if (n->type_string() != "Slice") {
return false;
}
if (!GetXlaClusterForNode(*n).has_value()) {
return false;
}
TF_ASSIGN_OR_RETURN(std::optional<SliceInputs> slice_inputs,
GetSliceInputs(n));
if (!slice_inputs.has_value()) {
return false;
}
bool slice_size_has_error =
absl::c_all_of(slice_inputs->size_as_vector,
[](int64_t size_i) { return size_i >= -1; });
if (!slice_size_has_error) {
return false;
}
return !slice_inputs->begin.node()->IsConstant();
}
Status FindAndRewriteSlices(Graph* g, bool* changed) {
std::vector<Node*> slices_to_rewrite;
for (Node* n : g->nodes()) {
TF_ASSIGN_OR_RETURN(bool is_rewritable, ShouldRewriteSlice(n));
if (is_rewritable) {
slices_to_rewrite.push_back(n);
}
}
for (Node* n : slices_to_rewrite) {
TF_ASSIGN_OR_RETURN(std::optional<SliceInputs> slice_inputs,
GetSliceInputs(n));
TF_RET_CHECK(slice_inputs.has_value());
TF_RETURN_IF_ERROR(
RewriteSlice(g, n, *slice_inputs, *GetXlaClusterForNode(*n)));
}
if (!slices_to_rewrite.empty()) {
FixupSourceAndSinkEdges(g);
}
*changed = !slices_to_rewrite.empty();
return absl::OkStatus();
}
}
Status IncreaseDynamismForAutoJitPass::Run(
const GraphOptimizationPassOptions& options) {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
if (flags->tf_xla_clustering_debug) {
DumpGraphToFile("before_increase_dynamism_for_auto_jit_pass",
**options.graph, options.flib_def);
}
bool changed;
TF_RETURN_IF_ERROR(FindAndRewriteSlices(options.graph->get(), &changed));
if (changed && flags->tf_xla_clustering_debug) {
DumpGraphToFile("increase_dynamism_for_auto_jit_pass", **options.graph,
options.flib_def);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.h"
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/compiler/jit/node_matchers.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace {
using ::testing::_;
using testing::matchers::AssignedDevice;
using testing::matchers::Attr;
using testing::matchers::Const;
using testing::matchers::CtrlDeps;
using testing::matchers::Inputs;
using testing::matchers::Name;
using testing::matchers::NodeWith;
using testing::matchers::Op;
using testing::matchers::Out;
class FakeDevice : public Device {
public:
explicit FakeDevice(const DeviceAttributes& device_attributes)
: Device(nullptr, device_attributes) {}
Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
static std::unique_ptr<Device> Make(const string& name, const string& type) {
DeviceAttributes device_attributes;
device_attributes.set_name(name);
device_attributes.set_device_type(DeviceType(type).type());
return std::make_unique<FakeDevice>(device_attributes);
}
};
const char* kHostName = "/job:worker/replica:0/task:0/device:CPU:0";
const char* kDeviceName = "/job:worker/replica:0/task:0/device:GPU:0";
Status IncreaseDynamismForAutoJit(const Scope& s,
std::unique_ptr<Graph>* result) {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(FakeDevice::Make(kDeviceName, DEVICE_GPU));
devices.push_back(FakeDevice::Make(kHostName, DEVICE_CPU));
std::unique_ptr<DeviceSet> device_set(new DeviceSet());
for (auto& device : devices) {
device_set->AddDevice(device.get());
}
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
GraphOptimizationPassOptions options;
options.graph = &graph;
options.device_set = device_set.get();
options.session_options = &session_options;
std::unordered_map<string, string> assigned_device_names;
for (Node* n : s.graph()->nodes()) {
assigned_device_names[n->name()] = n->assigned_device_name();
}
TF_RETURN_IF_ERROR(s.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
n->set_assigned_device_name(assigned_device_names[n->name()]);
}
IncreaseDynamismForAutoJitPass rewriter;
TF_RETURN_IF_ERROR(rewriter.Run(options));
*result = std::move(graph);
return absl::OkStatus();
}
TEST(SliceToDynamicSliceRewriteTest, Basic) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
const int64_t zero_64 = 0;
const int32_t zero_32 = 0;
const int64_t one_64 = 1;
auto m_input = Out(NodeWith(Op("Placeholder"), Name("input")));
auto m_begin_s64 = Out(NodeWith(
Op("Cast"), Inputs(Out(NodeWith(Op("Placeholder"), Name("begin"))))));
auto m_input_shape = Out(NodeWith(Op("Shape"), Inputs(m_input)));
auto m_slice_size_0 = Out(NodeWith(
Op("Sub"), AssignedDevice(kHostName),
Inputs(
Out(NodeWith(Op("Slice"), AssignedDevice(kHostName),
Inputs(m_input_shape, Const(zero_64), Const(one_64)))),
Out(NodeWith(Op("Slice"), AssignedDevice(kHostName),
Inputs(m_begin_s64, Const(zero_64), Const(one_64)))))));
auto m_dynamic_slice_size =
Out(NodeWith(Op("ConcatV2"), AssignedDevice(kHostName),
Inputs(m_slice_size_0, Const(static_cast<int64_t>(500)),
Const(zero_32))));
std::vector<string> compile_time_constant_inputs;
compile_time_constant_inputs.push_back("size");
auto m_dynamic_slice = NodeWith(
Op("Slice"), AssignedDevice(kDeviceName),
Attr(kXlaCompileTimeConstantInputsAttr, compile_time_constant_inputs),
Inputs(m_input, m_begin_s64, m_dynamic_slice_size));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(static_shaped_slice, m_dynamic_slice);
}
TEST(SliceToDynamicSliceRewriteTest, SliceFromVector) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
EXPECT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("ConcatV2")))));
}
TEST(SliceToDynamicSliceRewriteTest, ControlDependencePreserved) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output control_pred = ops::Placeholder(root.WithOpName("control"), DT_BOOL);
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
root.graph()->AddControlEdge(control_pred.node(), slice.node());
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(static_shaped_slice,
NodeWith(Op("Slice"),
CtrlDeps(NodeWith(Op("Placeholder"), Name("control")))));
}
int64_t ToInt64(int v) { return static_cast<int64_t>(v); }
TEST(SliceToDynamicSliceRewriteTest, Int64Indices) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size =
ops::Const(root.WithOpName("size"), {ToInt64(-1), ToInt64(500)});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Cast")))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteInvalidSlice) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size_placeholder =
ops::Placeholder(root.WithOpName("size_placeholder"), DT_INT32);
Output slice =
ops::Slice(root.WithOpName("slice"), input, begin, size_placeholder);
Output size = ops::Const(root.WithOpName("size"), {-8, 500});
TF_ASSERT_OK(root.graph()->UpdateEdge(size.node(),
0,
slice.node(), 2));
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteUnclusteredSlice) {
Scope root =
Scope::NewRootScope().ExitOnError().WithAssignedDevice(kDeviceName);
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteSliceWithNonConstSize) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size = ops::Placeholder(root.WithOpName("size"), DT_INT64);
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, ScalarSlice) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size = ops::Const<int64_t>(root.WithOpName("size"), {});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(), "slice/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_THAT(static_shaped_slice,
NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr),
Inputs(_, _, Out(NodeWith(Name(size.node()->name()))))));
}
TEST(SliceToDynamicSliceRewriteTest, IndicesNotVector) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
auto ToInt64 = [](int v) { return static_cast<int64_t>(v); };
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64);
Output size_placeholder = ops::Placeholder(root.WithOpName("size"), DT_INT64);
Output slice =
ops::Slice(root.WithOpName("slice"), input, begin, size_placeholder);
Output size =
ops::Const(root.WithOpName("size"), {{ToInt64(-1)}, {ToInt64(500)}});
TF_ASSERT_OK(root.graph()->UpdateEdge(size.node(), 0, slice.node(), 2));
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
EXPECT_THAT(result->nodes(),
Not(Contains(NodeWith(Op("Slice"),
Attr(kXlaCompileTimeConstantInputsAttr)))));
}
TEST(SliceToDynamicSliceRewriteTest, SliceWithSliceInput) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size_a = ops::Const(root.WithOpName("size_a"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size_a);
Output size_b = ops::Const(root.WithOpName("size_a"), {-1, 200});
Output slice_with_slice_input = ops::Slice(
root.WithOpName("slice_with_slice_input"), slice, begin, size_b);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(),
"slice_with_slice_input/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_EQ(static_shaped_slice->output_type(0), DT_FLOAT)
<< "Expected DT_FLOAT, was "
<< DataType_Name(static_shaped_slice->output_type(0));
EXPECT_THAT(
static_shaped_slice,
NodeWith(
Op("Slice"),
Inputs(Out(NodeWith(
Op("Slice"),
Name("slice/static_shaped_slice/static_shaped_slice"))),
_, _)));
}
TEST(SliceToDynamicSliceRewriteTest, SliceWithSliceBegin) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input_float =
ops::Placeholder(root.WithOpName("input_float"), DT_FLOAT);
Output input_i64 = ops::Placeholder(root.WithOpName("input_i64"), DT_INT64);
Output begin_begin =
ops::Placeholder(root.WithOpName("begin_begin"), DT_INT32);
Output begin_size = ops::Const(root.WithOpName("begin_size"), {-1});
Output begin =
ops::Slice(root.WithOpName("begin"), input_i64, begin_begin, begin_size);
Output size =
ops::Const(root.WithOpName("size"), {ToInt64(-1), ToInt64(200)});
Output slice_with_slice_begin = ops::Slice(
root.WithOpName("slice_with_slice_begin"), input_float, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* static_shaped_slice = testing::FindNodeByName(
result.get(),
"slice_with_slice_begin/static_shaped_slice/static_shaped_slice");
ASSERT_NE(static_shaped_slice, nullptr);
EXPECT_EQ(static_shaped_slice->output_type(0), DT_FLOAT)
<< "Expected DT_FLOAT, was "
<< DataType_Name(static_shaped_slice->output_type(0));
EXPECT_THAT(
static_shaped_slice,
NodeWith(
Op("Slice"),
Inputs(_,
Out(NodeWith(
Op("Slice"),
Name("begin/static_shaped_slice/static_shaped_slice"))),
_)));
}
TEST(SliceToDynamicSliceRewriteTest, WithControlDepsToConstant) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32);
Output size = ops::Const(root.WithOpName("size"), {-1});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
Output dependency = ops::Placeholder(root.WithOpName("dependency"), DT_BOOL);
root.graph()->AddControlEdge(dependency.node(), size.node());
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* const_0 = testing::FindNodeByName(result.get(),
"slice/static_shaped_slice/const_0");
EXPECT_NE(const_0, nullptr);
EXPECT_THAT(const_0,
NodeWith(Op("Const"), CtrlDeps(NodeWith(Op("Placeholder"),
Name("dependency")))));
}
TEST(SliceToDynamicSliceRewriteTest, DontRewriteSliceWithConstBegin) {
Scope root = Scope::NewRootScope()
.ExitOnError()
.WithAssignedDevice(kDeviceName)
.WithXlaCluster("cluster_0");
Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT);
Output begin = ops::Const(root.WithOpName("begin"), {10, 10});
Output size = ops::Const(root.WithOpName("size"), {-1, 500});
Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result));
Node* slice_node = testing::FindNodeByName(result.get(), "slice");
EXPECT_THAT(slice_node,
NodeWith(Op("Slice"), Inputs(Out(NodeWith(Op("Placeholder"))),
Out(NodeWith(Op("Const"))),
Out(NodeWith(Op("Const"))))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb500ced-65dc-4fb0-889f-5da1a2768a2a | cpp | tensorflow/tensorflow | alias_analysis | third_party/xla/xla/service/llvm_ir/alias_analysis.cc | third_party/xla/xla/service/llvm_ir/alias_analysis_test.cc | #include "xla/service/llvm_ir/alias_analysis.h"
#include <map>
#include "absl/container/flat_hash_set.h"
#include "llvm/IR/MDBuilder.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/hlo_value.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_type_conversion_util.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace llvm_ir {
static const BufferAllocation* kParameterAllocation = new BufferAllocation(
-1, 0, LogicalBuffer::Color(0));
void AliasAnalysis::AddAliasingInformationToIrArray(const HloInstruction& hlo,
llvm_ir::IrArray* array,
const ShapeIndex& index) {
BufferAllocation::Slice buffer_slice;
if (hlo.opcode() == HloOpcode::kParameter &&
hlo.parent() == module_.entry_computation()) {
buffer_slice = BufferAllocation::Slice(kParameterAllocation, 0, 0);
} else {
auto unique_slice = assignment_.GetUniqueSlice(&hlo, index);
if (!unique_slice.ok()) {
return;
}
buffer_slice = unique_slice.value();
}
if (module_.config().debug_options().xla_llvm_enable_alias_scope_metadata()) {
llvm::MDNode*& alias_scope_md = alias_scope_metadata_[buffer_slice];
if (alias_scope_md == nullptr) {
alias_scope_md =
GetAliasScopeMetadataForBuffer(buffer_slice, GetAliasDomain());
}
if (alias_scope_md != nullptr) {
array->AddAliasScopeMetadata(alias_scope_md);
}
}
if (module_.config().debug_options().xla_llvm_enable_noalias_metadata()) {
llvm::MDNode*& noalias_md = noalias_metadata_[{buffer_slice, &hlo}];
if (noalias_md == nullptr) {
noalias_md = GetNoaliasMetadataForBuffer(buffer_slice, GetAliasDomain(),
assignment_, hlo);
}
if (noalias_md != nullptr) {
array->AddNoaliasMetadata(noalias_md);
}
}
if (module_.config()
.debug_options()
.xla_llvm_enable_invariant_load_metadata()) {
if (hlo.opcode() == HloOpcode::kParameter &&
hlo.parent() == module_.entry_computation()) {
array->MarkInvariantOverWholeProgram(context_);
}
}
}
llvm::MDNode* AliasAnalysis::GetAliasDomain() {
llvm::MDBuilder metadata_builder(*context_);
if (alias_domain_ == nullptr) {
alias_domain_ =
metadata_builder.createAliasScopeDomain("XLA global AA domain");
}
return alias_domain_;
}
llvm::MDNode* AliasAnalysis::GetAliasScopeMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain) {
if (buffer_slice.allocation() == kParameterAllocation) {
return nullptr;
}
llvm::MDBuilder metadata_builder(domain->getContext());
llvm::MDNode* scope = metadata_builder.createAliasScope(
"buffer: " + buffer_slice.ToString(), domain);
llvm::MDNode* scope_list = llvm::MDNode::get(domain->getContext(), scope);
return scope_list;
}
llvm::MDNode* AliasAnalysis::GetNoaliasMetadataForBuffer(
const BufferAllocation::Slice& buffer_slice, llvm::MDNode* domain,
const BufferAssignment& assignment, const HloInstruction& hlo) {
std::vector<const HloValue*> worklist;
absl::flat_hash_set<const HloInstruction*> added_to_worklist;
auto add_buffers_to_worklist =
[&](const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kParameter) {
return;
}
if (added_to_worklist.contains(instruction)) {
return;
}
added_to_worklist.insert(instruction);
ShapeUtil::ForEachSubshape(
instruction->shape(),
[&](const Shape& , const ShapeIndex& index) {
for (const HloValue* buffer :
assignment.GetSourceBuffers(instruction, index)) {
if (assignment.HasAllocation(*buffer)) {
worklist.push_back(buffer);
}
}
});
};
for (HloInstruction* user : hlo.users()) {
add_buffers_to_worklist(user);
for (HloInstruction* operand : user->operands()) {
add_buffers_to_worklist(operand);
}
}
add_buffers_to_worklist(&hlo);
for (HloInstruction* operand : hlo.operands()) {
add_buffers_to_worklist(operand);
}
std::set<BufferAllocation::Slice> buffers;
for (const HloValue* buffer : worklist) {
const BufferAllocation::Slice noalias_slice =
assignment.GetAssignedAllocation(*buffer).GetSlice(*buffer);
if (!buffer_slice.OverlapsWith(noalias_slice)) {
buffers.insert(noalias_slice);
constexpr int kMaxNoAliasSetSize = 500;
if (buffers.size() >= kMaxNoAliasSetSize) {
break;
}
}
}
if (buffers.empty()) {
return nullptr;
}
llvm::MDBuilder metadata_builder(domain->getContext());
std::vector<llvm::Metadata*> scopes;
for (const BufferAllocation::Slice noalias_slice : buffers) {
llvm::MDNode* scope = metadata_builder.createAliasScope(
"buffer: " + noalias_slice.ToString(), domain);
scopes.push_back(scope);
}
llvm::MDNode* noalias_list =
llvm::MDNode::get(domain->getContext(), AsArrayRef(scopes));
return noalias_list;
}
}
} | #include "absl/status/status.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/service/cpu/tests/cpu_codegen_test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
class AliasAnalysisTest : public CpuCodegenTest {
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
};
static absl::Status FakeCustomCallTarget(ffi::AnyBuffer,
ffi::Result<ffi::AnyBuffer>) {
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFakeCustomCallTarget, FakeCustomCallTarget,
ffi::Ffi::Bind()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"__xla_test$$FakeCustomCallTarget", "Host",
kFakeCustomCallTarget);
TEST_F(AliasAnalysisTest, EmbeddedComputationParamsMayAliasTemps) {
const char* hlo_string = R"(
HloModule while
body {
const.0.125 = f32[] constant(0.125)
body.state = f32[] parameter(0)
ROOT add.2.2 = f32[] add(const.0.125, body.state)
}
condition {
const.100 = f32[] constant(100)
condition.state = f32[] parameter(0)
addend = f32[] custom-call(condition.state), custom_call_target="__xla_test$$FakeCustomCallTarget", api_version=API_VERSION_TYPED_FFI
add = f32[] add(addend, condition.state)
ROOT greater-than = pred[] compare(const.100, add), direction=GT
}
ENTRY while3 {
const.0 = f32[] constant(0)
ROOT while = f32[] while(const.0), condition=condition, body=body
}
)";
CompileAndVerifyIr(hlo_string, R"(
; CHECK-LABEL: @body(ptr %retval
; CHECK: %[[add_result:.*]] = fadd float %[[fadd_lhs:.*]], %[[fadd_rhs:.*]]
; CHECK: store float %[[add_result]], ptr %[[store_dest:.*]], align 4, !alias.scope ![[alias_scope_md_for_store:[0-9]+]]
;
; CHECK-LABEL: @condition(ptr %retval, ptr noalias %run_options, ptr noalias %params
; CHECK: %[[cond_state_buf_ptr:.*]] = getelementptr inbounds ptr, ptr %buffer_table, i64 0
; CHECK: %[[cond_state_buf_untyped:.*]] = load ptr, ptr %[[cond_state_buf_ptr]]
; CHECK: load float, ptr %[[cond_state_buf_untyped]], align 4, !alias.scope ![[alias_scope_md_for_store]], !noalias ![[noalias_md_for_load:.*]]
;
; CHECK-LABEL: @while3(
![[alias_scope_md_for_store]] = !{![[buffer_idx_0:.*]]}
![[buffer_idx_0]] = !{!"buffer: {index:0, offset:0, size:4}", ![[aa_md_root:.*]]}
![[aa_md_root]] = !{!"XLA global AA domain"}
![[buffer_idx_1:.*]] = !{!"buffer: {index:1, offset:0, size:4}", !3}
![[buffer_idx_1_offset_16:.*]] = !{!"buffer: {index:1, offset:16, size:1}", !3}
![[noalias_md_for_load]] = !{![[buffer_idx_1_offset_16]], ![[buffer_idx_1]]}
}
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/alias_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/llvm_ir/alias_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5da530f5-75a9-4bfc-ad08-cd8396413f6e | cpp | google/quiche | moqt_session | quiche/quic/moqt/moqt_session.cc | quiche/quic/moqt/moqt_session_test.cc | #include "quiche/quic/moqt/moqt_session.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/moqt/moqt_framer.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/moqt_parser.h"
#include "quiche/quic/moqt/moqt_priority.h"
#include "quiche/quic/moqt/moqt_publisher.h"
#include "quiche/quic/moqt/moqt_subscribe_windows.h"
#include "quiche/quic/moqt/moqt_track.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_stream.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/web_transport/web_transport.h"
#define ENDPOINT \
(perspective() == Perspective::IS_SERVER ? "MoQT Server: " : "MoQT Client: ")
namespace moqt {
namespace {
using ::quic::Perspective;
constexpr MoqtPriority kDefaultSubscriberPriority = 0x80;
constexpr webtransport::SendGroupId kMoqtSendGroupId = 0;
bool PublisherHasData(const MoqtTrackPublisher& publisher) {
absl::StatusOr<MoqtTrackStatusCode> status = publisher.GetTrackStatus();
return status.ok() && DoesTrackStatusImplyHavingData(*status);
}
SubscribeWindow SubscribeMessageToWindow(const MoqtSubscribe& subscribe,
MoqtTrackPublisher& publisher) {
const FullSequence sequence = PublisherHasData(publisher)
? publisher.GetLargestSequence()
: FullSequence{0, 0};
switch (GetFilterType(subscribe)) {
case MoqtFilterType::kLatestGroup:
return SubscribeWindow(sequence.group, 0);
case MoqtFilterType::kLatestObject:
return SubscribeWindow(sequence.group, sequence.object);
case MoqtFilterType::kAbsoluteStart:
return SubscribeWindow(*subscribe.start_group, *subscribe.start_object);
case MoqtFilterType::kAbsoluteRange:
return SubscribeWindow(*subscribe.start_group, *subscribe.start_object,
*subscribe.end_group, *subscribe.end_object);
case MoqtFilterType::kNone:
QUICHE_BUG(MoqtSession_Subscription_invalid_filter_passed);
return SubscribeWindow(0, 0);
}
}
class DefaultPublisher : public MoqtPublisher {
public:
static DefaultPublisher* GetInstance() {
static DefaultPublisher* instance = new DefaultPublisher();
return instance;
}
absl::StatusOr<std::shared_ptr<MoqtTrackPublisher>> GetTrack(
const FullTrackName& track_name) override {
return absl::NotFoundError("No tracks published");
}
};
}
MoqtSession::MoqtSession(webtransport::Session* session,
MoqtSessionParameters parameters,
MoqtSessionCallbacks callbacks)
: session_(session),
parameters_(parameters),
callbacks_(std::move(callbacks)),
framer_(quiche::SimpleBufferAllocator::Get(), parameters.using_webtrans),
publisher_(DefaultPublisher::GetInstance()),
local_max_subscribe_id_(parameters.max_subscribe_id),
liveness_token_(std::make_shared<Empty>()) {}
MoqtSession::ControlStream* MoqtSession::GetControlStream() {
if (!control_stream_.has_value()) {
return nullptr;
}
webtransport::Stream* raw_stream = session_->GetStreamById(*control_stream_);
if (raw_stream == nullptr) {
return nullptr;
}
return static_cast<ControlStream*>(raw_stream->visitor());
}
void MoqtSession::SendControlMessage(quiche::QuicheBuffer message) {
ControlStream* control_stream = GetControlStream();
if (control_stream == nullptr) {
QUICHE_LOG(DFATAL) << "Trying to send a message on the control stream "
"while it does not exist";
return;
}
control_stream->SendOrBufferMessage(std::move(message));
}
void MoqtSession::OnSessionReady() {
QUICHE_DLOG(INFO) << ENDPOINT << "Underlying session ready";
if (parameters_.perspective == Perspective::IS_SERVER) {
return;
}
webtransport::Stream* control_stream =
session_->OpenOutgoingBidirectionalStream();
if (control_stream == nullptr) {
Error(MoqtError::kInternalError, "Unable to open a control stream");
return;
}
control_stream->SetVisitor(
std::make_unique<ControlStream>(this, control_stream));
control_stream_ = control_stream->GetStreamId();
MoqtClientSetup setup = MoqtClientSetup{
.supported_versions = std::vector<MoqtVersion>{parameters_.version},
.role = MoqtRole::kPubSub,
.max_subscribe_id = parameters_.max_subscribe_id,
.supports_object_ack = parameters_.support_object_acks,
};
if (!parameters_.using_webtrans) {
setup.path = parameters_.path;
}
SendControlMessage(framer_.SerializeClientSetup(setup));
QUIC_DLOG(INFO) << ENDPOINT << "Send the SETUP message";
}
void MoqtSession::OnSessionClosed(webtransport::SessionErrorCode,
const std::string& error_message) {
if (!error_.empty()) {
return;
}
QUICHE_DLOG(INFO) << ENDPOINT << "Underlying session closed with message: "
<< error_message;
error_ = error_message;
std::move(callbacks_.session_terminated_callback)(error_message);
}
void MoqtSession::OnIncomingBidirectionalStreamAvailable() {
while (webtransport::Stream* stream =
session_->AcceptIncomingBidirectionalStream()) {
if (control_stream_.has_value()) {
Error(MoqtError::kProtocolViolation, "Bidirectional stream already open");
return;
}
stream->SetVisitor(std::make_unique<ControlStream>(this, stream));
stream->visitor()->OnCanRead();
}
}
// Accepts every pending incoming unidirectional stream and attaches a data
// stream visitor to each, then drains any already-buffered bytes.
void MoqtSession::OnIncomingUnidirectionalStreamAvailable() {
  for (webtransport::Stream* stream =
           session_->AcceptIncomingUnidirectionalStream();
       stream != nullptr;
       stream = session_->AcceptIncomingUnidirectionalStream()) {
    stream->SetVisitor(std::make_unique<IncomingDataStream>(this, stream));
    stream->visitor()->OnCanRead();
  }
}
// Handles a datagram carrying a MoQT object: parses the header, resolves the
// owning track via its alias, and forwards the payload to the track visitor.
void MoqtSession::OnDatagramReceived(absl::string_view datagram) {
  MoqtObject message;
  absl::string_view payload = ParseDatagram(datagram, message);
  QUICHE_DLOG(INFO) << ENDPOINT
                    << "Received OBJECT message in datagram for subscribe_id "
                    << message.subscribe_id << " for track alias "
                    << message.track_alias << " with sequence "
                    << message.group_id << ":" << message.object_id
                    << " priority " << message.publisher_priority << " length "
                    << payload.size();
  auto [full_track_name, visitor] = TrackPropertiesFromAlias(message);
  if (visitor != nullptr) {
    // The trailing `true` marks the fragment as the end of the object: a
    // datagram delivers the object in one piece.
    visitor->OnObjectFragment(full_track_name, message.group_id,
                              message.object_id, message.publisher_priority,
                              message.object_status,
                              message.forwarding_preference, payload, true);
  }
}
// Closes the MoQT session with the given error code and message, and informs
// the application. Subsequent calls are no-ops: only the first error sticks.
void MoqtSession::Error(MoqtError code, absl::string_view error) {
  const bool already_errored = !error_.empty();
  if (already_errored) {
    return;
  }
  QUICHE_DLOG(INFO) << ENDPOINT << "MOQT session closed with code: "
                    << static_cast<int>(code) << " and message: " << error;
  error_ = std::string(error);
  session_->CloseSession(static_cast<uint64_t>(code), error);
  std::move(callbacks_.session_terminated_callback)(error);
}
// Sends an ANNOUNCE for `track_namespace`. `announce_callback` is invoked
// either synchronously with an error (wrong peer role, duplicate outstanding
// announce) or later, when the peer answers with ANNOUNCE_OK/ANNOUNCE_ERROR
// (see OnAnnounceOkMessage / OnAnnounceErrorMessage).
void MoqtSession::Announce(FullTrackName track_namespace,
                           MoqtOutgoingAnnounceCallback announce_callback) {
  if (peer_role_ == MoqtRole::kPublisher) {
    std::move(announce_callback)(
        track_namespace,
        MoqtAnnounceErrorReason{MoqtAnnounceErrorCode::kInternalError,
                                "ANNOUNCE cannot be sent to Publisher"});
    return;
  }
  if (pending_outgoing_announces_.contains(track_namespace)) {
    // At most one ANNOUNCE may be in flight per namespace.
    std::move(announce_callback)(
        track_namespace,
        MoqtAnnounceErrorReason{
            MoqtAnnounceErrorCode::kInternalError,
            "ANNOUNCE message already outstanding for namespace"});
    return;
  }
  MoqtAnnounce message;
  message.track_namespace = track_namespace;
  SendControlMessage(framer_.SerializeAnnounce(message));
  QUIC_DLOG(INFO) << ENDPOINT << "Sent ANNOUNCE message for "
                  << message.track_namespace;
  pending_outgoing_announces_[track_namespace] = std::move(announce_callback);
}
bool MoqtSession::SubscribeAbsolute(const FullTrackName& name,
uint64_t start_group, uint64_t start_object,
RemoteTrack::Visitor* visitor,
MoqtSubscribeParameters parameters) {
MoqtSubscribe message;
message.full_track_name = name;
message.subscriber_priority = kDefaultSubscriberPriority;
message.group_order = std::nullopt;
message.start_group = start_group;
message.start_object = start_object;
message.end_group = std::nullopt;
message.end_object = std::nullopt;
message.parameters = std::move(parameters);
return Subscribe(message, visitor);
}
bool MoqtSession::SubscribeAbsolute(const FullTrackName& name,
uint64_t start_group, uint64_t start_object,
uint64_t end_group,
RemoteTrack::Visitor* visitor,
MoqtSubscribeParameters parameters) {
if (end_group < start_group) {
QUIC_DLOG(ERROR) << "Subscription end is before beginning";
return false;
}
MoqtSubscribe message;
message.full_track_name = name;
message.subscriber_priority = kDefaultSubscriberPriority;
message.group_order = std::nullopt;
message.start_group = start_group;
message.start_object = start_object;
message.end_group = end_group;
message.end_object = std::nullopt;
message.parameters = std::move(parameters);
return Subscribe(message, visitor);
}
bool MoqtSession::SubscribeAbsolute(const FullTrackName& name,
uint64_t start_group, uint64_t start_object,
uint64_t end_group, uint64_t end_object,
RemoteTrack::Visitor* visitor,
MoqtSubscribeParameters parameters) {
if (end_group < start_group) {
QUIC_DLOG(ERROR) << "Subscription end is before beginning";
return false;
}
if (end_group == start_group && end_object < start_object) {
QUIC_DLOG(ERROR) << "Subscription end is before beginning";
return false;
}
MoqtSubscribe message;
message.full_track_name = name;
message.subscriber_priority = kDefaultSubscriberPriority;
message.group_order = std::nullopt;
message.start_group = start_group;
message.start_object = start_object;
message.end_group = end_group;
message.end_object = end_object;
message.parameters = std::move(parameters);
return Subscribe(message, visitor);
}
bool MoqtSession::SubscribeCurrentObject(const FullTrackName& name,
RemoteTrack::Visitor* visitor,
MoqtSubscribeParameters parameters) {
MoqtSubscribe message;
message.full_track_name = name;
message.subscriber_priority = kDefaultSubscriberPriority;
message.group_order = std::nullopt;
message.start_group = std::nullopt;
message.start_object = std::nullopt;
message.end_group = std::nullopt;
message.end_object = std::nullopt;
message.parameters = std::move(parameters);
return Subscribe(message, visitor);
}
bool MoqtSession::SubscribeCurrentGroup(const FullTrackName& name,
RemoteTrack::Visitor* visitor,
MoqtSubscribeParameters parameters) {
MoqtSubscribe message;
message.full_track_name = name;
message.subscriber_priority = kDefaultSubscriberPriority;
message.group_order = std::nullopt;
message.start_group = std::nullopt;
message.start_object = 0;
message.end_group = std::nullopt;
message.end_object = std::nullopt;
message.parameters = std::move(parameters);
return Subscribe(message, visitor);
}
// Terminates published subscription `subscribe_id`: sends SUBSCRIBE_DONE with
// the given code/reason, removes the subscription, and resets all data
// streams still associated with it. Returns false if the ID is unknown.
bool MoqtSession::SubscribeIsDone(uint64_t subscribe_id, SubscribeDoneCode code,
                                  absl::string_view reason_phrase) {
  auto it = published_subscriptions_.find(subscribe_id);
  if (it == published_subscriptions_.end()) {
    return false;
  }
  PublishedSubscription& subscription = *it->second;
  // Capture the stream list before the subscription is erased below.
  std::vector<webtransport::StreamId> streams_to_reset =
      subscription.GetAllStreams();
  MoqtSubscribeDone subscribe_done;
  subscribe_done.subscribe_id = subscribe_id;
  subscribe_done.status_code = code;
  subscribe_done.reason_phrase = reason_phrase;
  subscribe_done.final_id = subscription.largest_sent();
  SendControlMessage(framer_.SerializeSubscribeDone(subscribe_done));
  QUIC_DLOG(INFO) << ENDPOINT << "Sent SUBSCRIBE_DONE message for "
                  << subscribe_id;
  // Erase first: OutgoingDataStream destructors look the subscription up by
  // ID and must see it as gone while the streams are being reset.
  published_subscriptions_.erase(it);
  for (webtransport::StreamId stream_id : streams_to_reset) {
    webtransport::Stream* stream = session_->GetStreamById(stream_id);
    if (stream == nullptr) {
      continue;
    }
    stream->ResetWithUserCode(kResetCodeSubscriptionGone);
  }
  return true;
}
// Assigns a subscribe ID and track alias to `message`, sends the SUBSCRIBE,
// and records it in active_subscribes_ until SUBSCRIBE_OK/SUBSCRIBE_ERROR
// arrives. Returns false (nothing sent) if the peer is a pure subscriber or
// the peer's MAX_SUBSCRIBE_ID budget is exhausted. Mutates `message` (ID,
// alias, and possibly the object-ack window parameter).
bool MoqtSession::Subscribe(MoqtSubscribe& message,
                            RemoteTrack::Visitor* visitor) {
  if (peer_role_ == MoqtRole::kSubscriber) {
    QUIC_DLOG(INFO) << ENDPOINT << "Tried to send SUBSCRIBE to subscriber peer";
    return false;
  }
  // Stay within the subscribe-ID budget advertised by the peer.
  if (next_subscribe_id_ > peer_max_subscribe_id_) {
    QUIC_DLOG(INFO) << ENDPOINT << "Tried to send SUBSCRIBE with ID "
                    << next_subscribe_id_
                    << " which is greater than the maximum ID "
                    << peer_max_subscribe_id_;
    return false;
  }
  message.subscribe_id = next_subscribe_id_++;
  // Reuse a previously recorded alias for this track if one exists (e.g. set
  // by a SUBSCRIBE_ERROR with kRetryTrackAlias); otherwise allocate a new one.
  auto it = remote_track_aliases_.find(message.full_track_name);
  if (it != remote_track_aliases_.end()) {
    message.track_alias = it->second;
    if (message.track_alias >= next_remote_track_alias_) {
      next_remote_track_alias_ = message.track_alias + 1;
    }
  } else {
    message.track_alias = next_remote_track_alias_++;
  }
  if (SupportsObjectAck() && visitor != nullptr) {
    // Give the visitor a callback for sending OBJECT_ACKs on this subscribe.
    visitor->OnCanAckObjects(absl::bind_front(&MoqtSession::SendObjectAck, this,
                                              message.subscribe_id));
  } else {
    // Object acks unsupported: strip the window parameter so it is not sent.
    QUICHE_DLOG_IF(WARNING, message.parameters.object_ack_window.has_value())
        << "Attempting to set object_ack_window on a connection that does not "
           "support it.";
    message.parameters.object_ack_window = std::nullopt;
  }
  SendControlMessage(framer_.SerializeSubscribe(message));
  QUIC_DLOG(INFO) << ENDPOINT << "Sent SUBSCRIBE message for "
                  << message.full_track_name;
  active_subscribes_.try_emplace(message.subscribe_id, message, visitor);
  return true;
}
// Opens an outgoing data stream for `subscription_id` starting at
// `first_object`, or queues the request when stream flow control blocks it.
// Returns nullptr when the stream was not opened (unknown ID or queued).
webtransport::Stream* MoqtSession::OpenOrQueueDataStream(
    uint64_t subscription_id, FullSequence first_object) {
  auto subscription_it = published_subscriptions_.find(subscription_id);
  if (subscription_it == published_subscriptions_.end()) {
    return nullptr;
  }
  PublishedSubscription& subscription = *subscription_it->second;
  if (session_->CanOpenNextOutgoingUnidirectionalStream()) {
    return OpenDataStream(subscription, first_object);
  }
  // Blocked by stream limits: retry in
  // OnCanCreateNewOutgoingUnidirectionalStream().
  subscription.AddQueuedOutgoingDataStream(first_object);
  return nullptr;
}
// Unconditionally opens an outgoing unidirectional data stream for
// `subscription`, attaches an OutgoingDataStream visitor, and registers the
// stream with the subscription. The caller must have checked flow control.
webtransport::Stream* MoqtSession::OpenDataStream(
    PublishedSubscription& subscription, FullSequence first_object) {
  webtransport::Stream* stream = session_->OpenOutgoingUnidirectionalStream();
  if (stream == nullptr) {
    QUICHE_BUG(MoqtSession_OpenDataStream_blocked)
        << "OpenDataStream called when creation of new streams is blocked.";
    return nullptr;
  }
  stream->SetVisitor(std::make_unique<OutgoingDataStream>(
      this, stream, subscription, first_object));
  subscription.OnDataStreamCreated(stream->GetStreamId(), first_object);
  return stream;
}
// Flow-control callback: opens queued outgoing data streams, highest send
// order first, for as long as the transport allows new unidirectional
// streams and the queue is non-empty.
void MoqtSession::OnCanCreateNewOutgoingUnidirectionalStream() {
  while (!subscribes_with_queued_outgoing_data_streams_.empty() &&
         session_->CanOpenNextOutgoingUnidirectionalStream()) {
    // rbegin() is the entry with the highest send order.
    auto next = subscribes_with_queued_outgoing_data_streams_.rbegin();
    auto subscription = published_subscriptions_.find(next->subscription_id);
    if (subscription == published_subscriptions_.end()) {
      // Subscription no longer exists; drop the stale queue entry.
      // (++next).base() yields the forward iterator for the same element.
      subscribes_with_queued_outgoing_data_streams_.erase((++next).base());
      continue;
    }
    webtransport::Stream* stream =
        OpenDataStream(*subscription->second,
                       subscription->second->NextQueuedOutgoingDataStream());
    if (stream != nullptr) {
      // Kick off writing immediately now that the stream exists.
      stream->visitor()->OnCanWrite();
    }
  }
}
// Re-keys `subscribe_id`'s entry in the queued-stream priority set. A nullopt
// old order means "insert"; a nullopt new order means "remove"; equal orders
// are a no-op.
void MoqtSession::UpdateQueuedSendOrder(
    uint64_t subscribe_id,
    std::optional<webtransport::SendOrder> old_send_order,
    std::optional<webtransport::SendOrder> new_send_order) {
  if (new_send_order == old_send_order) {
    return;
  }
  if (old_send_order.has_value()) {
    subscribes_with_queued_outgoing_data_streams_.erase(
        SubscriptionWithQueuedStream{*old_send_order, subscribe_id});
  }
  if (!new_send_order.has_value()) {
    return;
  }
  subscribes_with_queued_outgoing_data_streams_.emplace(*new_send_order,
                                                        subscribe_id);
}
void MoqtSession::GrantMoreSubscribes(uint64_t num_subscribes) {
local_max_subscribe_id_ += num_subscribes;
MoqtMaxSubscribeId message;
message.max_subscribe_id = local_max_subscribe_id_;
SendControlMessage(framer_.SerializeMaxSubscribeId(message));
}
// Resolves an incoming object's track alias to its full track name and
// visitor. Handles objects that arrive before SUBSCRIBE_OK (the subscribe is
// still in active_subscribes_), and enforces that a track's forwarding
// preference never changes mid-track. Returns {empty name, nullptr} on
// failure.
std::pair<FullTrackName, RemoteTrack::Visitor*>
MoqtSession::TrackPropertiesFromAlias(const MoqtObject& message) {
  auto it = remote_tracks_.find(message.track_alias);
  RemoteTrack::Visitor* visitor = nullptr;
  if (it == remote_tracks_.end()) {
    // No established track yet; the object may belong to a pending subscribe.
    auto subscribe_it = active_subscribes_.find(message.subscribe_id);
    if (subscribe_it == active_subscribes_.end()) {
      return std::pair<FullTrackName, RemoteTrack::Visitor*>(
          {FullTrackName{}, nullptr});
    }
    ActiveSubscribe& subscribe = subscribe_it->second;
    visitor = subscribe.visitor;
    // Mark that data arrived; OnSubscribeErrorMessage treats a later error
    // after objects as a protocol violation.
    subscribe.received_object = true;
    if (subscribe.forwarding_preference.has_value()) {
      if (message.forwarding_preference != *subscribe.forwarding_preference) {
        Error(MoqtError::kProtocolViolation,
              "Forwarding preference changes mid-track");
        return std::pair<FullTrackName, RemoteTrack::Visitor*>(
            {FullTrackName{}, nullptr});
      }
    } else {
      // First object for this subscribe: remember its preference.
      subscribe.forwarding_preference = message.forwarding_preference;
    }
    return std::make_pair(subscribe.message.full_track_name, subscribe.visitor);
  }
  RemoteTrack& track = it->second;
  if (!track.CheckForwardingPreference(message.forwarding_preference)) {
    Error(MoqtError::kProtocolViolation,
          "Forwarding preference changes mid-track");
    return std::pair<FullTrackName, RemoteTrack::Visitor*>(
        {FullTrackName{}, nullptr});
  }
  return std::make_pair(track.full_track_name(), track.visitor());
}
// Drains every readable region of `stream` into `parser`; if the stream has
// finished, signals FIN to the parser with an empty final chunk.
template <class Parser>
static void ForwardStreamDataToParser(webtransport::Stream& stream,
                                      Parser& parser) {
  const bool fin_received =
      quiche::ProcessAllReadableRegions(stream, [&](absl::string_view chunk) {
        parser.ProcessData(chunk, /*fin=*/false);
      });
  if (fin_received) {
    parser.ProcessData("", /*fin=*/true);
  }
}
// Binds the control-stream visitor to `stream` and assigns it the dedicated
// control-stream send order within the MoQT send group.
MoqtSession::ControlStream::ControlStream(MoqtSession* session,
                                          webtransport::Stream* stream)
    : session_(session),
      stream_(stream),
      // The parser needs to know whether WebTransport framing is in use.
      parser_(session->parameters_.using_webtrans, *this) {
  stream_->SetPriority(
      webtransport::StreamPriority{kMoqtSendGroupId,
                                   kMoqtControlStreamSendOrder});
}
// Feeds all newly readable control-stream bytes into the message parser.
void MoqtSession::ControlStream::OnCanRead() {
  ForwardStreamDataToParser(*stream_, parser_);
}
// Intentionally empty: SendOrBufferMessage() writes with
// buffer_unconditionally set, so there is never deferred data to flush here.
void MoqtSession::ControlStream::OnCanWrite() {
}
// A RESET_STREAM on the control stream is fatal: the whole MoQT session is
// closed as a protocol violation.
void MoqtSession::ControlStream::OnResetStreamReceived(
    webtransport::StreamErrorCode error) {
  session_->Error(
      MoqtError::kProtocolViolation,
      absl::StrCat("Control stream reset with error code ", error));
}
// A STOP_SENDING on the control stream is fatal: the whole MoQT session is
// closed as a protocol violation.
// Fix: the diagnostic previously said "reset" (copy-pasted from
// OnResetStreamReceived), which misattributed the triggering frame.
void MoqtSession::ControlStream::OnStopSendingReceived(
    webtransport::StreamErrorCode error) {
  session_->Error(
      MoqtError::kProtocolViolation,
      absl::StrCat("Control stream stop sending with error code ", error));
}
// Server-side handler for CLIENT_SETUP: validates perspective and version,
// records peer capabilities, replies with SERVER_SETUP, and notifies the
// application that the session is established.
void MoqtSession::ControlStream::OnClientSetupMessage(
    const MoqtClientSetup& message) {
  // The stream that carried CLIENT_SETUP becomes the session's control stream.
  session_->control_stream_ = stream_->GetStreamId();
  if (perspective() == Perspective::IS_CLIENT) {
    session_->Error(MoqtError::kProtocolViolation,
                    "Received CLIENT_SETUP from server");
    return;
  }
  if (absl::c_find(message.supported_versions, session_->parameters_.version) ==
      message.supported_versions.end()) {
    session_->Error(MoqtError::kProtocolViolation,
                    absl::StrCat("Version mismatch: expected 0x",
                                 absl::Hex(session_->parameters_.version)));
    return;
  }
  session_->peer_supports_object_ack_ = message.supports_object_ack;
  QUICHE_DLOG(INFO) << ENDPOINT << "Received the SETUP message";
  if (session_->parameters_.perspective == Perspective::IS_SERVER) {
    MoqtServerSetup response;
    response.selected_version = session_->parameters_.version;
    response.role = MoqtRole::kPubSub;
    response.max_subscribe_id = session_->parameters_.max_subscribe_id;
    response.supports_object_ack = session_->parameters_.support_object_acks;
    SendOrBufferMessage(session_->framer_.SerializeServerSetup(response));
    QUIC_DLOG(INFO) << ENDPOINT << "Sent the SETUP message";
  }
  if (message.max_subscribe_id.has_value()) {
    session_->peer_max_subscribe_id_ = *message.max_subscribe_id;
  }
  // NOTE(review): the established callback fires before peer_role_ is
  // recorded below — confirm no callback relies on reading the peer role.
  std::move(session_->callbacks_.session_established_callback)();
  session_->peer_role_ = *message.role;
}
// Client-side handler for SERVER_SETUP: validates perspective and the
// negotiated version, records peer capabilities, and notifies the
// application that the session is established.
void MoqtSession::ControlStream::OnServerSetupMessage(
    const MoqtServerSetup& message) {
  if (perspective() == Perspective::IS_SERVER) {
    session_->Error(MoqtError::kProtocolViolation,
                    "Received SERVER_SETUP from client");
    return;
  }
  if (message.selected_version != session_->parameters_.version) {
    session_->Error(MoqtError::kProtocolViolation,
                    absl::StrCat("Version mismatch: expected 0x",
                                 absl::Hex(session_->parameters_.version)));
    return;
  }
  session_->peer_supports_object_ack_ = message.supports_object_ack;
  QUIC_DLOG(INFO) << ENDPOINT << "Received the SETUP message";
  if (message.max_subscribe_id.has_value()) {
    session_->peer_max_subscribe_id_ = *message.max_subscribe_id;
  }
  // NOTE(review): the established callback fires before peer_role_ is
  // recorded below — confirm no callback relies on reading the peer role.
  std::move(session_->callbacks_.session_established_callback)();
  session_->peer_role_ = *message.role;
}
void MoqtSession::ControlStream::SendSubscribeError(
const MoqtSubscribe& message, SubscribeErrorCode error_code,
absl::string_view reason_phrase, uint64_t track_alias) {
MoqtSubscribeError subscribe_error;
subscribe_error.subscribe_id = message.subscribe_id;
subscribe_error.error_code = error_code;
subscribe_error.reason_phrase = reason_phrase;
subscribe_error.track_alias = track_alias;
SendOrBufferMessage(
session_->framer_.SerializeSubscribeError(subscribe_error));
}
// Handles an incoming SUBSCRIBE: validates the peer role and subscribe-ID
// budget, asks the application's publisher for the track, registers a
// PublishedSubscription, replies with SUBSCRIBE_OK (or SUBSCRIBE_ERROR on
// failure), and backfills already-published objects when the track has data.
// Fix: added the missing `return;` after the duplicate-subscribe-ID error —
// previously the handler sent SUBSCRIBE_ERROR and then fell through to also
// send SUBSCRIBE_OK (and backfill) for the same subscribe ID.
void MoqtSession::ControlStream::OnSubscribeMessage(
    const MoqtSubscribe& message) {
  if (session_->peer_role_ == MoqtRole::kPublisher) {
    QUIC_DLOG(INFO) << ENDPOINT << "Publisher peer sent SUBSCRIBE";
    session_->Error(MoqtError::kProtocolViolation,
                    "Received SUBSCRIBE from publisher");
    return;
  }
  if (message.subscribe_id > session_->local_max_subscribe_id_) {
    QUIC_DLOG(INFO) << ENDPOINT << "Received SUBSCRIBE with too large ID";
    session_->Error(MoqtError::kTooManySubscribes,
                    "Received SUBSCRIBE with too large ID");
    return;
  }
  QUIC_DLOG(INFO) << ENDPOINT << "Received a SUBSCRIBE for "
                  << message.full_track_name;
  const FullTrackName& track_name = message.full_track_name;
  absl::StatusOr<std::shared_ptr<MoqtTrackPublisher>> track_publisher =
      session_->publisher_->GetTrack(track_name);
  if (!track_publisher.ok()) {
    QUIC_DLOG(INFO) << ENDPOINT << "SUBSCRIBE for " << track_name
                    << " rejected by the application: "
                    << track_publisher.status();
    SendSubscribeError(message, SubscribeErrorCode::kTrackDoesNotExist,
                       track_publisher.status().message(), message.track_alias);
    return;
  }
  std::optional<FullSequence> largest_id;
  if (PublisherHasData(**track_publisher)) {
    largest_id = (*track_publisher)->GetLargestSequence();
  }
  MoqtDeliveryOrder delivery_order = (*track_publisher)->GetDeliveryOrder();
  // Hand any pre-registered monitoring interface over to the subscription.
  MoqtPublishingMonitorInterface* monitoring = nullptr;
  auto monitoring_it =
      session_->monitoring_interfaces_for_published_tracks_.find(track_name);
  if (monitoring_it !=
      session_->monitoring_interfaces_for_published_tracks_.end()) {
    monitoring = monitoring_it->second;
    session_->monitoring_interfaces_for_published_tracks_.erase(monitoring_it);
  }
  auto subscription = std::make_unique<MoqtSession::PublishedSubscription>(
      session_, *std::move(track_publisher), message, monitoring);
  auto [it, success] = session_->published_subscriptions_.emplace(
      message.subscribe_id, std::move(subscription));
  if (!success) {
    SendSubscribeError(message, SubscribeErrorCode::kInternalError,
                       "Duplicate subscribe ID", message.track_alias);
    return;  // Do not also send SUBSCRIBE_OK for the duplicate ID.
  }
  MoqtSubscribeOk subscribe_ok;
  subscribe_ok.subscribe_id = message.subscribe_id;
  subscribe_ok.group_order = delivery_order;
  subscribe_ok.largest_id = largest_id;
  SendOrBufferMessage(session_->framer_.SerializeSubscribeOk(subscribe_ok));
  if (largest_id.has_value()) {
    // The track already has objects: deliver the backlog within the window.
    it->second->Backfill();
  }
}
// Handles SUBSCRIBE_OK: promotes the pending entry in active_subscribes_ to
// a RemoteTrack keyed by track alias, re-checks any forwarding preference
// observed from early objects, and notifies the visitor of success.
void MoqtSession::ControlStream::OnSubscribeOkMessage(
    const MoqtSubscribeOk& message) {
  auto it = session_->active_subscribes_.find(message.subscribe_id);
  if (it == session_->active_subscribes_.end()) {
    session_->Error(MoqtError::kProtocolViolation,
                    "Received SUBSCRIBE_OK for nonexistent subscribe");
    return;
  }
  MoqtSubscribe& subscribe = it->second.message;
  QUIC_DLOG(INFO) << ENDPOINT << "Received the SUBSCRIBE_OK for "
                  << "subscribe_id = " << message.subscribe_id << " "
                  << subscribe.full_track_name;
  RemoteTrack::Visitor* visitor = it->second.visitor;
  auto [track_iter, new_entry] = session_->remote_tracks_.try_emplace(
      subscribe.track_alias, subscribe.full_track_name, subscribe.track_alias,
      visitor);
  // Objects may have arrived before SUBSCRIBE_OK and fixed a forwarding
  // preference; the track must agree with it.
  if (it->second.forwarding_preference.has_value()) {
    if (!track_iter->second.CheckForwardingPreference(
            *it->second.forwarding_preference)) {
      session_->Error(MoqtError::kProtocolViolation,
                      "Forwarding preference different in early objects");
      return;
    }
  }
  if (visitor != nullptr) {
    // nullopt reason == success.
    visitor->OnReply(subscribe.full_track_name, std::nullopt);
  }
  session_->active_subscribes_.erase(it);
}
// Handles SUBSCRIBE_ERROR: for kRetryTrackAlias, resubscribes with the
// peer-suggested alias; otherwise reports the failure to the visitor. An
// error arriving after objects were already received is a protocol violation.
void MoqtSession::ControlStream::OnSubscribeErrorMessage(
    const MoqtSubscribeError& message) {
  auto it = session_->active_subscribes_.find(message.subscribe_id);
  if (it == session_->active_subscribes_.end()) {
    session_->Error(MoqtError::kProtocolViolation,
                    "Received SUBSCRIBE_ERROR for nonexistent subscribe");
    return;
  }
  if (it->second.received_object) {
    session_->Error(MoqtError::kProtocolViolation,
                    "Received SUBSCRIBE_ERROR after object");
    return;
  }
  MoqtSubscribe& subscribe = it->second.message;
  QUIC_DLOG(INFO) << ENDPOINT << "Received the SUBSCRIBE_ERROR for "
                  << "subscribe_id = " << message.subscribe_id << " ("
                  << subscribe.full_track_name << ")"
                  << ", error = " << static_cast<int>(message.error_code)
                  << " (" << message.reason_phrase << ")";
  RemoteTrack::Visitor* visitor = it->second.visitor;
  if (message.error_code == SubscribeErrorCode::kRetryTrackAlias) {
    // Record the suggested alias so Subscribe() reuses it, then retry.
    session_->remote_track_aliases_[subscribe.full_track_name] =
        message.track_alias;
    session_->Subscribe(subscribe, visitor);
  } else if (visitor != nullptr) {
    visitor->OnReply(subscribe.full_track_name, message.reason_phrase);
  }
  session_->active_subscribes_.erase(it);
}
// Handles UNSUBSCRIBE by terminating the subscription with an empty reason.
void MoqtSession::ControlStream::OnUnsubscribeMessage(
    const MoqtUnsubscribe& message) {
  session_->SubscribeIsDone(message.subscribe_id,
                            SubscribeDoneCode::kUnsubscribed,
                            /*reason_phrase=*/"");
}
void MoqtSession::ControlStream::OnSubscribeUpdateMessage(
const MoqtSubscribeUpdate& message) {
auto it = session_->published_subscriptions_.find(message.subscribe_id);
if (it == session_->published_subscriptions_.end()) {
return;
}
FullSequence start(message.start_group, message.start_object);
std::optional<FullSequence> end;
if (message.end_group.has_value()) {
end = FullSequence(*message.end_group, message.end_object.has_value()
? *message.end_object
: UINT64_MAX);
}
it->second->Update(start, end, message.subscriber_priority);
}
// Handles an incoming ANNOUNCE: rejects it if the peer is a pure subscriber,
// otherwise consults the application callback and answers with ANNOUNCE_OK
// or ANNOUNCE_ERROR.
// Fix: the debug log previously said "Subscriber peer sent SUBSCRIBE" —
// copy-pasted from the SUBSCRIBE handler — although this path handles
// ANNOUNCE.
void MoqtSession::ControlStream::OnAnnounceMessage(
    const MoqtAnnounce& message) {
  if (session_->peer_role_ == MoqtRole::kSubscriber) {
    QUIC_DLOG(INFO) << ENDPOINT << "Subscriber peer sent ANNOUNCE";
    session_->Error(MoqtError::kProtocolViolation,
                    "Received ANNOUNCE from Subscriber");
    return;
  }
  // The application decides whether to accept the namespace.
  std::optional<MoqtAnnounceErrorReason> error =
      session_->callbacks_.incoming_announce_callback(message.track_namespace);
  if (error.has_value()) {
    MoqtAnnounceError reply;
    reply.track_namespace = message.track_namespace;
    reply.error_code = error->error_code;
    reply.reason_phrase = error->reason_phrase;
    SendOrBufferMessage(session_->framer_.SerializeAnnounceError(reply));
    return;
  }
  MoqtAnnounceOk ok;
  ok.track_namespace = message.track_namespace;
  SendOrBufferMessage(session_->framer_.SerializeAnnounceOk(ok));
}
// Handles ANNOUNCE_OK: completes the matching pending announce with success
// (nullopt error). An OK for an unknown namespace is a protocol violation.
void MoqtSession::ControlStream::OnAnnounceOkMessage(
    const MoqtAnnounceOk& message) {
  auto pending =
      session_->pending_outgoing_announces_.find(message.track_namespace);
  if (pending == session_->pending_outgoing_announces_.end()) {
    session_->Error(MoqtError::kProtocolViolation,
                    "Received ANNOUNCE_OK for nonexistent announce");
    return;
  }
  std::move(pending->second)(message.track_namespace, std::nullopt);
  session_->pending_outgoing_announces_.erase(pending);
}
void MoqtSession::ControlStream::OnAnnounceErrorMessage(
const MoqtAnnounceError& message) {
auto it = session_->pending_outgoing_announces_.find(message.track_namespace);
if (it == session_->pending_outgoing_announces_.end()) {
session_->Error(MoqtError::kProtocolViolation,
"Received ANNOUNCE_ERROR for nonexistent announce");
return;
}
std::move(it->second)(
message.track_namespace,
MoqtAnnounceErrorReason{message.error_code,
std::string(message.reason_phrase)});
session_->pending_outgoing_announces_.erase(it);
}
// ANNOUNCE_CANCEL is currently ignored.
// TODO(review): implement cancellation of the corresponding announce.
void MoqtSession::ControlStream::OnAnnounceCancelMessage(
    const MoqtAnnounceCancel& message) {
}
// Handles MAX_SUBSCRIBE_ID: raises the peer's advertised subscribe-ID budget.
// A subscriber peer must not send it, and the limit may never decrease.
void MoqtSession::ControlStream::OnMaxSubscribeIdMessage(
    const MoqtMaxSubscribeId& message) {
  if (session_->peer_role_ == MoqtRole::kSubscriber) {
    QUIC_DLOG(INFO) << ENDPOINT << "Subscriber peer sent MAX_SUBSCRIBE_ID";
    session_->Error(MoqtError::kProtocolViolation,
                    "Received MAX_SUBSCRIBE_ID from Subscriber");
    return;
  }
  const bool decreased =
      message.max_subscribe_id < session_->peer_max_subscribe_id_;
  if (decreased) {
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Peer sent MAX_SUBSCRIBE_ID message with "
                       "lower value than previous";
    session_->Error(MoqtError::kProtocolViolation,
                    "MAX_SUBSCRIBE_ID message has lower value than previous");
    return;
  }
  session_->peer_max_subscribe_id_ = message.max_subscribe_id;
}
// A malformed control message closes the session with the parser's code.
void MoqtSession::ControlStream::OnParsingError(MoqtError error_code,
                                                absl::string_view reason) {
  std::string details = absl::StrCat("Parse error: ", reason);
  session_->Error(error_code, details);
}
// Writes `message` on the control stream, buffering unconditionally so
// control messages are never dropped due to flow control. A write failure is
// fatal to the session.
void MoqtSession::ControlStream::SendOrBufferMessage(
    quiche::QuicheBuffer message, bool fin) {
  quiche::StreamWriteOptions options;
  options.set_send_fin(fin);
  options.set_buffer_unconditionally(true);
  std::array<absl::string_view, 1> write_vector = {message.AsStringView()};
  // `success` was a misleading name for an absl::Status; renamed.
  const absl::Status status =
      stream_->Writev(absl::MakeSpan(write_vector), options);
  if (!status.ok()) {
    session_->Error(MoqtError::kInternalError,
                    "Failed to write a control message");
  }
}
// Delivers a (possibly partial) object fragment from a data stream to the
// track visitor. Unless the application opted into partial delivery, the
// fragments are accumulated in partial_object_ and delivered as one piece
// when the object ends.
void MoqtSession::IncomingDataStream::OnObjectMessage(const MoqtObject& message,
                                                      absl::string_view payload,
                                                      bool end_of_message) {
  QUICHE_DVLOG(1) << ENDPOINT << "Received OBJECT message on stream "
                  << stream_->GetStreamId() << " for subscribe_id "
                  << message.subscribe_id << " for track alias "
                  << message.track_alias << " with sequence "
                  << message.group_id << ":" << message.object_id
                  << " priority " << message.publisher_priority
                  << " forwarding_preference "
                  << MoqtForwardingPreferenceToString(
                         message.forwarding_preference)
                  << " length " << payload.size() << " length "
                  << message.payload_length << (end_of_message ? "F" : "");
  if (!session_->parameters_.deliver_partial_objects) {
    if (!end_of_message) {
      // Accumulate until the object is complete.
      if (partial_object_.empty()) {
        // Reserve the full object size up front to avoid regrowth.
        partial_object_.reserve(message.payload_length);
      }
      absl::StrAppend(&partial_object_, payload);
      return;
    }
    if (!partial_object_.empty()) {
      // Final fragment of a buffered object: deliver the whole buffer.
      absl::StrAppend(&partial_object_, payload);
      payload = absl::string_view(partial_object_);
    }
  }
  auto [full_track_name, visitor] = session_->TrackPropertiesFromAlias(message);
  if (visitor != nullptr) {
    visitor->OnObjectFragment(
        full_track_name, message.group_id, message.object_id,
        message.publisher_priority, message.object_status,
        message.forwarding_preference, payload, end_of_message);
  }
  partial_object_.clear();
}
// Feeds all newly readable data-stream bytes into the object parser.
void MoqtSession::IncomingDataStream::OnCanRead() {
  ForwardStreamDataToParser(*stream_, parser_);
}
// Control messages are only legal on the control stream; receiving one here
// is fatal to the session.
void MoqtSession::IncomingDataStream::OnControlMessageReceived() {
  session_->Error(MoqtError::kProtocolViolation,
                  "Received a control message on a data stream");
}
// A malformed data-stream message closes the session with the parser's code.
void MoqtSession::IncomingDataStream::OnParsingError(MoqtError error_code,
                                                     absl::string_view reason) {
  std::string details = absl::StrCat("Parse error: ", reason);
  session_->Error(error_code, details);
}
// Creates the server-side state for an accepted SUBSCRIBE: registers as a
// listener on the track publisher and, if monitoring was requested, reports
// whether the subscriber enabled object acks.
MoqtSession::PublishedSubscription::PublishedSubscription(
    MoqtSession* session, std::shared_ptr<MoqtTrackPublisher> track_publisher,
    const MoqtSubscribe& subscribe,
    MoqtPublishingMonitorInterface* monitoring_interface)
    : subscription_id_(subscribe.subscribe_id),
      session_(session),
      track_publisher_(track_publisher),
      track_alias_(subscribe.track_alias),
      // The delivery window is derived from the SUBSCRIBE's range fields.
      window_(SubscribeMessageToWindow(subscribe, *track_publisher)),
      subscriber_priority_(subscribe.subscriber_priority),
      subscriber_delivery_order_(subscribe.group_order),
      monitoring_interface_(monitoring_interface) {
  track_publisher->AddObjectListener(this);
  if (monitoring_interface_ != nullptr) {
    monitoring_interface_->OnObjectAckSupportKnown(
        subscribe.parameters.object_ack_window.has_value());
  }
  QUIC_DLOG(INFO) << ENDPOINT << "Created subscription for "
                  << subscribe.full_track_name;
}
// Unregisters from the publisher so no further object notifications arrive.
MoqtSession::PublishedSubscription::~PublishedSubscription() {
  track_publisher_->RemoveObjectListener(this);
}
// Returns the sequence-to-stream map, creating it on first use. The map is
// built lazily because it is keyed on the track's forwarding preference; by
// the time it is first needed the track must have data (see the DCHECK).
SendStreamMap& MoqtSession::PublishedSubscription::stream_map() {
  if (!lazily_initialized_stream_map_.has_value()) {
    QUICHE_DCHECK(
        DoesTrackStatusImplyHavingData(*track_publisher_->GetTrackStatus()));
    lazily_initialized_stream_map_.emplace(
        track_publisher_->GetForwardingPreference());
  }
  return *lazily_initialized_stream_map_;
}
// Applies a SUBSCRIBE_UPDATE: adjusts the delivery window bounds and records
// the new subscriber priority.
void MoqtSession::PublishedSubscription::Update(
    FullSequence start, std::optional<FullSequence> end,
    MoqtPriority subscriber_priority) {
  subscriber_priority_ = subscriber_priority;
  window_.UpdateStartEnd(start, end);
}
// Changes the subscriber priority. If outgoing data streams are queued at
// session level, the queue is keyed by a send order that embeds the
// subscriber priority, so the session's entry must be re-keyed.
// Fix: removed the stray `;` after the function body (-Wextra-semi).
void MoqtSession::PublishedSubscription::set_subscriber_priority(
    MoqtPriority priority) {
  if (priority == subscriber_priority_) {
    return;
  }
  if (queued_outgoing_data_streams_.empty()) {
    subscriber_priority_ = priority;
    return;
  }
  // Queue entries store send orders with the subscriber priority zeroed out;
  // FinalizeSendOrder() folds the current subscriber_priority_ back in.
  webtransport::SendOrder old_send_order =
      FinalizeSendOrder(queued_outgoing_data_streams_.rbegin()->first);
  subscriber_priority_ = priority;
  session_->UpdateQueuedSendOrder(subscription_id_, old_send_order,
                                  FinalizeSendOrder(old_send_order));
}
// Publisher callback: a new object exists. If it falls inside the delivery
// window, it is sent as a datagram, written onto the existing stream for its
// sequence, or a new data stream is opened (or queued) for it.
void MoqtSession::PublishedSubscription::OnNewObjectAvailable(
    FullSequence sequence) {
  if (!window_.InWindow(sequence)) {
    return;
  }
  MoqtForwardingPreference forwarding_preference =
      track_publisher_->GetForwardingPreference();
  if (forwarding_preference == MoqtForwardingPreference::kDatagram) {
    SendDatagram(sequence);
    return;
  }
  std::optional<webtransport::StreamId> stream_id =
      stream_map().GetStreamForSequence(sequence);
  webtransport::Stream* raw_stream = nullptr;
  if (stream_id.has_value()) {
    raw_stream = session_->session_->GetStreamById(*stream_id);
  } else {
    // May return nullptr and queue the stream if flow control blocks it.
    raw_stream = session_->OpenOrQueueDataStream(subscription_id_, sequence);
  }
  if (raw_stream == nullptr) {
    return;
  }
  OutgoingDataStream* stream =
      static_cast<OutgoingDataStream*>(raw_stream->visitor());
  stream->SendObjects(*this);
}
// Delivers already-cached objects from the window start up to the largest
// published sequence. Objects that map to the same stream (per the
// forwarding preference) are only kicked off once: OnNewObjectAvailable()
// sends all pending objects for that stream.
void MoqtSession::PublishedSubscription::Backfill() {
  const FullSequence start = window_.start();
  const FullSequence end = track_publisher_->GetLargestSequence();
  const MoqtForwardingPreference preference =
      track_publisher_->GetForwardingPreference();
  absl::flat_hash_set<ReducedSequenceIndex> already_opened;
  std::vector<FullSequence> objects =
      track_publisher_->GetCachedObjectsInRange(start, end);
  QUICHE_DCHECK(absl::c_is_sorted(objects));
  for (FullSequence sequence : objects) {
    auto [it, was_missing] =
        already_opened.insert(ReducedSequenceIndex(sequence, preference));
    if (!was_missing) {
      // This sequence's stream was already handled by an earlier object.
      continue;
    }
    OnNewObjectAvailable(sequence);
  }
}
// Lists every data stream currently associated with this subscription; empty
// if the stream map was never created (no data streams ever opened).
std::vector<webtransport::StreamId>
MoqtSession::PublishedSubscription::GetAllStreams() const {
  return lazily_initialized_stream_map_.has_value()
             ? lazily_initialized_stream_map_->GetAllStreams()
             : std::vector<webtransport::StreamId>();
}
// Computes the WebTransport send order for the stream that would carry
// `sequence`, combining subscriber priority, publisher priority, and the
// effective delivery order (subscriber preference wins over the publisher's).
// Fix: removed the unreachable `break` statements that followed `return`.
webtransport::SendOrder MoqtSession::PublishedSubscription::GetSendOrder(
    FullSequence sequence) const {
  MoqtForwardingPreference forwarding_preference =
      track_publisher_->GetForwardingPreference();
  MoqtPriority publisher_priority = track_publisher_->GetPublisherPriority();
  MoqtDeliveryOrder delivery_order = subscriber_delivery_order().value_or(
      track_publisher_->GetDeliveryOrder());
  switch (forwarding_preference) {
    case MoqtForwardingPreference::kTrack:
      // One stream for the whole track: the sequence does not matter.
      return SendOrderForStream(subscriber_priority_, publisher_priority,
                                0, delivery_order);
    case MoqtForwardingPreference::kSubgroup:
      return SendOrderForStream(subscriber_priority_, publisher_priority,
                                sequence.group, sequence.subgroup,
                                delivery_order);
    case MoqtForwardingPreference::kDatagram:
      // Datagrams do not use streams; this should never be queried.
      QUICHE_NOTREACHED();
      return 0;
  }
}
// Queues a future data stream starting at `first_object`. Entries are keyed
// by send order with the subscriber-priority bits zeroed (so a priority
// change does not require re-keying every entry). The session-level queue
// only tracks this subscription's single highest send order, and is updated
// when that maximum changes.
void MoqtSession::PublishedSubscription::AddQueuedOutgoingDataStream(
    FullSequence first_object) {
  std::optional<webtransport::SendOrder> start_send_order =
      queued_outgoing_data_streams_.empty()
          ? std::optional<webtransport::SendOrder>()
          : queued_outgoing_data_streams_.rbegin()->first;
  webtransport::SendOrder send_order = GetSendOrder(first_object);
  // Store with subscriber priority zeroed; FinalizeSendOrder() restores it.
  queued_outgoing_data_streams_.emplace(
      UpdateSendOrderForSubscriberPriority(send_order, 0), first_object);
  if (!start_send_order.has_value()) {
    session_->UpdateQueuedSendOrder(subscription_id_, std::nullopt, send_order);
  } else if (*start_send_order < send_order) {
    // The new entry became this subscription's maximum; re-key the session.
    session_->UpdateQueuedSendOrder(
        subscription_id_, FinalizeSendOrder(*start_send_order), send_order);
  }
}
// Pops the queued data stream with the highest send order and returns its
// first object, keeping the session-level queue entry in sync with the new
// maximum (or removing it when the queue becomes empty).
FullSequence
MoqtSession::PublishedSubscription::NextQueuedOutgoingDataStream() {
  QUICHE_DCHECK(!queued_outgoing_data_streams_.empty());
  if (queued_outgoing_data_streams_.empty()) {
    // Defensive fallback for release builds.
    return FullSequence();
  }
  // rbegin() is the entry with the highest send order.
  auto it = queued_outgoing_data_streams_.rbegin();
  webtransport::SendOrder old_send_order = FinalizeSendOrder(it->first);
  FullSequence first_object = it->second;
  // (++it).base() converts the reverse iterator for erase().
  queued_outgoing_data_streams_.erase((++it).base());
  if (queued_outgoing_data_streams_.empty()) {
    session_->UpdateQueuedSendOrder(subscription_id_, old_send_order,
                                    std::nullopt);
  } else {
    webtransport::SendOrder new_send_order =
        FinalizeSendOrder(queued_outgoing_data_streams_.rbegin()->first);
    if (old_send_order != new_send_order) {
      session_->UpdateQueuedSendOrder(subscription_id_, old_send_order,
                                      new_send_order);
    }
  }
  return first_object;
}
// Records that stream `id` now carries objects starting at `start_sequence`.
void MoqtSession::PublishedSubscription::OnDataStreamCreated(
    webtransport::StreamId id, FullSequence start_sequence) {
  stream_map().AddStream(start_sequence, id);
}
// Removes stream `id` (which had reached `end_sequence`) from the map.
void MoqtSession::PublishedSubscription::OnDataStreamDestroyed(
    webtransport::StreamId id, FullSequence end_sequence) {
  stream_map().RemoveStream(end_sequence, id);
}
// Records that |sequence| was delivered, tracking the largest sequence sent
// on this subscription so far.
void MoqtSession::PublishedSubscription::OnObjectSent(FullSequence sequence) {
  largest_sent_ = largest_sent_.has_value()
                      ? std::max(*largest_sent_, sequence)
                      : sequence;
}
// Binds this outgoing data stream to its session and subscription, recording
// the first object it will send. |session_liveness_| lets the destructor
// detect whether the session object still exists.
MoqtSession::OutgoingDataStream::OutgoingDataStream(
    MoqtSession* session, webtransport::Stream* stream,
    PublishedSubscription& subscription, FullSequence first_object)
    : session_(session),
      stream_(stream),
      subscription_id_(subscription.subscription_id()),
      next_object_(first_object),
      session_liveness_(session->liveness_token_) {
  // Set the stream's initial WebTransport priority from the subscription.
  UpdateSendOrder(subscription);
}
// Unregisters this stream from its subscription, unless the owning session
// has already been destroyed (detected via the liveness token).
MoqtSession::OutgoingDataStream::~OutgoingDataStream() {
  if (!session_liveness_.expired()) {
    auto subscription_it =
        session_->published_subscriptions_.find(subscription_id_);
    if (subscription_it != session_->published_subscriptions_.end()) {
      subscription_it->second->OnDataStreamDestroyed(stream_->GetStreamId(),
                                                     next_object_);
    }
  }
}
// Flow-control callback: resume sending cached objects if the subscription
// backing this stream is still valid.
void MoqtSession::OutgoingDataStream::OnCanWrite() {
  if (PublishedSubscription* subscription = GetSubscriptionIfValid()) {
    SendObjects(*subscription);
  }
}
// Resolves subscription_id_ to a live PublishedSubscription. Returns nullptr
// (after resetting the stream or signaling a session error, as appropriate)
// when the stream must no longer send.
MoqtSession::PublishedSubscription*
MoqtSession::OutgoingDataStream::GetSubscriptionIfValid() {
  auto it = session_->published_subscriptions_.find(subscription_id_);
  if (it == session_->published_subscriptions_.end()) {
    // The subscription was torn down; tell the peer via a reset.
    stream_->ResetWithUserCode(kResetCodeSubscriptionGone);
    return nullptr;
  }
  PublishedSubscription* subscription = it->second.get();
  MoqtTrackPublisher& publisher = subscription->publisher();
  absl::StatusOr<MoqtTrackStatusCode> status = publisher.GetTrackStatus();
  if (!status.ok()) {
    // No track status available; silently stop sending on this stream.
    return nullptr;
  }
  if (!DoesTrackStatusImplyHavingData(*status)) {
    // A data stream exists for a track that claims to have no data: this is
    // an application bug, treated as an internal session error.
    QUICHE_BUG(GetSubscriptionIfValid_InvalidTrackStatus)
        << "The track publisher returned a status indicating that no objects "
           "are available, but a stream for those objects exists.";
    session_->Error(MoqtError::kInternalError,
                    "Invalid track state provided by application");
    return nullptr;
  }
  return subscription;
}
// Writes cached objects in sequence order starting at next_object_ until
// flow control blocks, the cache has no next object, or the subscription
// window ends (in which case the stream is FINed).
void MoqtSession::OutgoingDataStream::SendObjects(
    PublishedSubscription& subscription) {
  while (stream_->CanWrite()) {
    std::optional<PublishedObject> object =
        subscription.publisher().GetCachedObject(next_object_);
    if (!object.has_value()) {
      // Next object not cached yet; resume on a later notification.
      break;
    }
    if (!subscription.InWindow(next_object_)) {
      // The next object falls outside the subscription window (presumably
      // narrowed after the stream was opened -- confirm); close cleanly.
      bool success = stream_->SendFin();
      QUICHE_BUG_IF(OutgoingDataStream_fin_due_to_update, !success)
          << "Writing FIN failed despite CanWrite() being true.";
      return;
    }
    SendNextObject(subscription, *std::move(object));
  }
}
// Serializes and writes one object (header + payload) onto the stream,
// advances next_object_ according to the forwarding preference, and sets the
// FIN bit when this object terminates the stream. Callers guarantee
// CanWrite() and that |object| is exactly next_object_.
void MoqtSession::OutgoingDataStream::SendNextObject(
    PublishedSubscription& subscription, PublishedObject object) {
  QUICHE_DCHECK(object.sequence == next_object_);
  QUICHE_DCHECK(stream_->CanWrite());
  MoqtTrackPublisher& publisher = subscription.publisher();
  QUICHE_DCHECK(DoesTrackStatusImplyHavingData(*publisher.GetTrackStatus()));
  MoqtForwardingPreference forwarding_preference =
      publisher.GetForwardingPreference();
  // Keep the stream's WebTransport priority in sync before each write.
  UpdateSendOrder(subscription);
  MoqtObject header;
  header.subscribe_id = subscription_id_;
  header.track_alias = subscription.track_alias();
  header.group_id = object.sequence.group;
  header.object_id = object.sequence.object;
  header.publisher_priority = publisher.GetPublisherPriority();
  header.object_status = object.status;
  header.forwarding_preference = forwarding_preference;
  // Only subgroup-preference streams carry a subgroup ID (always 0 here).
  header.subgroup_id =
      (forwarding_preference == MoqtForwardingPreference::kSubgroup)
          ? 0
          : std::optional<uint64_t>();
  header.payload_length = object.payload.length();
  // The first object on the stream carries the full stream header; later
  // objects use the short form.
  quiche::QuicheBuffer serialized_header =
      session_->framer_.SerializeObjectHeader(header, !stream_header_written_);
  bool fin = false;
  switch (forwarding_preference) {
    case MoqtForwardingPreference::kTrack:
      // Track streams span groups: group-terminating statuses roll over to
      // the next group, otherwise advance within the group.
      if (object.status == MoqtObjectStatus::kEndOfGroup ||
          object.status == MoqtObjectStatus::kGroupDoesNotExist) {
        ++next_object_.group;
        next_object_.object = 0;
      } else {
        ++next_object_.object;
      }
      fin = object.status == MoqtObjectStatus::kEndOfTrack ||
            !subscription.InWindow(next_object_);
      break;
    case MoqtForwardingPreference::kSubgroup:
      // Subgroup streams end at any terminal status or when the window ends.
      ++next_object_.object;
      fin = object.status == MoqtObjectStatus::kEndOfTrack ||
            object.status == MoqtObjectStatus::kEndOfGroup ||
            object.status == MoqtObjectStatus::kEndOfSubgroup ||
            object.status == MoqtObjectStatus::kGroupDoesNotExist ||
            !subscription.InWindow(next_object_);
      break;
    case MoqtForwardingPreference::kDatagram:
      // Datagrams never travel over data streams; see SendDatagram().
      QUICHE_NOTREACHED();
      break;
  }
  // Header and payload are written in a single vectored call so the FIN (if
  // any) lands on the same write.
  std::array<absl::string_view, 2> write_vector = {
      serialized_header.AsStringView(), object.payload.AsStringView()};
  quiche::StreamWriteOptions options;
  options.set_send_fin(fin);
  absl::Status write_status = stream_->Writev(write_vector, options);
  if (!write_status.ok()) {
    QUICHE_BUG(MoqtSession_SendNextObject_write_failed)
        << "Writing into MoQT stream failed despite CanWrite() being true "
           "before; status: "
        << write_status;
    session_->Error(MoqtError::kInternalError, "Data stream write error");
    return;
  }
  QUIC_DVLOG(1) << "Stream " << stream_->GetStreamId() << " successfully wrote "
                << object.sequence << ", fin = " << fin
                << ", next: " << next_object_;
  stream_header_written_ = true;
  subscription.OnObjectSent(object.sequence);
}
// Sends the cached object at |sequence| as a WebTransport datagram. The
// object must already be in the publisher's cache; callers are notified of
// cached objects before this is invoked.
void MoqtSession::PublishedSubscription::SendDatagram(FullSequence sequence) {
  std::optional<PublishedObject> object =
      track_publisher_->GetCachedObject(sequence);
  if (!object.has_value()) {
    QUICHE_BUG(PublishedSubscription_SendDatagram_object_not_in_cache)
        << "Got notification about an object that is not in the cache";
    return;
  }
  MoqtObject header;
  header.subscribe_id = subscription_id_;
  header.track_alias = track_alias();
  header.group_id = object->sequence.group;
  header.object_id = object->sequence.object;
  header.publisher_priority = track_publisher_->GetPublisherPriority();
  header.object_status = object->status;
  header.forwarding_preference = MoqtForwardingPreference::kDatagram;
  // Datagrams carry no subgroup ID.
  header.subgroup_id = std::nullopt;
  header.payload_length = object->payload.length();
  quiche::QuicheBuffer datagram = session_->framer_.SerializeObjectDatagram(
      header, object->payload.AsStringView());
  session_->session_->SendOrQueueDatagram(datagram.AsStringView());
  OnObjectSent(object->sequence);
}
// Refreshes the stream's WebTransport priority from the subscription's send
// order for the next object to be written.
void MoqtSession::OutgoingDataStream::UpdateSendOrder(
    PublishedSubscription& subscription) {
  const webtransport::SendOrder send_order =
      subscription.GetSendOrder(next_object_);
  stream_->SetPriority(
      webtransport::StreamPriority{kMoqtSendGroupId, send_order});
}
} | #include "quiche/quic/moqt/moqt_session.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/moqt/moqt_known_track_publisher.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/moqt_parser.h"
#include "quiche/quic/moqt/moqt_priority.h"
#include "quiche/quic/moqt/moqt_publisher.h"
#include "quiche/quic/moqt/moqt_track.h"
#include "quiche/quic/moqt/tools/moqt_mock_visitor.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/quiche_stream.h"
#include "quiche/web_transport/test_tools/mock_web_transport.h"
#include "quiche/web_transport/web_transport.h"
namespace moqt {
namespace test {
namespace {
using ::quic::test::MemSliceFromString;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Return;
using ::testing::StrictMock;
// Arbitrary stream IDs used by the mock streams in the tests below.
// NOTE(review): values appear chosen to match WebTransport stream-ID parity
// conventions (bidi vs. uni, client vs. server) -- confirm against the mocks.
constexpr webtransport::StreamId kControlStreamId = 4;
constexpr webtransport::StreamId kIncomingUniStreamId = 15;
constexpr webtransport::StreamId kOutgoingUniStreamId = 14;
// Decodes the leading varint of a serialized control message and interprets
// it as the message type; returns nullopt if the varint cannot be read.
static std::optional<MoqtMessageType> ExtractMessageType(
    const absl::string_view message) {
  uint64_t type_raw;
  quic::QuicDataReader reader(message);
  if (reader.ReadVarInt62(&type_raw)) {
    return static_cast<MoqtMessageType>(type_raw);
  }
  return std::nullopt;
}
// Builds a MockTrackPublisher whose defaults report an in-progress track
// with the given forwarding preference and largest sequence.
static std::shared_ptr<MockTrackPublisher> SetupPublisher(
    FullTrackName track_name, MoqtForwardingPreference forwarding_preference,
    FullSequence largest_sequence) {
  auto mock_publisher =
      std::make_shared<MockTrackPublisher>(std::move(track_name));
  ON_CALL(*mock_publisher, GetForwardingPreference())
      .WillByDefault(Return(forwarding_preference));
  ON_CALL(*mock_publisher, GetLargestSequence())
      .WillByDefault(Return(largest_sequence));
  ON_CALL(*mock_publisher, GetTrackStatus())
      .WillByDefault(Return(MoqtTrackStatusCode::kInProgress));
  return mock_publisher;
}
}
// Test-only accessor for MoqtSession's private state. Lets tests construct
// control/data stream visitors, inject subscriptions and remote tracks, and
// adjust subscribe-ID bookkeeping without going through the wire protocol.
class MoqtSessionPeer {
 public:
  // Wraps |stream| in a ControlStream visitor and registers kControlStreamId
  // as the session's control stream.
  static std::unique_ptr<MoqtControlParserVisitor> CreateControlStream(
      MoqtSession* session, webtransport::test::MockStream* stream) {
    auto new_stream =
        std::make_unique<MoqtSession::ControlStream>(session, stream);
    session->control_stream_ = kControlStreamId;
    EXPECT_CALL(*stream, visitor())
        .Times(AnyNumber())
        .WillRepeatedly(Return(new_stream.get()));
    return new_stream;
  }
  // Wraps |stream| in an IncomingDataStream visitor for feeding parsed
  // objects into the session.
  static std::unique_ptr<MoqtDataParserVisitor> CreateIncomingDataStream(
      MoqtSession* session, webtransport::Stream* stream) {
    auto new_stream =
        std::make_unique<MoqtSession::IncomingDataStream>(session, stream);
    return new_stream;
  }
  // Downcasts a generic stream visitor back to the control-stream parser
  // interface.
  static MoqtControlParserVisitor*
  FetchParserVisitorFromWebtransportStreamVisitor(
      MoqtSession* session, webtransport::StreamVisitor* visitor) {
    return static_cast<MoqtSession::ControlStream*>(visitor);
  }
  // Installs a subscribed remote track (and its alias mapping) directly.
  static void CreateRemoteTrack(MoqtSession* session, const FullTrackName& name,
                                RemoteTrack::Visitor* visitor,
                                uint64_t track_alias) {
    session->remote_tracks_.try_emplace(track_alias, name, track_alias,
                                        visitor);
    session->remote_track_aliases_.try_emplace(name, track_alias);
  }
  // Records a SUBSCRIBE as pending (sent but not yet acknowledged).
  static void AddActiveSubscribe(MoqtSession* session, uint64_t subscribe_id,
                                 MoqtSubscribe& subscribe,
                                 RemoteTrack::Visitor* visitor) {
    session->active_subscribes_[subscribe_id] = {subscribe, visitor};
  }
  // Creates a PublishedSubscription for |publisher| as if a SUBSCRIBE with
  // the given IDs/start had been received, and returns its listener.
  static MoqtObjectListener* AddSubscription(
      MoqtSession* session, std::shared_ptr<MoqtTrackPublisher> publisher,
      uint64_t subscribe_id, uint64_t track_alias, uint64_t start_group,
      uint64_t start_object) {
    MoqtSubscribe subscribe;
    subscribe.full_track_name = publisher->GetTrackName();
    subscribe.track_alias = track_alias;
    subscribe.subscribe_id = subscribe_id;
    subscribe.start_group = start_group;
    subscribe.start_object = start_object;
    subscribe.subscriber_priority = 0x80;
    session->published_subscriptions_.emplace(
        subscribe_id, std::make_unique<MoqtSession::PublishedSubscription>(
                          session, std::move(publisher), subscribe,
                          nullptr));
    return session->published_subscriptions_[subscribe_id].get();
  }
  // Drops a published subscription as if it had been unsubscribed.
  static void DeleteSubscription(MoqtSession* session, uint64_t subscribe_id) {
    session->published_subscriptions_.erase(subscribe_id);
  }
  // Overrides the subscriber priority of an existing subscription.
  static void UpdateSubscriberPriority(MoqtSession* session,
                                       uint64_t subscribe_id,
                                       MoqtPriority priority) {
    session->published_subscriptions_[subscribe_id]->set_subscriber_priority(
        priority);
  }
  static void set_peer_role(MoqtSession* session, MoqtRole role) {
    session->peer_role_ = role;
  }
  static RemoteTrack& remote_track(MoqtSession* session, uint64_t track_alias) {
    return session->remote_tracks_.find(track_alias)->second;
  }
  // Sets the next subscribe ID the session will assign to outgoing
  // SUBSCRIBEs; used to simulate approaching the peer's limit.
  static void set_next_subscribe_id(MoqtSession* session, uint64_t id) {
    session->next_subscribe_id_ = id;
  }
  // Sets the peer-advertised MAX_SUBSCRIBE_ID limit directly.
  static void set_peer_max_subscribe_id(MoqtSession* session, uint64_t id) {
    session->peer_max_subscribe_id_ = id;
  }
};
// Common fixture: a client-perspective MoqtSession over a strict mock
// WebTransport session, wired to a known-track publisher, with the peer's
// MAX_SUBSCRIBE_ID preset to the default.
class MoqtSessionTest : public quic::test::QuicTest {
 public:
  MoqtSessionTest()
      : session_(&mock_session_,
                 MoqtSessionParameters(quic::Perspective::IS_CLIENT, ""),
                 session_callbacks_.AsSessionCallbacks()) {
    session_.set_publisher(&publisher_);
    MoqtSessionPeer::set_peer_max_subscribe_id(&session_,
                                               kDefaultInitialMaxSubscribeId);
  }
  ~MoqtSessionTest() {
    // Every test must observe the session-deleted callback on teardown.
    EXPECT_CALL(session_callbacks_.session_deleted_callback, Call());
  }
  MockSessionCallbacks session_callbacks_;
  StrictMock<webtransport::test::MockSession> mock_session_;
  MoqtSession session_;
  MoqtKnownTrackPublisher publisher_;
};
// Sanity-checks the fixture's session perspective accessor.
TEST_F(MoqtSessionTest, Queries) {
  EXPECT_EQ(session_.perspective(), quic::Perspective::IS_CLIENT);
}
// Verifies that OnSessionReady() opens the control stream and sends
// CLIENT_SETUP, and that a subsequent SERVER_SETUP fires the
// session-established callback.
TEST_F(MoqtSessionTest, OnSessionReady) {
  webtransport::test::MockStream mock_stream;
  EXPECT_CALL(mock_session_, OpenOutgoingBidirectionalStream())
      .WillOnce(Return(&mock_stream));
  std::unique_ptr<webtransport::StreamVisitor> visitor;
  // Capture the visitor the session installs so we can feed it messages.
  EXPECT_CALL(mock_stream, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> new_visitor) {
        visitor = std::move(new_visitor);
      });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillOnce(Return(webtransport::StreamId(4)));
  EXPECT_CALL(mock_session_, GetStreamById(4)).WillOnce(Return(&mock_stream));
  bool correct_message = false;
  EXPECT_CALL(mock_stream, visitor()).WillOnce([&] { return visitor.get(); });
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kClientSetup);
        return absl::OkStatus();
      });
  session_.OnSessionReady();
  EXPECT_TRUE(correct_message);
  // Deliver SERVER_SETUP through the captured visitor to complete the
  // handshake.
  MoqtControlParserVisitor* stream_input =
      MoqtSessionPeer::FetchParserVisitorFromWebtransportStreamVisitor(
          &session_, visitor.get());
  MoqtServerSetup setup = {
      kDefaultMoqtVersion,
      MoqtRole::kPubSub,
  };
  EXPECT_CALL(session_callbacks_.session_established_callback, Call()).Times(1);
  stream_input->OnServerSetupMessage(setup);
}
// Verifies that a server-perspective session replies to CLIENT_SETUP with
// SERVER_SETUP and fires the session-established callback.
TEST_F(MoqtSessionTest, OnClientSetup) {
  MoqtSession server_session(
      &mock_session_, MoqtSessionParameters(quic::Perspective::IS_SERVER),
      session_callbacks_.AsSessionCallbacks());
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&server_session, &mock_stream);
  MoqtClientSetup setup = {
      {kDefaultMoqtVersion},
      MoqtRole::kPubSub,
      std::nullopt,
  };
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kServerSetup);
        return absl::OkStatus();
      });
  EXPECT_CALL(mock_stream, GetStreamId()).WillOnce(Return(0));
  EXPECT_CALL(session_callbacks_.session_established_callback, Call()).Times(1);
  stream_input->OnClientSetupMessage(setup);
}
// Verifies that a transport-level close propagates its error string to the
// session-terminated callback.
TEST_F(MoqtSessionTest, OnSessionClosed) {
  bool reported_error = false;
  EXPECT_CALL(session_callbacks_.session_terminated_callback, Call(_))
      .WillOnce([&](absl::string_view error_message) {
        reported_error = true;
        EXPECT_EQ(error_message, "foo");
      });
  session_.OnSessionClosed(webtransport::SessionErrorCode(1), "foo");
  EXPECT_TRUE(reported_error);
}
// Verifies that the session accepts incoming bidirectional streams until
// the accept call returns nullptr, installing a visitor and draining reads.
TEST_F(MoqtSessionTest, OnIncomingBidirectionalStream) {
  ::testing::InSequence seq;
  webtransport::test::MockStream mock_stream;
  StrictMock<webtransport::test::MockStreamVisitor> mock_stream_visitor;
  EXPECT_CALL(mock_session_, AcceptIncomingBidirectionalStream())
      .WillOnce(Return(&mock_stream));
  EXPECT_CALL(mock_stream, SetVisitor(_)).Times(1);
  EXPECT_CALL(mock_stream, visitor()).WillOnce(Return(&mock_stream_visitor));
  EXPECT_CALL(mock_stream_visitor, OnCanRead()).Times(1);
  // nullptr terminates the accept loop.
  EXPECT_CALL(mock_session_, AcceptIncomingBidirectionalStream())
      .WillOnce(Return(nullptr));
  session_.OnIncomingBidirectionalStreamAvailable();
}
// Same as OnIncomingBidirectionalStream, but for unidirectional streams.
TEST_F(MoqtSessionTest, OnIncomingUnidirectionalStream) {
  ::testing::InSequence seq;
  webtransport::test::MockStream mock_stream;
  StrictMock<webtransport::test::MockStreamVisitor> mock_stream_visitor;
  EXPECT_CALL(mock_session_, AcceptIncomingUnidirectionalStream())
      .WillOnce(Return(&mock_stream));
  EXPECT_CALL(mock_stream, SetVisitor(_)).Times(1);
  EXPECT_CALL(mock_stream, visitor()).WillOnce(Return(&mock_stream_visitor));
  EXPECT_CALL(mock_stream_visitor, OnCanRead()).Times(1);
  // nullptr terminates the accept loop.
  EXPECT_CALL(mock_session_, AcceptIncomingUnidirectionalStream())
      .WillOnce(Return(nullptr));
  session_.OnIncomingUnidirectionalStreamAvailable();
}
// Verifies that MoqtSession::Error() closes the transport session with the
// MoQT error code and reports the message through the terminated callback.
TEST_F(MoqtSessionTest, Error) {
  bool reported_error = false;
  EXPECT_CALL(
      mock_session_,
      CloseSession(static_cast<uint64_t>(MoqtError::kParameterLengthMismatch),
                   "foo"))
      .Times(1);
  EXPECT_CALL(session_callbacks_.session_terminated_callback, Call(_))
      .WillOnce([&](absl::string_view error_message) {
        reported_error = (error_message == "foo");
      });
  session_.Error(MoqtError::kParameterLengthMismatch, "foo");
  EXPECT_TRUE(reported_error);
}
// Verifies that a SUBSCRIBE for an unknown track yields SUBSCRIBE_ERROR, and
// that the same SUBSCRIBE succeeds once the track is added to the publisher.
TEST_F(MoqtSessionTest, AddLocalTrack) {
  MoqtSubscribe request = {
      1,
      2,
      FullTrackName({"foo", "bar"}),
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
      MoqtSubscribeParameters(),
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]),
                  MoqtMessageType::kSubscribeError);
        return absl::OkStatus();
      });
  stream_input->OnSubscribeMessage(request);
  EXPECT_TRUE(correct_message);
  // Add the track, then resubscribe and expect SUBSCRIBE_OK.
  auto track_publisher =
      std::make_shared<MockTrackPublisher>(FullTrackName("foo", "bar"));
  EXPECT_CALL(*track_publisher, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kStatusNotAvailable));
  publisher_.Add(track_publisher);
  // Reset to false so the final EXPECT_TRUE actually proves the Writev
  // callback ran (previously reset to true, which made the check vacuous).
  correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribeOk);
        return absl::OkStatus();
      });
  stream_input->OnSubscribeMessage(request);
  EXPECT_TRUE(correct_message);
}
// Verifies the ANNOUNCE/ANNOUNCE_OK round trip: the ANNOUNCE is written to
// the control stream and the OK resolves the callback with no error.
TEST_F(MoqtSessionTest, AnnounceWithOk) {
  testing::MockFunction<void(
      FullTrackName track_namespace,
      std::optional<MoqtAnnounceErrorReason> error_message)>
      announce_resolved_callback;
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false so the EXPECT_TRUE below actually proves the Writev
  // callback ran (previously initialized to true, making the check vacuous).
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kAnnounce);
        return absl::OkStatus();
      });
  session_.Announce(FullTrackName{"foo"},
                    announce_resolved_callback.AsStdFunction());
  EXPECT_TRUE(correct_message);
  MoqtAnnounceOk ok = {
      FullTrackName{"foo"},
  };
  correct_message = false;
  EXPECT_CALL(announce_resolved_callback, Call(_, _))
      .WillOnce([&](FullTrackName track_namespace,
                    std::optional<MoqtAnnounceErrorReason> error) {
        correct_message = true;
        EXPECT_EQ(track_namespace, FullTrackName{"foo"});
        EXPECT_FALSE(error.has_value());
      });
  stream_input->OnAnnounceOkMessage(ok);
  EXPECT_TRUE(correct_message);
}
// Verifies the ANNOUNCE/ANNOUNCE_ERROR round trip: the error code and reason
// phrase are surfaced through the announce-resolved callback.
TEST_F(MoqtSessionTest, AnnounceWithError) {
  testing::MockFunction<void(
      FullTrackName track_namespace,
      std::optional<MoqtAnnounceErrorReason> error_message)>
      announce_resolved_callback;
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false so the EXPECT_TRUE below actually proves the Writev
  // callback ran (previously initialized to true, making the check vacuous).
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kAnnounce);
        return absl::OkStatus();
      });
  session_.Announce(FullTrackName{"foo"},
                    announce_resolved_callback.AsStdFunction());
  EXPECT_TRUE(correct_message);
  MoqtAnnounceError error = {
      FullTrackName{"foo"},
      MoqtAnnounceErrorCode::kInternalError,
      "Test error",
  };
  correct_message = false;
  EXPECT_CALL(announce_resolved_callback, Call(_, _))
      .WillOnce([&](FullTrackName track_namespace,
                    std::optional<MoqtAnnounceErrorReason> error) {
        correct_message = true;
        EXPECT_EQ(track_namespace, FullTrackName{"foo"});
        ASSERT_TRUE(error.has_value());
        EXPECT_EQ(error->error_code, MoqtAnnounceErrorCode::kInternalError);
        EXPECT_EQ(error->reason_phrase, "Test error");
      });
  stream_input->OnAnnounceErrorMessage(error);
  EXPECT_TRUE(correct_message);
}
// Verifies that a SUBSCRIBE starting before the track's largest sequence
// still yields SUBSCRIBE_OK (past objects are simply not in the cache).
TEST_F(MoqtSessionTest, SubscribeForPast) {
  FullTrackName ftn("foo", "bar");
  auto track = std::make_shared<MockTrackPublisher>(ftn);
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kInProgress));
  EXPECT_CALL(*track, GetCachedObject(_)).WillRepeatedly([] {
    return std::optional<PublishedObject>();
  });
  EXPECT_CALL(*track, GetCachedObjectsInRange(_, _))
      .WillRepeatedly(Return(std::vector<FullSequence>()));
  EXPECT_CALL(*track, GetLargestSequence())
      .WillRepeatedly(Return(FullSequence(10, 20)));
  publisher_.Add(track);
  MoqtSubscribe request = {
      1,
      2,
      FullTrackName({"foo", "bar"}),
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
      MoqtSubscribeParameters(),
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  // Initialize to false so the EXPECT_TRUE below actually proves the Writev
  // callback ran (previously initialized to true, making the check vacuous).
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribeOk);
        return absl::OkStatus();
      });
  stream_input->OnSubscribeMessage(request);
  EXPECT_TRUE(correct_message);
}
// Verifies that a SUBSCRIBE whose ID exceeds the advertised
// MAX_SUBSCRIBE_ID closes the session with kTooManySubscribes.
TEST_F(MoqtSessionTest, SubscribeIdTooHigh) {
  MoqtSubscribe request = {
      kDefaultInitialMaxSubscribeId + 1,
      2,
      FullTrackName({"foo", "bar"}),
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
      MoqtSubscribeParameters(),
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kTooManySubscribes),
                           "Received SUBSCRIBE with too large ID"))
      .Times(1);
  stream_input->OnSubscribeMessage(request);
}
// Verifies that once next_subscribe_id_ reaches the peer's MAX_SUBSCRIBE_ID,
// further outgoing SUBSCRIBEs are rejected locally.
TEST_F(MoqtSessionTest, TooManySubscribes) {
  MoqtSessionPeer::set_next_subscribe_id(&session_,
                                         kDefaultInitialMaxSubscribeId);
  MockRemoteTrackVisitor remote_track_visitor;
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false and check it below; the original initialized it to
  // true and never checked it, so the Writev verification was dead code.
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribe);
        return absl::OkStatus();
      });
  // The first subscribe consumes the last available subscribe ID; the second
  // must fail without writing anything.
  EXPECT_TRUE(session_.SubscribeCurrentGroup(FullTrackName("foo", "bar"),
                                             &remote_track_visitor));
  EXPECT_TRUE(correct_message);
  EXPECT_FALSE(session_.SubscribeCurrentGroup(FullTrackName("foo", "bar"),
                                              &remote_track_visitor));
}
// Verifies the SUBSCRIBE/SUBSCRIBE_OK round trip: the SUBSCRIBE is written
// to the control stream and the OK is surfaced via OnReply() with no error.
TEST_F(MoqtSessionTest, SubscribeWithOk) {
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  MockRemoteTrackVisitor remote_track_visitor;
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false and check after the subscribe; the original
  // initialized this to true, so the Writev verification was never enforced.
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribe);
        return absl::OkStatus();
      });
  session_.SubscribeCurrentGroup(FullTrackName("foo", "bar"),
                                 &remote_track_visitor);
  EXPECT_TRUE(correct_message);
  MoqtSubscribeOk ok = {
      0,
      quic::QuicTimeDelta::FromMilliseconds(0),
  };
  correct_message = false;
  EXPECT_CALL(remote_track_visitor, OnReply(_, _))
      .WillOnce([&](const FullTrackName& ftn,
                    std::optional<absl::string_view> error_message) {
        correct_message = true;
        EXPECT_EQ(ftn, FullTrackName("foo", "bar"));
        EXPECT_FALSE(error_message.has_value());
      });
  stream_input->OnSubscribeOkMessage(ok);
  EXPECT_TRUE(correct_message);
}
// Verifies that a SUBSCRIBE blocked by the peer's MAX_SUBSCRIBE_ID succeeds
// after the peer raises the limit.
TEST_F(MoqtSessionTest, MaxSubscribeIdChangesResponse) {
  MoqtSessionPeer::set_next_subscribe_id(&session_,
                                         kDefaultInitialMaxSubscribeId + 1);
  MockRemoteTrackVisitor remote_track_visitor;
  EXPECT_FALSE(session_.SubscribeCurrentGroup(FullTrackName("foo", "bar"),
                                              &remote_track_visitor));
  MoqtMaxSubscribeId max_subscribe_id = {
      kDefaultInitialMaxSubscribeId + 1,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  stream_input->OnMaxSubscribeIdMessage(max_subscribe_id);
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false so the final EXPECT_TRUE actually proves the Writev
  // callback ran (previously initialized to true, making the check vacuous).
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribe);
        return absl::OkStatus();
      });
  EXPECT_TRUE(session_.SubscribeCurrentGroup(FullTrackName("foo", "bar"),
                                             &remote_track_visitor));
  EXPECT_TRUE(correct_message);
}
// Verifies that a MAX_SUBSCRIBE_ID lower than the previously advertised
// value is a protocol violation that closes the session.
TEST_F(MoqtSessionTest, LowerMaxSubscribeIdIsAnError) {
  MoqtMaxSubscribeId max_subscribe_id = {
      kDefaultInitialMaxSubscribeId - 1,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(
      mock_session_,
      CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                   "MAX_SUBSCRIBE_ID message has lower value than previous"))
      .Times(1);
  stream_input->OnMaxSubscribeIdMessage(max_subscribe_id);
}
// Verifies that GrantMoreSubscribes() emits MAX_SUBSCRIBE_ID, after which an
// incoming SUBSCRIBE using the newly granted ID is accepted.
TEST_F(MoqtSessionTest, GrantMoreSubscribes) {
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false so the EXPECT_TRUE below actually proves the Writev
  // callback ran (previously initialized to true, making the check vacuous).
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]),
                  MoqtMessageType::kMaxSubscribeId);
        return absl::OkStatus();
      });
  session_.GrantMoreSubscribes(1);
  EXPECT_TRUE(correct_message);
  MoqtSubscribe request = {
      kDefaultInitialMaxSubscribeId + 1,
      2,
      FullTrackName({"foo", "bar"}),
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
      MoqtSubscribeParameters(),
  };
  correct_message = false;
  FullTrackName ftn("foo", "bar");
  auto track = std::make_shared<MockTrackPublisher>(ftn);
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kInProgress));
  EXPECT_CALL(*track, GetCachedObject(_)).WillRepeatedly([] {
    return std::optional<PublishedObject>();
  });
  EXPECT_CALL(*track, GetCachedObjectsInRange(_, _))
      .WillRepeatedly(Return(std::vector<FullSequence>()));
  EXPECT_CALL(*track, GetLargestSequence())
      .WillRepeatedly(Return(FullSequence(10, 20)));
  publisher_.Add(track);
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribeOk);
        return absl::OkStatus();
      });
  stream_input->OnSubscribeMessage(request);
  EXPECT_TRUE(correct_message);
}
// Verifies the SUBSCRIBE/SUBSCRIBE_ERROR round trip: the error reason phrase
// is surfaced via OnReply().
TEST_F(MoqtSessionTest, SubscribeWithError) {
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  MockRemoteTrackVisitor remote_track_visitor;
  EXPECT_CALL(mock_session_, GetStreamById(_)).WillOnce(Return(&mock_stream));
  // Initialize to false and check after the subscribe; the original
  // initialized this to true, so the Writev verification was never enforced.
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kSubscribe);
        return absl::OkStatus();
      });
  session_.SubscribeCurrentGroup(FullTrackName("foo", "bar"),
                                 &remote_track_visitor);
  EXPECT_TRUE(correct_message);
  MoqtSubscribeError error = {
      0,
      SubscribeErrorCode::kInvalidRange,
      "deadbeef",
      2,
  };
  correct_message = false;
  EXPECT_CALL(remote_track_visitor, OnReply(_, _))
      .WillOnce([&](const FullTrackName& ftn,
                    std::optional<absl::string_view> error_message) {
        correct_message = true;
        EXPECT_EQ(ftn, FullTrackName("foo", "bar"));
        EXPECT_EQ(*error_message, "deadbeef");
      });
  stream_input->OnSubscribeErrorMessage(error);
  EXPECT_TRUE(correct_message);
}
// Verifies that an incoming ANNOUNCE accepted by the application callback
// (returning nullopt) is answered with ANNOUNCE_OK.
TEST_F(MoqtSessionTest, ReplyToAnnounce) {
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  MoqtAnnounce announce = {
      FullTrackName{"foo"},
  };
  bool correct_message = false;
  EXPECT_CALL(session_callbacks_.incoming_announce_callback,
              Call(FullTrackName{"foo"}))
      .WillOnce(Return(std::nullopt));
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kAnnounceOk);
        return absl::OkStatus();
      });
  stream_input->OnAnnounceMessage(announce);
  EXPECT_TRUE(correct_message);
}
// A complete object arriving on an incoming unidirectional data stream is
// delivered to the subscribed track's visitor exactly once.
TEST_F(MoqtSessionTest, IncomingObject) {
  MockRemoteTrackVisitor visitor;
  FullTrackName track_name("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSessionPeer::CreateRemoteTrack(&session_, track_name, &visitor, 2);
  // Field values mirror the subscription created above; the final field is
  // presumably the payload length (8 == strlen("deadbeef")) — confirm against
  // the MoqtObject declaration.
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      8,
  };
  webtransport::test::MockStream data_stream;
  std::unique_ptr<MoqtDataParserVisitor> parser =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &data_stream);
  EXPECT_CALL(visitor, OnObjectFragment(_, _, _, _, _, _, _, _)).Times(1);
  EXPECT_CALL(data_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  parser->OnObjectMessage(object, payload, true);
}
// When an object arrives in two fragments and partial delivery is not
// enabled, the session buffers the first fragment and delivers the visitor
// a single reassembled object.
TEST_F(MoqtSessionTest, IncomingPartialObject) {
  MockRemoteTrackVisitor visitor;
  FullTrackName track_name("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSessionPeer::CreateRemoteTrack(&session_, track_name, &visitor, 2);
  // The last field (16) is twice the fragment size, so two OnObjectMessage
  // calls below make up one object — TODO confirm it is the payload length.
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      16,
  };
  webtransport::test::MockStream data_stream;
  std::unique_ptr<MoqtDataParserVisitor> parser =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &data_stream);
  // Exactly one delivery despite two fragments.
  EXPECT_CALL(visitor, OnObjectFragment(_, _, _, _, _, _, _, _)).Times(1);
  EXPECT_CALL(data_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  parser->OnObjectMessage(object, payload, false);
  parser->OnObjectMessage(object, payload, true);
}
// With `deliver_partial_objects` enabled, the session does not reassemble:
// each of the two incoming fragments is forwarded to the visitor as it
// arrives, so OnObjectFragment fires twice.
TEST_F(MoqtSessionTest, IncomingPartialObjectNoBuffer) {
  // This test needs its own session so it can flip the partial-delivery knob.
  MoqtSessionParameters parameters(quic::Perspective::IS_CLIENT);
  parameters.deliver_partial_objects = true;
  MoqtSession session(&mock_session_, parameters,
                      session_callbacks_.AsSessionCallbacks());
  MockRemoteTrackVisitor visitor_;
  FullTrackName ftn("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSessionPeer::CreateRemoteTrack(&session, ftn, &visitor_, 2);
  // Last field (16) is twice the fragment size — presumably the payload
  // length; confirm against the MoqtObject declaration.
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      16,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtDataParserVisitor> object_stream =
      MoqtSessionPeer::CreateIncomingDataStream(&session, &mock_stream);
  // One callback per fragment.
  EXPECT_CALL(visitor_, OnObjectFragment(_, _, _, _, _, _, _, _)).Times(2);
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  object_stream->OnObjectMessage(object, payload, false);
  object_stream->OnObjectMessage(object, payload, true);
}
// An object may arrive on a data stream before the SUBSCRIBE_OK for its
// subscription. The object must still be delivered to the visitor, and the
// later SUBSCRIBE_OK must still trigger OnReply.
TEST_F(MoqtSessionTest, ObjectBeforeSubscribeOk) {
  MockRemoteTrackVisitor visitor_;
  FullTrackName ftn("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSubscribe subscribe = {
      1,
      2,
      ftn,
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
  };
  // Register the subscription as "sent but not yet acknowledged".
  MoqtSessionPeer::AddActiveSubscribe(&session_, 1, subscribe, &visitor_);
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      8,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtDataParserVisitor> object_stream =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &mock_stream);
  // The early object is delivered with the metadata from the pending
  // subscription.
  EXPECT_CALL(visitor_, OnObjectFragment(_, _, _, _, _, _, _, _))
      .WillOnce([&](const FullTrackName& full_track_name,
                    uint64_t group_sequence, uint64_t object_sequence,
                    MoqtPriority publisher_priority, MoqtObjectStatus status,
                    MoqtForwardingPreference forwarding_preference,
                    absl::string_view payload, bool end_of_message) {
        EXPECT_EQ(full_track_name, ftn);
        EXPECT_EQ(group_sequence, object.group_id);
        EXPECT_EQ(object_sequence, object.object_id);
      });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  object_stream->OnObjectMessage(object, payload, true);
  // Now the SUBSCRIBE_OK arrives; the visitor still gets its reply.
  MoqtSubscribeOk ok = {
      1,
      quic::QuicTimeDelta::FromMilliseconds(0),
      MoqtDeliveryOrder::kAscending,
      std::nullopt,
  };
  webtransport::test::MockStream mock_control_stream;
  std::unique_ptr<MoqtControlParserVisitor> control_stream =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_control_stream);
  EXPECT_CALL(visitor_, OnReply(_, _)).Times(1);
  control_stream->OnSubscribeOkMessage(ok);
}
// If an object for a subscription has already been delivered, a subsequent
// SUBSCRIBE_ERROR for that subscription is a protocol violation and must
// close the session.
TEST_F(MoqtSessionTest, ObjectBeforeSubscribeError) {
  MockRemoteTrackVisitor visitor;
  FullTrackName ftn("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSubscribe subscribe = {
      1,
      2,
      ftn,
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
  };
  // Subscription is pending (no OK/ERROR received yet).
  MoqtSessionPeer::AddActiveSubscribe(&session_, 1, subscribe, &visitor);
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      8,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtDataParserVisitor> object_stream =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &mock_stream);
  // The early object itself is delivered normally.
  EXPECT_CALL(visitor, OnObjectFragment(_, _, _, _, _, _, _, _))
      .WillOnce([&](const FullTrackName& full_track_name,
                    uint64_t group_sequence, uint64_t object_sequence,
                    MoqtPriority publisher_priority, MoqtObjectStatus status,
                    MoqtForwardingPreference forwarding_preference,
                    absl::string_view payload, bool end_of_message) {
        EXPECT_EQ(full_track_name, ftn);
        EXPECT_EQ(group_sequence, object.group_id);
        EXPECT_EQ(object_sequence, object.object_id);
      });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  object_stream->OnObjectMessage(object, payload, true);
  // SUBSCRIBE_ERROR after data has flowed: session must be torn down.
  MoqtSubscribeError subscribe_error = {
      1,
      SubscribeErrorCode::kRetryTrackAlias,
      "foo",
      3,
  };
  webtransport::test::MockStream mock_control_stream;
  std::unique_ptr<MoqtControlParserVisitor> control_stream =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_control_stream);
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Received SUBSCRIBE_ERROR after object"))
      .Times(1);
  control_stream->OnSubscribeErrorMessage(subscribe_error);
}
// Two objects arriving before SUBSCRIBE_OK must use the same forwarding
// preference; a mid-track change is a protocol violation that closes the
// session.
TEST_F(MoqtSessionTest, TwoEarlyObjectsDifferentForwarding) {
  MockRemoteTrackVisitor visitor;
  FullTrackName ftn("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSubscribe subscribe = {
      1,
      2,
      ftn,
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
  };
  MoqtSessionPeer::AddActiveSubscribe(&session_, 1, subscribe, &visitor);
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      8,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtDataParserVisitor> object_stream =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &mock_stream);
  // First object (kSubgroup) is delivered normally.
  EXPECT_CALL(visitor, OnObjectFragment(_, _, _, _, _, _, _, _))
      .WillOnce([&](const FullTrackName& full_track_name,
                    uint64_t group_sequence, uint64_t object_sequence,
                    MoqtPriority publisher_priority, MoqtObjectStatus status,
                    MoqtForwardingPreference forwarding_preference,
                    absl::string_view payload, bool end_of_message) {
        EXPECT_EQ(full_track_name, ftn);
        EXPECT_EQ(group_sequence, object.group_id);
        EXPECT_EQ(object_sequence, object.object_id);
      });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  object_stream->OnObjectMessage(object, payload, true);
  // Second object switches to kTrack: protocol violation.
  object.forwarding_preference = MoqtForwardingPreference::kTrack;
  ++object.object_id;
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Forwarding preference changes mid-track"))
      .Times(1);
  object_stream->OnObjectMessage(object, payload, true);
}
// If an early object was delivered with one forwarding preference but the
// track (established before SUBSCRIBE_OK arrives) was checked with a
// different one, processing SUBSCRIBE_OK must close the session.
TEST_F(MoqtSessionTest, EarlyObjectForwardingDoesNotMatchTrack) {
  MockRemoteTrackVisitor visitor;
  FullTrackName ftn("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSubscribe subscribe = {
      1,
      2,
      ftn,
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
  };
  MoqtSessionPeer::AddActiveSubscribe(&session_, 1, subscribe, &visitor);
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      8,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtDataParserVisitor> object_stream =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &mock_stream);
  // Early object arrives with kSubgroup.
  EXPECT_CALL(visitor, OnObjectFragment(_, _, _, _, _, _, _, _))
      .WillOnce([&](const FullTrackName& full_track_name,
                    uint64_t group_sequence, uint64_t object_sequence,
                    MoqtPriority publisher_priority, MoqtObjectStatus status,
                    MoqtForwardingPreference forwarding_preference,
                    absl::string_view payload, bool end_of_message) {
        EXPECT_EQ(full_track_name, ftn);
        EXPECT_EQ(group_sequence, object.group_id);
        EXPECT_EQ(object_sequence, object.object_id);
      });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  object_stream->OnObjectMessage(object, payload, true);
  // The track is now pinned to kTrack, which conflicts with the early object.
  MoqtSessionPeer::CreateRemoteTrack(&session_, ftn, &visitor, 2);
  MoqtSessionPeer::remote_track(&session_, 2)
      .CheckForwardingPreference(MoqtForwardingPreference::kTrack);
  MoqtSubscribeOk ok = {
      1,
      quic::QuicTimeDelta::FromMilliseconds(0),
      MoqtDeliveryOrder::kAscending,
      std::nullopt,
  };
  webtransport::test::MockStream mock_control_stream;
  std::unique_ptr<MoqtControlParserVisitor> control_stream =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_control_stream);
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Forwarding preference different in early objects"))
      .Times(1);
  control_stream->OnSubscribeOkMessage(ok);
}
// When a new object becomes available for a subscription, the session opens
// an outgoing unidirectional stream, writes the stream header plus the
// object, and sets FIN when the group is exhausted.
// NOTE(review): despite the name ("Incoming"), this test exercises the
// *outgoing* data-stream path — consider renaming in a follow-up.
TEST_F(MoqtSessionTest, CreateIncomingDataStreamAndSend) {
  FullTrackName ftn("foo", "bar");
  auto track = SetupPublisher(ftn, MoqtForwardingPreference::kSubgroup,
                              FullSequence(4, 2));
  MoqtObjectListener* subscription =
      MoqtSessionPeer::AddSubscription(&session_, track, 0, 2, 5, 0);
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(true));
  // The stream stays writable until the FIN has been sent.
  bool fin = false;
  webtransport::test::MockStream mock_stream;
  EXPECT_CALL(mock_stream, CanWrite()).WillRepeatedly([&] { return !fin; });
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream())
      .WillOnce(Return(&mock_stream));
  std::unique_ptr<webtransport::StreamVisitor> stream_visitor;
  EXPECT_CALL(mock_stream, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor = std::move(visitor);
      });
  EXPECT_CALL(mock_stream, visitor()).WillOnce([&] {
    return stream_visitor.get();
  });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kOutgoingUniStreamId));
  EXPECT_CALL(mock_session_, GetStreamById(kOutgoingUniStreamId))
      .WillRepeatedly(Return(&mock_stream));
  bool correct_message = false;
  // Expected wire prefix — presumably the subgroup stream header bytes;
  // confirm against the MoQT framing code if this changes.
  const std::string kExpectedMessage = {0x04, 0x00, 0x02, 0x05, 0x00, 0x00};
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = absl::StartsWith(data[0], kExpectedMessage);
        fin |= options.send_fin();
        return absl::OkStatus();
      });
  // Object (5, 0) exists; (5, 1) does not, which ends the write loop.
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 0))).WillRepeatedly([] {
    return PublishedObject{FullSequence(5, 0), MoqtObjectStatus::kNormal,
                           MemSliceFromString("deadbeef")};
  });
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 1))).WillRepeatedly([] {
    return std::optional<PublishedObject>();
  });
  subscription->OnNewObjectAvailable(FullSequence(5, 0));
  EXPECT_TRUE(correct_message);
}
// If the transport cannot open a unidirectional stream when an object becomes
// available, the write is queued and performed later when
// OnCanCreateNewOutgoingUnidirectionalStream() fires.
TEST_F(MoqtSessionTest, UnidirectionalStreamCannotBeOpened) {
  FullTrackName ftn("foo", "bar");
  auto track = SetupPublisher(ftn, MoqtForwardingPreference::kSubgroup,
                              FullSequence(4, 2));
  MoqtObjectListener* subscription =
      MoqtSessionPeer::AddSubscription(&session_, track, 0, 2, 5, 0);
  // First attempt: no stream credit, so the object is queued.
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(false));
  subscription->OnNewObjectAvailable(FullSequence(5, 0));
  // Credit becomes available; the queued write should now go out.
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(true));
  bool fin = false;
  webtransport::test::MockStream mock_stream;
  EXPECT_CALL(mock_stream, CanWrite()).WillRepeatedly([&] { return !fin; });
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream())
      .WillOnce(Return(&mock_stream));
  std::unique_ptr<webtransport::StreamVisitor> stream_visitor;
  EXPECT_CALL(mock_stream, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor = std::move(visitor);
      });
  EXPECT_CALL(mock_stream, visitor()).WillOnce([&] {
    return stream_visitor.get();
  });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kOutgoingUniStreamId));
  EXPECT_CALL(mock_session_, GetStreamById(kOutgoingUniStreamId))
      .WillRepeatedly(Return(&mock_stream));
  EXPECT_CALL(mock_stream, Writev(_, _)).WillOnce(Return(absl::OkStatus()));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 0))).WillRepeatedly([] {
    return PublishedObject{FullSequence(5, 0), MoqtObjectStatus::kNormal,
                           MemSliceFromString("deadbeef")};
  });
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 1))).WillRepeatedly([] {
    return std::optional<PublishedObject>();
  });
  session_.OnCanCreateNewOutgoingUnidirectionalStream();
}
// If the outgoing data stream vanishes (GetStreamById returns nullptr) after
// the first object was written, a later object notification must not touch
// the publisher's cache.
// NOTE: this test depends on gMock's rule that a later EXPECT_CALL on the
// same mock/method overrides an earlier one — keep the call order intact.
TEST_F(MoqtSessionTest, OutgoingStreamDisappears) {
  FullTrackName ftn("foo", "bar");
  auto track = SetupPublisher(ftn, MoqtForwardingPreference::kSubgroup,
                              FullSequence(4, 2));
  MoqtObjectListener* subscription =
      MoqtSessionPeer::AddSubscription(&session_, track, 0, 2, 5, 0);
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(true));
  webtransport::test::MockStream mock_stream;
  EXPECT_CALL(mock_stream, CanWrite()).WillRepeatedly(Return(true));
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream())
      .WillOnce(Return(&mock_stream));
  std::unique_ptr<webtransport::StreamVisitor> stream_visitor;
  EXPECT_CALL(mock_stream, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor = std::move(visitor);
      });
  EXPECT_CALL(mock_stream, visitor()).WillRepeatedly([&] {
    return stream_visitor.get();
  });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillRepeatedly(Return(kOutgoingUniStreamId));
  EXPECT_CALL(mock_session_, GetStreamById(kOutgoingUniStreamId))
      .WillRepeatedly(Return(&mock_stream));
  EXPECT_CALL(mock_stream, Writev(_, _)).WillOnce(Return(absl::OkStatus()));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 0))).WillRepeatedly([] {
    return PublishedObject{FullSequence(5, 0), MoqtObjectStatus::kNormal,
                           MemSliceFromString("deadbeef")};
  });
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 1))).WillOnce([] {
    return std::optional<PublishedObject>();
  });
  // First object goes out on the live stream.
  subscription->OnNewObjectAvailable(FullSequence(5, 0));
  // Stream disappears; the next notification must not query the cache.
  EXPECT_CALL(mock_session_, GetStreamById(kOutgoingUniStreamId))
      .WillRepeatedly(Return(nullptr));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(5, 1))).Times(0);
  subscription->OnNewObjectAvailable(FullSequence(5, 1));
}
// A client opens exactly one bidirectional (control) stream, sending
// CLIENT_SETUP on it. Any incoming bidirectional stream from the peer after
// that is a protocol violation and terminates the session.
TEST_F(MoqtSessionTest, OneBidirectionalStreamClient) {
  webtransport::test::MockStream mock_stream;
  EXPECT_CALL(mock_session_, OpenOutgoingBidirectionalStream())
      .WillOnce(Return(&mock_stream));
  std::unique_ptr<webtransport::StreamVisitor> visitor;
  EXPECT_CALL(mock_stream, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> new_visitor) {
        visitor = std::move(new_visitor);
      });
  EXPECT_CALL(mock_stream, GetStreamId())
      .WillOnce(Return(webtransport::StreamId(4)));
  EXPECT_CALL(mock_session_, GetStreamById(4)).WillOnce(Return(&mock_stream));
  bool correct_message = false;
  EXPECT_CALL(mock_stream, visitor()).WillOnce([&] { return visitor.get(); });
  // Session start writes CLIENT_SETUP on the new control stream.
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kClientSetup);
        return absl::OkStatus();
      });
  session_.OnSessionReady();
  EXPECT_TRUE(correct_message);
  // A second bidirectional stream from the peer must kill the session.
  bool reported_error = false;
  EXPECT_CALL(mock_session_, AcceptIncomingBidirectionalStream())
      .WillOnce(Return(&mock_stream));
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Bidirectional stream already open"))
      .Times(1);
  EXPECT_CALL(session_callbacks_.session_terminated_callback, Call(_))
      .WillOnce([&](absl::string_view error_message) {
        reported_error = (error_message == "Bidirectional stream already open");
      });
  session_.OnIncomingBidirectionalStreamAvailable();
  EXPECT_TRUE(reported_error);
}
// Server counterpart of the test above: receiving CLIENT_SETUP answers with
// SERVER_SETUP and establishes the session; any further incoming
// bidirectional stream terminates it.
TEST_F(MoqtSessionTest, OneBidirectionalStreamServer) {
  MoqtSession server_session(
      &mock_session_, MoqtSessionParameters(quic::Perspective::IS_SERVER),
      session_callbacks_.AsSessionCallbacks());
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&server_session, &mock_stream);
  MoqtClientSetup setup = {
      {kDefaultMoqtVersion},
      MoqtRole::kPubSub,
      std::nullopt,
  };
  bool correct_message = false;
  // CLIENT_SETUP must be answered with SERVER_SETUP.
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]), MoqtMessageType::kServerSetup);
        return absl::OkStatus();
      });
  EXPECT_CALL(mock_stream, GetStreamId()).WillOnce(Return(0));
  EXPECT_CALL(session_callbacks_.session_established_callback, Call()).Times(1);
  stream_input->OnClientSetupMessage(setup);
  // A second bidirectional stream is a protocol violation.
  bool reported_error = false;
  EXPECT_CALL(mock_session_, AcceptIncomingBidirectionalStream())
      .WillOnce(Return(&mock_stream));
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Bidirectional stream already open"))
      .Times(1);
  EXPECT_CALL(session_callbacks_.session_terminated_callback, Call(_))
      .WillOnce([&](absl::string_view error_message) {
        reported_error = (error_message == "Bidirectional stream already open");
      });
  server_session.OnIncomingBidirectionalStreamAvailable();
  EXPECT_TRUE(reported_error);
}
// Receiving UNSUBSCRIBE for an existing subscription makes the session write
// SUBSCRIBE_DONE back on the control stream.
TEST_F(MoqtSessionTest, ReceiveUnsubscribe) {
  FullTrackName track_name("foo", "bar");
  auto publisher = SetupPublisher(track_name, MoqtForwardingPreference::kTrack,
                                  FullSequence(4, 2));
  MoqtSessionPeer::AddSubscription(&session_, publisher, 0, 1, 3, 4);
  webtransport::test::MockStream control_stream;
  std::unique_ptr<MoqtControlParserVisitor> parser =
      MoqtSessionPeer::CreateControlStream(&session_, &control_stream);
  MoqtUnsubscribe unsubscribe_msg = {
      0,
  };
  EXPECT_CALL(mock_session_, GetStreamById(4))
      .WillOnce(Return(&control_stream));
  bool wrote_subscribe_done = false;
  EXPECT_CALL(control_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        wrote_subscribe_done = true;
        EXPECT_EQ(*ExtractMessageType(data[0]),
                  MoqtMessageType::kSubscribeDone);
        return absl::OkStatus();
      });
  parser->OnUnsubscribeMessage(unsubscribe_msg);
  EXPECT_TRUE(wrote_subscribe_done);
}
// A track with datagram forwarding preference sends new objects as
// WebTransport datagrams; the full wire bytes are checked.
TEST_F(MoqtSessionTest, SendDatagram) {
  FullTrackName ftn("foo", "bar");
  std::shared_ptr<MockTrackPublisher> track_publisher = SetupPublisher(
      ftn, MoqtForwardingPreference::kDatagram, FullSequence{4, 0});
  MoqtObjectListener* listener =
      MoqtSessionPeer::AddSubscription(&session_, track_publisher, 0, 2, 5, 0);
  bool correct_message = false;
  // Expected datagram: header bytes followed by the ASCII payload
  // "deadbeef" (0x64 0x65 0x61 0x64 0x62 0x65 0x65 0x66).
  uint8_t kExpectedMessage[] = {
      0x01, 0x00, 0x02, 0x05, 0x00, 0x00, 0x08, 0x64,
      0x65, 0x61, 0x64, 0x62, 0x65, 0x65, 0x66,
  };
  EXPECT_CALL(mock_session_, SendOrQueueDatagram(_))
      .WillOnce([&](absl::string_view datagram) {
        if (datagram.size() == sizeof(kExpectedMessage)) {
          correct_message = (0 == memcmp(datagram.data(), kExpectedMessage,
                                         sizeof(kExpectedMessage)));
        }
        return webtransport::DatagramStatus(
            webtransport::DatagramStatusCode::kSuccess, "");
      });
  EXPECT_CALL(*track_publisher, GetCachedObject(FullSequence{5, 0}))
      .WillRepeatedly([] {
        return PublishedObject{FullSequence{5, 0}, MoqtObjectStatus::kNormal,
                               MemSliceFromString("deadbeef")};
      });
  listener->OnNewObjectAvailable(FullSequence(5, 0));
  EXPECT_TRUE(correct_message);
}
// An incoming datagram is parsed and delivered to the track visitor with the
// metadata and payload decoded from the wire bytes.
TEST_F(MoqtSessionTest, ReceiveDatagram) {
  MockRemoteTrackVisitor visitor_;
  FullTrackName ftn("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSessionPeer::CreateRemoteTrack(&session_, ftn, &visitor_, 2);
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kDatagram,
      std::nullopt,
      8,
  };
  // Raw datagram bytes; the tail is the ASCII payload "deadbeef".
  char datagram[] = {0x01, 0x01, 0x02, 0x00, 0x00, 0x00, 0x08, 0x64,
                     0x65, 0x61, 0x64, 0x62, 0x65, 0x65, 0x66};
  // Every argument must match the decoded object exactly.
  EXPECT_CALL(visitor_,
              OnObjectFragment(ftn, object.group_id, object.object_id,
                               object.publisher_priority, object.object_status,
                               object.forwarding_preference, payload, true))
      .Times(1);
  session_.OnDatagramReceived(absl::string_view(datagram, sizeof(datagram)));
}
// After the first object pins a track's forwarding preference, a second
// object with a different preference on the same track is a protocol
// violation that closes the session.
TEST_F(MoqtSessionTest, ForwardingPreferenceMismatch) {
  MockRemoteTrackVisitor visitor;
  FullTrackName track_name("foo", "bar");
  std::string payload = "deadbeef";
  MoqtSessionPeer::CreateRemoteTrack(&session_, track_name, &visitor, 2);
  MoqtObject object = {
      1,
      2,
      0,
      0,
      0,
      MoqtObjectStatus::kNormal,
      MoqtForwardingPreference::kSubgroup,
      0,
      8,
  };
  webtransport::test::MockStream data_stream;
  std::unique_ptr<MoqtDataParserVisitor> parser =
      MoqtSessionPeer::CreateIncomingDataStream(&session_, &data_stream);
  // First object (kSubgroup) goes through normally.
  EXPECT_CALL(visitor, OnObjectFragment(_, _, _, _, _, _, _, _)).Times(1);
  EXPECT_CALL(data_stream, GetStreamId())
      .WillRepeatedly(Return(kIncomingUniStreamId));
  parser->OnObjectMessage(object, payload, true);
  // Second object flips to kTrack: the session must be closed.
  ++object.object_id;
  object.forwarding_preference = MoqtForwardingPreference::kTrack;
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Forwarding preference changes mid-track"))
      .Times(1);
  parser->OnObjectMessage(object, payload, true);
}
// Announcing to a peer whose role is publisher-only is rejected locally:
// the resolution callback fires (with an error) without any network round
// trip.
TEST_F(MoqtSessionTest, AnnounceToPublisher) {
  MoqtSessionPeer::set_peer_role(&session_, MoqtRole::kPublisher);
  testing::MockFunction<void(
      FullTrackName track_namespace,
      std::optional<MoqtAnnounceErrorReason> error_message)>
      resolved_cb;
  EXPECT_CALL(resolved_cb, Call(_, _)).Times(1);
  session_.Announce(FullTrackName{"foo"}, resolved_cb.AsStdFunction());
}
// A peer that declared itself publisher-only must not send SUBSCRIBE;
// receiving one is a protocol violation that terminates the session.
TEST_F(MoqtSessionTest, SubscribeFromPublisher) {
  MoqtSessionPeer::set_peer_role(&session_, MoqtRole::kPublisher);
  MoqtSubscribe request = {
      1,
      2,
      FullTrackName({"foo", "bar"}),
      0x80,
      std::nullopt,
      0,
      0,
      std::nullopt,
      std::nullopt,
      MoqtSubscribeParameters(),
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtControlParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Received SUBSCRIBE from publisher"))
      .Times(1);
  EXPECT_CALL(session_callbacks_.session_terminated_callback, Call(_)).Times(1);
  stream_input->OnSubscribeMessage(request);
}
// A peer that declared itself subscriber-only must not send ANNOUNCE;
// receiving one is a protocol violation that terminates the session.
TEST_F(MoqtSessionTest, AnnounceFromSubscriber) {
  MoqtSessionPeer::set_peer_role(&session_, MoqtRole::kSubscriber);
  webtransport::test::MockStream control_stream;
  std::unique_ptr<MoqtControlParserVisitor> parser =
      MoqtSessionPeer::CreateControlStream(&session_, &control_stream);
  MoqtAnnounce announce_msg = {
      FullTrackName{"foo"},
  };
  EXPECT_CALL(mock_session_,
              CloseSession(static_cast<uint64_t>(MoqtError::kProtocolViolation),
                           "Received ANNOUNCE from Subscriber"))
      .Times(1);
  EXPECT_CALL(session_callbacks_.session_terminated_callback, Call(_)).Times(1);
  parser->OnAnnounceMessage(announce_msg);
}
// When several streams are queued (no stream credit), they must be opened in
// group order (0, 1, 2) once credit returns — even though the notifications
// arrived out of order (1, 0, 2).
// NOTE: the repeated EXPECT_CALL(GetTrackStatus()) relies on gMock's rule
// that a later expectation overrides an earlier one; keep ordering intact.
TEST_F(MoqtSessionTest, QueuedStreamsOpenedInOrder) {
  FullTrackName ftn("foo", "bar");
  auto track = SetupPublisher(ftn, MoqtForwardingPreference::kSubgroup,
                              FullSequence(0, 0));
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kNotYetBegun));
  MoqtObjectListener* subscription =
      MoqtSessionPeer::AddSubscription(&session_, track, 0, 14, 0, 0);
  // No credit for any of the three notifications: all are queued.
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(false));
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kInProgress));
  // Deliberately out of order.
  subscription->OnNewObjectAvailable(FullSequence(1, 0));
  subscription->OnNewObjectAvailable(FullSequence(0, 0));
  subscription->OnNewObjectAvailable(FullSequence(2, 0));
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillRepeatedly(Return(true));
  webtransport::test::MockStream mock_stream0, mock_stream1, mock_stream2;
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream())
      .WillOnce(Return(&mock_stream0))
      .WillOnce(Return(&mock_stream1))
      .WillOnce(Return(&mock_stream2));
  std::unique_ptr<webtransport::StreamVisitor> stream_visitor[3];
  EXPECT_CALL(mock_stream0, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor[0] = std::move(visitor);
      });
  EXPECT_CALL(mock_stream1, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor[1] = std::move(visitor);
      });
  EXPECT_CALL(mock_stream2, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor[2] = std::move(visitor);
      });
  EXPECT_CALL(mock_stream0, GetStreamId()).WillRepeatedly(Return(0));
  EXPECT_CALL(mock_stream1, GetStreamId()).WillRepeatedly(Return(1));
  EXPECT_CALL(mock_stream2, GetStreamId()).WillRepeatedly(Return(2));
  EXPECT_CALL(mock_stream0, visitor()).WillOnce([&]() {
    return stream_visitor[0].get();
  });
  EXPECT_CALL(mock_stream1, visitor()).WillOnce([&]() {
    return stream_visitor[1].get();
  });
  EXPECT_CALL(mock_stream2, visitor()).WillOnce([&]() {
    return stream_visitor[2].get();
  });
  EXPECT_CALL(*track, GetCachedObject(FullSequence(0, 0)))
      .WillOnce(
          Return(PublishedObject{FullSequence(0, 0), MoqtObjectStatus::kNormal,
                                 MemSliceFromString("deadbeef")}));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(0, 1)))
      .WillOnce(Return(std::nullopt));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(1, 0)))
      .WillOnce(
          Return(PublishedObject{FullSequence(1, 0), MoqtObjectStatus::kNormal,
                                 MemSliceFromString("deadbeef")}));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(1, 1)))
      .WillOnce(Return(std::nullopt));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(2, 0)))
      .WillOnce(
          Return(PublishedObject{FullSequence(2, 0), MoqtObjectStatus::kNormal,
                                 MemSliceFromString("deadbeef")}));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(2, 1)))
      .WillOnce(Return(std::nullopt));
  EXPECT_CALL(mock_stream0, CanWrite()).WillRepeatedly(Return(true));
  EXPECT_CALL(mock_stream1, CanWrite()).WillRepeatedly(Return(true));
  EXPECT_CALL(mock_stream2, CanWrite()).WillRepeatedly(Return(true));
  // Each stream's header must carry the expected group id; the byte offsets
  // (4 for group 0, 3 for groups 1 and 2) presumably follow from varint
  // encoding in the header — confirm against the framer if this changes.
  EXPECT_CALL(mock_stream0, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        EXPECT_EQ(static_cast<const uint8_t>(data[0][4]), 0);
        return absl::OkStatus();
      });
  EXPECT_CALL(mock_stream1, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        EXPECT_EQ(static_cast<const uint8_t>(data[0][3]), 1);
        return absl::OkStatus();
      });
  EXPECT_CALL(mock_stream2, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        EXPECT_EQ(static_cast<const uint8_t>(data[0][3]), 2);
        return absl::OkStatus();
      });
  session_.OnCanCreateNewOutgoingUnidirectionalStream();
}
// If a stream was queued for a subscription that is deleted before credit
// returns, no stream must be opened when credit becomes available.
TEST_F(MoqtSessionTest, StreamQueuedForSubscriptionThatDoesntExist) {
  FullTrackName ftn("foo", "bar");
  auto track = SetupPublisher(ftn, MoqtForwardingPreference::kSubgroup,
                              FullSequence(0, 0));
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kNotYetBegun));
  MoqtObjectListener* subscription =
      MoqtSessionPeer::AddSubscription(&session_, track, 0, 14, 0, 0);
  // No credit: the object notification queues a stream.
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(false));
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kInProgress));
  subscription->OnNewObjectAvailable(FullSequence(0, 0));
  // Subscription goes away before credit returns.
  MoqtSessionPeer::DeleteSubscription(&session_, 0);
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillRepeatedly(Return(true));
  // The stale queue entry must not open a stream.
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream()).Times(0);
  session_.OnCanCreateNewOutgoingUnidirectionalStream();
}
// Two subscriptions have queued streams; the higher-priority subscription's
// stream is opened first, and a priority change while queued reorders the
// queue: subscription 1 (re-prioritized to 0, ahead of subscription 0's
// priority 1) goes out next.
TEST_F(MoqtSessionTest, QueuedStreamPriorityChanged) {
  FullTrackName ftn("foo", "bar");
  auto track = SetupPublisher(ftn, MoqtForwardingPreference::kSubgroup,
                              FullSequence(0, 0));
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kNotYetBegun));
  MoqtObjectListener* subscription0 =
      MoqtSessionPeer::AddSubscription(&session_, track, 0, 14, 0, 0);
  MoqtObjectListener* subscription1 =
      MoqtSessionPeer::AddSubscription(&session_, track, 1, 14, 0, 0);
  // Subscription 0 starts at higher priority (1 < 2).
  MoqtSessionPeer::UpdateSubscriberPriority(&session_, 0, 1);
  MoqtSessionPeer::UpdateSubscriberPriority(&session_, 1, 2);
  // No credit for any of the four notifications: all are queued.
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(false))
      .WillOnce(Return(false));
  EXPECT_CALL(*track, GetTrackStatus())
      .WillRepeatedly(Return(MoqtTrackStatusCode::kInProgress));
  subscription0->OnNewObjectAvailable(FullSequence(0, 0));
  subscription1->OnNewObjectAvailable(FullSequence(0, 0));
  subscription0->OnNewObjectAvailable(FullSequence(1, 0));
  subscription1->OnNewObjectAvailable(FullSequence(1, 0));
  // Credit for exactly one stream: subscription 0 (priority 1) wins.
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(true))
      .WillOnce(Return(false));
  webtransport::test::MockStream mock_stream0;
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream())
      .WillOnce(Return(&mock_stream0));
  std::unique_ptr<webtransport::StreamVisitor> stream_visitor0;
  EXPECT_CALL(mock_stream0, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor0 = std::move(visitor);
      });
  EXPECT_CALL(mock_stream0, GetStreamId()).WillRepeatedly(Return(0));
  EXPECT_CALL(mock_stream0, visitor()).WillOnce([&]() {
    return stream_visitor0.get();
  });
  EXPECT_CALL(*track, GetCachedObject(FullSequence(0, 0)))
      .WillOnce(
          Return(PublishedObject{FullSequence(0, 0), MoqtObjectStatus::kNormal,
                                 MemSliceFromString("deadbeef")}));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(0, 1)))
      .WillOnce(Return(std::nullopt));
  EXPECT_CALL(mock_stream0, CanWrite()).WillRepeatedly(Return(true));
  // Byte [1] presumably carries the subscribe/track id and [3] the group id
  // in the stream header — confirm against the framer if this changes.
  EXPECT_CALL(mock_stream0, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        EXPECT_EQ(static_cast<const uint8_t>(data[0][1]), 0);
        EXPECT_EQ(static_cast<const uint8_t>(data[0][3]), 0);
        return absl::OkStatus();
      });
  session_.OnCanCreateNewOutgoingUnidirectionalStream();
  // Subscription 1 jumps to priority 0 while still queued.
  MoqtSessionPeer::UpdateSubscriberPriority(&session_, 1, 0);
  EXPECT_CALL(mock_session_, CanOpenNextOutgoingUnidirectionalStream())
      .WillOnce(Return(true))
      .WillRepeatedly(Return(false));
  webtransport::test::MockStream mock_stream1;
  EXPECT_CALL(mock_session_, OpenOutgoingUnidirectionalStream())
      .WillOnce(Return(&mock_stream1));
  std::unique_ptr<webtransport::StreamVisitor> stream_visitor1;
  EXPECT_CALL(mock_stream1, SetVisitor(_))
      .WillOnce([&](std::unique_ptr<webtransport::StreamVisitor> visitor) {
        stream_visitor1 = std::move(visitor);
      });
  EXPECT_CALL(mock_stream1, GetStreamId()).WillRepeatedly(Return(1));
  EXPECT_CALL(mock_stream1, visitor()).WillOnce([&]() {
    return stream_visitor1.get();
  });
  EXPECT_CALL(*track, GetCachedObject(FullSequence(0, 0)))
      .WillOnce(
          Return(PublishedObject{FullSequence(0, 0), MoqtObjectStatus::kNormal,
                                 MemSliceFromString("deadbeef")}));
  EXPECT_CALL(*track, GetCachedObject(FullSequence(0, 1)))
      .WillOnce(Return(std::nullopt));
  EXPECT_CALL(mock_stream1, CanWrite()).WillRepeatedly(Return(true));
  // The re-prioritized subscription 1 now goes out first.
  EXPECT_CALL(mock_stream1, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        EXPECT_EQ(static_cast<const uint8_t>(data[0][1]), 1);
        EXPECT_EQ(static_cast<const uint8_t>(data[0][3]), 0);
        return absl::OkStatus();
      });
  session_.OnCanCreateNewOutgoingUnidirectionalStream();
}
#if 0
// NOTE(review): disabled test. It references LocalTrack / AddLocalTrack /
// HasSubscribers APIs that do not appear in the active tests above —
// presumably kept for reference until SUBSCRIBE_UPDATE handling is
// reimplemented against the current publisher API. Confirm before deleting.
TEST_F(MoqtSessionTest, SubscribeUpdateClosesSubscription) {
  MoqtSessionPeer::set_peer_role(&session_, MoqtRole::kSubscriber);
  FullTrackName ftn("foo", "bar");
  MockLocalTrackVisitor track_visitor;
  session_.AddLocalTrack(ftn, MoqtForwardingPreference::kTrack, &track_visitor);
  MoqtSessionPeer::AddSubscription(&session_, ftn, 0, 2, 5, 0);
  LocalTrack* track = MoqtSessionPeer::local_track(&session_, ftn);
  track->GetWindow(0)->OnObjectSent(FullSequence(7, 3),
                                    MoqtObjectStatus::kNormal);
  MoqtSubscribeUpdate update = {
      0,
      5,
      0,
      7,
      3,
  };
  webtransport::test::MockStream mock_stream;
  std::unique_ptr<MoqtParserVisitor> stream_input =
      MoqtSessionPeer::CreateControlStream(&session_, &mock_stream);
  EXPECT_CALL(mock_session_, GetStreamById(4)).WillOnce(Return(&mock_stream));
  bool correct_message = false;
  EXPECT_CALL(mock_stream, Writev(_, _))
      .WillOnce([&](absl::Span<const absl::string_view> data,
                    const quiche::StreamWriteOptions& options) {
        correct_message = true;
        EXPECT_EQ(*ExtractMessageType(data[0]),
                  MoqtMessageType::kSubscribeDone);
        return absl::OkStatus();
      });
  stream_input->OnSubscribeUpdateMessage(update);
  EXPECT_TRUE(correct_message);
  EXPECT_FALSE(session_.HasSubscribers(ftn));
}
#endif
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_session.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_session_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
83a1dfda-633f-4317-b2d6-68d361a98e54 | cpp | tensorflow/tensorflow | cross_op | tensorflow/compiler/tf2xla/kernels/cross_op.cc | tensorflow/core/kernels/cross_op_test.cc | #include <vector>
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
namespace tensorflow {
namespace {
class CrossOp : public XlaOpKernel {
public:
explicit CrossOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* ctx) override {
TensorShape in0_shape = ctx->InputShape(0);
TensorShape in1_shape = ctx->InputShape(1);
OP_REQUIRES(ctx, in0_shape == in1_shape,
errors::InvalidArgument("Both inputs must be of same shape: ",
in0_shape.DebugString(), " vs. ",
in1_shape.DebugString()));
OP_REQUIRES(ctx, in0_shape.dims() >= 1,
errors::InvalidArgument("Input must be at least 1D",
in0_shape.DebugString()));
auto inner_dim = in0_shape.dim_size(in0_shape.dims() - 1);
OP_REQUIRES(ctx, inner_dim == 3,
errors::FailedPrecondition(
"Cross-products are only defined for 3-element vectors."));
std::vector<int64_t> starts(in0_shape.dims(), 0);
std::vector<int64_t> limits;
const auto& dim_sizes = in0_shape.dim_sizes();
limits.reserve(dim_sizes.size());
for (auto dim_size : in0_shape.dim_sizes()) {
limits.push_back(dim_size);
}
std::vector<int64_t> strides(in0_shape.dims(), 1);
xla::XlaBuilder* b = ctx->builder();
auto in0 = ctx->Input(0);
auto in1 = ctx->Input(1);
starts.back() = 0;
limits.back() = 1;
auto u1 = xla::Slice(in0, starts, limits, strides);
auto v1 = xla::Slice(in1, starts, limits, strides);
starts.back() = 1;
limits.back() = 2;
auto u2 = xla::Slice(in0, starts, limits, strides);
auto v2 = xla::Slice(in1, starts, limits, strides);
starts.back() = 2;
limits.back() = 3;
auto u3 = xla::Slice(in0, starts, limits, strides);
auto v3 = xla::Slice(in1, starts, limits, strides);
auto s1 = xla::Sub(xla::Mul(u2, v3), xla::Mul(u3, v2));
auto s2 = xla::Sub(xla::Mul(u3, v1), xla::Mul(u1, v3));
auto s3 = xla::Sub(xla::Mul(u1, v2), xla::Mul(u2, v1));
auto output = xla::ConcatInDim(b, {s1, s2, s3}, in0_shape.dims() - 1);
ctx->SetOutput(0, output);
}
private:
CrossOp(const CrossOp&) = delete;
void operator=(const CrossOp&) = delete;
};
REGISTER_XLA_OP(Name("Cross"), CrossOp);
}
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class CrossOpTest : public OpsTestBase {
protected:
CrossOpTest() {
TF_EXPECT_OK(NodeDefBuilder("cross_op", "Cross")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(CrossOpTest, Zero) {
AddInputFromArray<float>(TensorShape({3}), {0, 0, 0});
AddInputFromArray<float>(TensorShape({3}), {0, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {0, 0, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CrossOpTest, RightHandRule) {
AddInputFromArray<float>(TensorShape({2, 3}), {1, 0, 0, 0, 1, 0});
AddInputFromArray<float>(TensorShape({2, 3}), {0, 1, 0, 1, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {{0, 0, 1, 0, 0, -1}});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(CrossOpTest, ArbitraryNonintegral) {
const float u1 = -0.669, u2 = -0.509, u3 = 0.125;
const float v1 = -0.477, v2 = 0.592, v3 = -0.110;
const float s1 = u2 * v3 - u3 * v2;
const float s2 = u3 * v1 - u1 * v3;
const float s3 = u1 * v2 - u2 * v1;
AddInputFromArray<float>(TensorShape({3}), {u1, u2, u3});
AddInputFromArray<float>(TensorShape({3}), {v1, v2, v3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({3}));
test::FillValues<float>(&expected, {s1, s2, s3});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 1e-6);
}
class CrossOpIntTest : public OpsTestBase {
protected:
CrossOpIntTest() {
TF_EXPECT_OK(NodeDefBuilder("cross_int_op", "Cross")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
};
TEST_F(CrossOpIntTest, RightHandRule) {
AddInputFromArray<int>(TensorShape({2, 3}), {2, 0, 0, 0, 2, 0});
AddInputFromArray<int>(TensorShape({2, 3}), {0, 2, 0, 2, 0, 0});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int>(&expected, {{0, 0, 4, 0, 0, -4}});
test::ExpectTensorEqual<int>(expected, *GetOutput(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/cross_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/cross_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe34d6ce-f548-4ce9-b75b-ba93249e22dd | cpp | tensorflow/tensorflow | autotune_result_wrapper | third_party/xla/xla/autotune_result_wrapper.cc | third_party/xla/xla/autotune_result_wrapper_test.cc | #include "xla/autotune_result_wrapper.h"
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
namespace xla {
absl::StatusOr<AutotuneResultWrapper>
AutotuneResultWrapper::FromKeyAndValue(OpaqueKey key, OpaqueValue value) {
AutotuneResults key_proto;
if (!key_proto.ParseFromString(key)) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Could not parse the provided key");
}
AutotuneResults::Entry value_entry;
if (!value_entry.ParseFromString(value)) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Could not parse the provided value");
}
AutotuneResults::Entry full_entry;
full_entry.set_device(key_proto.results(0).device());
full_entry.set_hlo(key_proto.results(0).hlo());
*full_entry.mutable_result() = value_entry.result();
return AutotuneResultWrapper(full_entry, key_proto.version());
}
AutotuneResultWrapper::OpaqueKey AutotuneResultWrapper::Key() const {
AutotuneResults key_proto;
key_proto.set_version(version_);
auto entry = key_proto.add_results();
entry->set_device(autotune_result_.device());
entry->set_hlo(autotune_result_.hlo());
OpaqueKey serialized;
CHECK(tsl::SerializeToStringDeterministic(key_proto, &serialized));
return serialized;
}
AutotuneResultWrapper::OpaqueValue AutotuneResultWrapper::Value() const {
AutotuneResults::Entry entry;
*entry.mutable_result() = autotune_result_.result();
OpaqueValue serialized;
CHECK(tsl::SerializeToStringDeterministic(entry, &serialized));
return serialized;
}
std::vector<AutotuneResultWrapper>
AutotuneResultWrapper::AutotuneResultsToWrappers(
const AutotuneResults& autotune_results) {
std::vector<AutotuneResultWrapper> wrappers;
wrappers.reserve(autotune_results.results_size());
for (const auto& result : autotune_results.results()) {
wrappers.push_back(
AutotuneResultWrapper(result, autotune_results.version()));
}
return wrappers;
}
absl::StatusOr<AutotuneResults>
AutotuneResultWrapper::AutotuneResultsFromWrappers(
const std::vector<AutotuneResultWrapper>& wrappers) {
AutotuneResults autotune_results;
for (const auto& wrapper : wrappers) {
if (autotune_results.results_size() > 0 &&
autotune_results.version() != wrapper.version_) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"All wrappers must have the same version number");
}
*autotune_results.add_results() = wrapper.autotune_result_;
autotune_results.set_version(wrapper.version_);
}
return autotune_results;
}
} | #include "xla/autotune_result_wrapper.h"
#include <cstdint>
#include <utility>
#include <vector>
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
AutotuneResults ThreeAutotuneEntries(int32_t version) {
AutotuneResults results;
results.set_version(version);
auto r1 = results.add_results();
r1->set_device("dev1");
r1->set_hlo("hlo1");
r1->mutable_result()->set_scratch_bytes(1);
auto r2 = results.add_results();
r2->set_device("dev2");
r2->set_hlo("hlo2");
r2->mutable_result()->set_scratch_bytes(2);
auto r3 = results.add_results();
r3->set_device("dev3");
r3->set_hlo("hlo3");
r3->mutable_result()->set_scratch_bytes(3);
return results;
}
TEST(AutotuneResultWrapperTest, FullRoundTrip) {
std::vector<AutotuneResultWrapper> wrappers =
AutotuneResultWrapper::AutotuneResultsToWrappers(
ThreeAutotuneEntries(42));
std::vector<std::pair<AutotuneResultWrapper::OpaqueKey,
AutotuneResultWrapper::OpaqueValue>>
key_value_pairs;
for (const auto& wrapper : wrappers) {
key_value_pairs.push_back(std::make_pair(wrapper.Key(), wrapper.Value()));
}
std::vector<AutotuneResultWrapper> new_wrappers;
for (const auto& [key, value] : key_value_pairs) {
TF_ASSERT_OK_AND_ASSIGN(AutotuneResultWrapper wrapper,
AutotuneResultWrapper::FromKeyAndValue(key, value));
new_wrappers.push_back(std::move(wrapper));
}
TF_ASSERT_OK_AND_ASSIGN(
AutotuneResults round_tripped,
AutotuneResultWrapper::AutotuneResultsFromWrappers(new_wrappers));
EXPECT_EQ(round_tripped.results_size(), 3);
EXPECT_EQ(round_tripped.version(), 42);
EXPECT_EQ(round_tripped.results(0).device(), "dev1");
EXPECT_EQ(round_tripped.results(0).hlo(), "hlo1");
EXPECT_EQ(round_tripped.results(0).result().scratch_bytes(), 1);
EXPECT_EQ(round_tripped.results(1).device(), "dev2");
EXPECT_EQ(round_tripped.results(1).hlo(), "hlo2");
EXPECT_EQ(round_tripped.results(1).result().scratch_bytes(), 2);
EXPECT_EQ(round_tripped.results(2).device(), "dev3");
EXPECT_EQ(round_tripped.results(2).hlo(), "hlo3");
EXPECT_EQ(round_tripped.results(2).result().scratch_bytes(), 3);
}
TEST(AutotuneResultWrapperTest, InconsistentVersions) {
std::vector<AutotuneResultWrapper> wrappers =
AutotuneResultWrapper::AutotuneResultsToWrappers(
ThreeAutotuneEntries(42));
auto inconsistent_wrappers = AutotuneResultWrapper::AutotuneResultsToWrappers(
ThreeAutotuneEntries(43));
wrappers.insert(wrappers.end(), inconsistent_wrappers.begin(),
inconsistent_wrappers.end());
std::vector<std::pair<AutotuneResultWrapper::OpaqueKey,
AutotuneResultWrapper::OpaqueValue>>
key_value_pairs;
for (const auto& wrapper : wrappers) {
key_value_pairs.push_back(std::make_pair(wrapper.Key(), wrapper.Value()));
}
std::vector<AutotuneResultWrapper> decoded_wrappers;
for (const auto& [key, value] : key_value_pairs) {
TF_ASSERT_OK_AND_ASSIGN(AutotuneResultWrapper wrapper,
AutotuneResultWrapper::FromKeyAndValue(key, value));
decoded_wrappers.push_back(std::move(wrapper));
}
EXPECT_IS_NOT_OK(
AutotuneResultWrapper::AutotuneResultsFromWrappers(decoded_wrappers));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/autotune_result_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/autotune_result_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93c98b2f-f94c-4951-b330-76c929dd1705 | cpp | abseil/abseil-cpp | absl_check | absl/log/absl_check.h | absl/log/absl_check_test.cc | #ifndef ABSL_LOG_ABSL_CHECK_H_
#define ABSL_LOG_ABSL_CHECK_H_
#include "absl/log/internal/check_impl.h"
#define ABSL_CHECK(condition) \
ABSL_LOG_INTERNAL_CHECK_IMPL((condition), #condition)
#define ABSL_QCHECK(condition) \
ABSL_LOG_INTERNAL_QCHECK_IMPL((condition), #condition)
#define ABSL_PCHECK(condition) \
ABSL_LOG_INTERNAL_PCHECK_IMPL((condition), #condition)
#define ABSL_DCHECK(condition) \
ABSL_LOG_INTERNAL_DCHECK_IMPL((condition), #condition)
#define ABSL_CHECK_EQ(val1, val2) \
ABSL_LOG_INTERNAL_CHECK_EQ_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_NE(val1, val2) \
ABSL_LOG_INTERNAL_CHECK_NE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_LE(val1, val2) \
ABSL_LOG_INTERNAL_CHECK_LE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_LT(val1, val2) \
ABSL_LOG_INTERNAL_CHECK_LT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_GE(val1, val2) \
ABSL_LOG_INTERNAL_CHECK_GE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_GT(val1, val2) \
ABSL_LOG_INTERNAL_CHECK_GT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_EQ(val1, val2) \
ABSL_LOG_INTERNAL_QCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_NE(val1, val2) \
ABSL_LOG_INTERNAL_QCHECK_NE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_LE(val1, val2) \
ABSL_LOG_INTERNAL_QCHECK_LE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_LT(val1, val2) \
ABSL_LOG_INTERNAL_QCHECK_LT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_GE(val1, val2) \
ABSL_LOG_INTERNAL_QCHECK_GE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_QCHECK_GT(val1, val2) \
ABSL_LOG_INTERNAL_QCHECK_GT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_EQ(val1, val2) \
ABSL_LOG_INTERNAL_DCHECK_EQ_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_NE(val1, val2) \
ABSL_LOG_INTERNAL_DCHECK_NE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_LE(val1, val2) \
ABSL_LOG_INTERNAL_DCHECK_LE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_LT(val1, val2) \
ABSL_LOG_INTERNAL_DCHECK_LT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_GE(val1, val2) \
ABSL_LOG_INTERNAL_DCHECK_GE_IMPL((val1), #val1, (val2), #val2)
#define ABSL_DCHECK_GT(val1, val2) \
ABSL_LOG_INTERNAL_DCHECK_GT_IMPL((val1), #val1, (val2), #val2)
#define ABSL_CHECK_OK(status) ABSL_LOG_INTERNAL_CHECK_OK_IMPL((status), #status)
#define ABSL_QCHECK_OK(status) \
ABSL_LOG_INTERNAL_QCHECK_OK_IMPL((status), #status)
#define ABSL_DCHECK_OK(status) \
ABSL_LOG_INTERNAL_DCHECK_OK_IMPL((status), #status)
#define ABSL_CHECK_STREQ(s1, s2) \
ABSL_LOG_INTERNAL_CHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_CHECK_STRNE(s1, s2) \
ABSL_LOG_INTERNAL_CHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_CHECK_STRCASEEQ(s1, s2) \
ABSL_LOG_INTERNAL_CHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_CHECK_STRCASENE(s1, s2) \
ABSL_LOG_INTERNAL_CHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_QCHECK_STREQ(s1, s2) \
ABSL_LOG_INTERNAL_QCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_QCHECK_STRNE(s1, s2) \
ABSL_LOG_INTERNAL_QCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_QCHECK_STRCASEEQ(s1, s2) \
ABSL_LOG_INTERNAL_QCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_QCHECK_STRCASENE(s1, s2) \
ABSL_LOG_INTERNAL_QCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_DCHECK_STREQ(s1, s2) \
ABSL_LOG_INTERNAL_DCHECK_STREQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_DCHECK_STRNE(s1, s2) \
ABSL_LOG_INTERNAL_DCHECK_STRNE_IMPL((s1), #s1, (s2), #s2)
#define ABSL_DCHECK_STRCASEEQ(s1, s2) \
ABSL_LOG_INTERNAL_DCHECK_STRCASEEQ_IMPL((s1), #s1, (s2), #s2)
#define ABSL_DCHECK_STRCASENE(s1, s2) \
ABSL_LOG_INTERNAL_DCHECK_STRCASENE_IMPL((s1), #s1, (s2), #s2)
#endif | #include "absl/log/absl_check.h"
#define ABSL_TEST_CHECK ABSL_CHECK
#define ABSL_TEST_CHECK_OK ABSL_CHECK_OK
#define ABSL_TEST_CHECK_EQ ABSL_CHECK_EQ
#define ABSL_TEST_CHECK_NE ABSL_CHECK_NE
#define ABSL_TEST_CHECK_GE ABSL_CHECK_GE
#define ABSL_TEST_CHECK_LE ABSL_CHECK_LE
#define ABSL_TEST_CHECK_GT ABSL_CHECK_GT
#define ABSL_TEST_CHECK_LT ABSL_CHECK_LT
#define ABSL_TEST_CHECK_STREQ ABSL_CHECK_STREQ
#define ABSL_TEST_CHECK_STRNE ABSL_CHECK_STRNE
#define ABSL_TEST_CHECK_STRCASEEQ ABSL_CHECK_STRCASEEQ
#define ABSL_TEST_CHECK_STRCASENE ABSL_CHECK_STRCASENE
#define ABSL_TEST_DCHECK ABSL_DCHECK
#define ABSL_TEST_DCHECK_OK ABSL_DCHECK_OK
#define ABSL_TEST_DCHECK_EQ ABSL_DCHECK_EQ
#define ABSL_TEST_DCHECK_NE ABSL_DCHECK_NE
#define ABSL_TEST_DCHECK_GE ABSL_DCHECK_GE
#define ABSL_TEST_DCHECK_LE ABSL_DCHECK_LE
#define ABSL_TEST_DCHECK_GT ABSL_DCHECK_GT
#define ABSL_TEST_DCHECK_LT ABSL_DCHECK_LT
#define ABSL_TEST_DCHECK_STREQ ABSL_DCHECK_STREQ
#define ABSL_TEST_DCHECK_STRNE ABSL_DCHECK_STRNE
#define ABSL_TEST_DCHECK_STRCASEEQ ABSL_DCHECK_STRCASEEQ
#define ABSL_TEST_DCHECK_STRCASENE ABSL_DCHECK_STRCASENE
#define ABSL_TEST_QCHECK ABSL_QCHECK
#define ABSL_TEST_QCHECK_OK ABSL_QCHECK_OK
#define ABSL_TEST_QCHECK_EQ ABSL_QCHECK_EQ
#define ABSL_TEST_QCHECK_NE ABSL_QCHECK_NE
#define ABSL_TEST_QCHECK_GE ABSL_QCHECK_GE
#define ABSL_TEST_QCHECK_LE ABSL_QCHECK_LE
#define ABSL_TEST_QCHECK_GT ABSL_QCHECK_GT
#define ABSL_TEST_QCHECK_LT ABSL_QCHECK_LT
#define ABSL_TEST_QCHECK_STREQ ABSL_QCHECK_STREQ
#define ABSL_TEST_QCHECK_STRNE ABSL_QCHECK_STRNE
#define ABSL_TEST_QCHECK_STRCASEEQ ABSL_QCHECK_STRCASEEQ
#define ABSL_TEST_QCHECK_STRCASENE ABSL_QCHECK_STRCASENE
#include "gtest/gtest.h"
#include "absl/log/check_test_impl.inc" | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/absl_check.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/absl_check_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
3f3c60b9-ff73-42be-a9ee-70a0a1e549dc | cpp | google/cel-cpp | cel_proto_wrapper | eval/public/structs/cel_proto_wrapper.cc | eval/public/structs/cel_proto_wrapper_test.cc | #include "eval/public/structs/cel_proto_wrapper.h"
#include "absl/types/optional.h"
#include "eval/public/cel_value.h"
#include "eval/public/message_wrapper.h"
#include "eval/public/structs/cel_proto_wrap_util.h"
#include "eval/public/structs/proto_message_type_adapter.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace google::api::expr::runtime {
namespace {
using ::google::protobuf::Arena;
using ::google::protobuf::Descriptor;
using ::google::protobuf::Message;
}
CelValue CelProtoWrapper::InternalWrapMessage(const Message* message) {
return CelValue::CreateMessageWrapper(
MessageWrapper(message, &GetGenericProtoTypeInfoInstance()));
}
CelValue CelProtoWrapper::CreateMessage(const Message* value, Arena* arena) {
return internal::UnwrapMessageToValue(value, &InternalWrapMessage, arena);
}
absl::optional<CelValue> CelProtoWrapper::MaybeWrapValue(
const Descriptor* descriptor, google::protobuf::MessageFactory* factory,
const CelValue& value, Arena* arena) {
const Message* msg =
internal::MaybeWrapValueToMessage(descriptor, factory, value, arena);
if (msg != nullptr) {
return InternalWrapMessage(msg);
} else {
return absl::nullopt;
}
}
} | #include "eval/public/structs/cel_proto_wrapper.h"
#include <cassert>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/empty.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/dynamic_message.h"
#include "google/protobuf/message.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/proto_time_encoding.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google::api::expr::runtime {
namespace {
using ::testing::Eq;
using ::testing::UnorderedPointwise;
using google::protobuf::Duration;
using google::protobuf::ListValue;
using google::protobuf::Struct;
using google::protobuf::Timestamp;
using google::protobuf::Value;
using google::protobuf::Any;
using google::protobuf::BoolValue;
using google::protobuf::BytesValue;
using google::protobuf::DoubleValue;
using google::protobuf::FloatValue;
using google::protobuf::Int32Value;
using google::protobuf::Int64Value;
using google::protobuf::StringValue;
using google::protobuf::UInt32Value;
using google::protobuf::UInt64Value;
using google::protobuf::Arena;
class CelProtoWrapperTest : public ::testing::Test {
protected:
CelProtoWrapperTest() {}
void ExpectWrappedMessage(const CelValue& value,
const google::protobuf::Message& message) {
auto result = CelProtoWrapper::MaybeWrapValue(
message.GetDescriptor(), message.GetReflection()->GetMessageFactory(),
value, arena());
EXPECT_TRUE(result.has_value());
EXPECT_TRUE((*result).IsMessage());
EXPECT_THAT((*result).MessageOrDie(), testutil::EqualsProto(message));
auto identity = CelProtoWrapper::MaybeWrapValue(
message.GetDescriptor(), message.GetReflection()->GetMessageFactory(),
*result, arena());
EXPECT_FALSE(identity.has_value());
result = CelProtoWrapper::MaybeWrapValue(
ReflectedCopy(message)->GetDescriptor(),
ReflectedCopy(message)->GetReflection()->GetMessageFactory(), value,
arena());
EXPECT_TRUE(result.has_value());
EXPECT_TRUE((*result).IsMessage());
EXPECT_THAT((*result).MessageOrDie(), testutil::EqualsProto(message));
}
void ExpectNotWrapped(const CelValue& value, const google::protobuf::Message& message) {
auto result = CelProtoWrapper::MaybeWrapValue(
message.GetDescriptor(), message.GetReflection()->GetMessageFactory(),
value, arena());
EXPECT_FALSE(result.has_value());
}
template <class T>
void ExpectUnwrappedPrimitive(const google::protobuf::Message& message, T result) {
CelValue cel_value = CelProtoWrapper::CreateMessage(&message, arena());
T value;
EXPECT_TRUE(cel_value.GetValue(&value));
EXPECT_THAT(value, Eq(result));
T dyn_value;
CelValue cel_dyn_value =
CelProtoWrapper::CreateMessage(ReflectedCopy(message).get(), arena());
EXPECT_THAT(cel_dyn_value.type(), Eq(cel_value.type()));
EXPECT_TRUE(cel_dyn_value.GetValue(&dyn_value));
EXPECT_THAT(value, Eq(dyn_value));
}
void ExpectUnwrappedMessage(const google::protobuf::Message& message,
google::protobuf::Message* result) {
CelValue cel_value = CelProtoWrapper::CreateMessage(&message, arena());
if (result == nullptr) {
EXPECT_TRUE(cel_value.IsNull());
return;
}
EXPECT_TRUE(cel_value.IsMessage());
EXPECT_THAT(cel_value.MessageOrDie(), testutil::EqualsProto(*result));
}
std::unique_ptr<google::protobuf::Message> ReflectedCopy(
const google::protobuf::Message& message) {
std::unique_ptr<google::protobuf::Message> dynamic_value(
factory_.GetPrototype(message.GetDescriptor())->New());
dynamic_value->CopyFrom(message);
return dynamic_value;
}
Arena* arena() { return &arena_; }
private:
Arena arena_;
google::protobuf::DynamicMessageFactory factory_;
};
TEST_F(CelProtoWrapperTest, TestType) {
Duration msg_duration;
msg_duration.set_seconds(2);
msg_duration.set_nanos(3);
CelValue value_duration1 = CelProtoWrapper::CreateDuration(&msg_duration);
EXPECT_THAT(value_duration1.type(), Eq(CelValue::Type::kDuration));
CelValue value_duration2 =
CelProtoWrapper::CreateMessage(&msg_duration, arena());
EXPECT_THAT(value_duration2.type(), Eq(CelValue::Type::kDuration));
Timestamp msg_timestamp;
msg_timestamp.set_seconds(2);
msg_timestamp.set_nanos(3);
CelValue value_timestamp1 = CelProtoWrapper::CreateTimestamp(&msg_timestamp);
EXPECT_THAT(value_timestamp1.type(), Eq(CelValue::Type::kTimestamp));
CelValue value_timestamp2 =
CelProtoWrapper::CreateMessage(&msg_timestamp, arena());
EXPECT_THAT(value_timestamp2.type(), Eq(CelValue::Type::kTimestamp));
}
TEST_F(CelProtoWrapperTest, TestDuration) {
Duration msg_duration;
msg_duration.set_seconds(2);
msg_duration.set_nanos(3);
CelValue value_duration1 = CelProtoWrapper::CreateDuration(&msg_duration);
EXPECT_THAT(value_duration1.type(), Eq(CelValue::Type::kDuration));
CelValue value_duration2 =
CelProtoWrapper::CreateMessage(&msg_duration, arena());
EXPECT_THAT(value_duration2.type(), Eq(CelValue::Type::kDuration));
CelValue value = CelProtoWrapper::CreateDuration(&msg_duration);
EXPECT_TRUE(value.IsDuration());
Duration out;
auto status = cel::internal::EncodeDuration(value.DurationOrDie(), &out);
EXPECT_TRUE(status.ok());
EXPECT_THAT(out, testutil::EqualsProto(msg_duration));
}
TEST_F(CelProtoWrapperTest, TestTimestamp) {
Timestamp msg_timestamp;
msg_timestamp.set_seconds(2);
msg_timestamp.set_nanos(3);
CelValue value_timestamp1 = CelProtoWrapper::CreateTimestamp(&msg_timestamp);
EXPECT_THAT(value_timestamp1.type(), Eq(CelValue::Type::kTimestamp));
CelValue value_timestamp2 =
CelProtoWrapper::CreateMessage(&msg_timestamp, arena());
EXPECT_THAT(value_timestamp2.type(), Eq(CelValue::Type::kTimestamp));
CelValue value = CelProtoWrapper::CreateTimestamp(&msg_timestamp);
EXPECT_TRUE(value.IsTimestamp());
Timestamp out;
auto status = cel::internal::EncodeTime(value.TimestampOrDie(), &out);
EXPECT_TRUE(status.ok());
EXPECT_THAT(out, testutil::EqualsProto(msg_timestamp));
}
TEST_F(CelProtoWrapperTest, UnwrapValueNull) {
Value json;
json.set_null_value(google::protobuf::NullValue::NULL_VALUE);
ExpectUnwrappedMessage(json, nullptr);
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicValueNull) {
Value value_msg;
value_msg.set_null_value(protobuf::NULL_VALUE);
CelValue value =
CelProtoWrapper::CreateMessage(ReflectedCopy(value_msg).get(), arena());
EXPECT_TRUE(value.IsNull());
}
TEST_F(CelProtoWrapperTest, UnwrapValueBool) {
bool value = true;
Value json;
json.set_bool_value(true);
ExpectUnwrappedPrimitive(json, value);
}
TEST_F(CelProtoWrapperTest, UnwrapValueNumber) {
double value = 1.0;
Value json;
json.set_number_value(value);
ExpectUnwrappedPrimitive(json, value);
}
TEST_F(CelProtoWrapperTest, UnwrapValueString) {
const std::string test = "test";
auto value = CelValue::StringHolder(&test);
Value json;
json.set_string_value(test);
ExpectUnwrappedPrimitive(json, value);
}
TEST_F(CelProtoWrapperTest, UnwrapValueStruct) {
const std::vector<std::string> kFields = {"field1", "field2", "field3"};
Struct value_struct;
auto& value1 = (*value_struct.mutable_fields())[kFields[0]];
value1.set_bool_value(true);
auto& value2 = (*value_struct.mutable_fields())[kFields[1]];
value2.set_number_value(1.0);
auto& value3 = (*value_struct.mutable_fields())[kFields[2]];
value3.set_string_value("test");
CelValue value = CelProtoWrapper::CreateMessage(&value_struct, arena());
ASSERT_TRUE(value.IsMap());
const CelMap* cel_map = value.MapOrDie();
CelValue field1 = CelValue::CreateString(&kFields[0]);
auto field1_presence = cel_map->Has(field1);
ASSERT_OK(field1_presence);
EXPECT_TRUE(*field1_presence);
auto lookup1 = (*cel_map)[field1];
ASSERT_TRUE(lookup1.has_value());
ASSERT_TRUE(lookup1->IsBool());
EXPECT_EQ(lookup1->BoolOrDie(), true);
CelValue field2 = CelValue::CreateString(&kFields[1]);
auto field2_presence = cel_map->Has(field2);
ASSERT_OK(field2_presence);
EXPECT_TRUE(*field2_presence);
auto lookup2 = (*cel_map)[field2];
ASSERT_TRUE(lookup2.has_value());
ASSERT_TRUE(lookup2->IsDouble());
EXPECT_DOUBLE_EQ(lookup2->DoubleOrDie(), 1.0);
CelValue field3 = CelValue::CreateString(&kFields[2]);
auto field3_presence = cel_map->Has(field3);
ASSERT_OK(field3_presence);
EXPECT_TRUE(*field3_presence);
auto lookup3 = (*cel_map)[field3];
ASSERT_TRUE(lookup3.has_value());
ASSERT_TRUE(lookup3->IsString());
EXPECT_EQ(lookup3->StringOrDie().value(), "test");
std::string missing = "missing_field";
CelValue missing_field = CelValue::CreateString(&missing);
auto missing_field_presence = cel_map->Has(missing_field);
ASSERT_OK(missing_field_presence);
EXPECT_FALSE(*missing_field_presence);
const CelList* key_list = cel_map->ListKeys().value();
ASSERT_EQ(key_list->size(), kFields.size());
std::vector<std::string> result_keys;
for (int i = 0; i < key_list->size(); i++) {
CelValue key = (*key_list)[i];
ASSERT_TRUE(key.IsString());
result_keys.push_back(std::string(key.StringOrDie().value()));
}
EXPECT_THAT(result_keys, UnorderedPointwise(Eq(), kFields));
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicStruct) {
Struct struct_msg;
const std::string kFieldInt = "field_int";
const std::string kFieldBool = "field_bool";
(*struct_msg.mutable_fields())[kFieldInt].set_number_value(1.);
(*struct_msg.mutable_fields())[kFieldBool].set_bool_value(true);
CelValue value =
CelProtoWrapper::CreateMessage(ReflectedCopy(struct_msg).get(), arena());
EXPECT_TRUE(value.IsMap());
const CelMap* cel_map = value.MapOrDie();
ASSERT_TRUE(cel_map != nullptr);
{
auto lookup = (*cel_map)[CelValue::CreateString(&kFieldInt)];
ASSERT_TRUE(lookup.has_value());
auto v = lookup.value();
ASSERT_TRUE(v.IsDouble());
EXPECT_THAT(v.DoubleOrDie(), testing::DoubleEq(1.));
}
{
auto lookup = (*cel_map)[CelValue::CreateString(&kFieldBool)];
ASSERT_TRUE(lookup.has_value());
auto v = lookup.value();
ASSERT_TRUE(v.IsBool());
EXPECT_EQ(v.BoolOrDie(), true);
}
{
auto presence = cel_map->Has(CelValue::CreateBool(true));
ASSERT_FALSE(presence.ok());
EXPECT_EQ(presence.status().code(), absl::StatusCode::kInvalidArgument);
auto lookup = (*cel_map)[CelValue::CreateBool(true)];
ASSERT_TRUE(lookup.has_value());
auto v = lookup.value();
ASSERT_TRUE(v.IsError());
}
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicValueStruct) {
const std::string kField1 = "field1";
const std::string kField2 = "field2";
Value value_msg;
(*value_msg.mutable_struct_value()->mutable_fields())[kField1]
.set_number_value(1);
(*value_msg.mutable_struct_value()->mutable_fields())[kField2]
.set_number_value(2);
CelValue value =
CelProtoWrapper::CreateMessage(ReflectedCopy(value_msg).get(), arena());
EXPECT_TRUE(value.IsMap());
EXPECT_TRUE(
(*value.MapOrDie())[CelValue::CreateString(&kField1)].has_value());
EXPECT_TRUE(
(*value.MapOrDie())[CelValue::CreateString(&kField2)].has_value());
}
TEST_F(CelProtoWrapperTest, UnwrapValueList) {
const std::vector<std::string> kFields = {"field1", "field2", "field3"};
ListValue list_value;
list_value.add_values()->set_bool_value(true);
list_value.add_values()->set_number_value(1.0);
list_value.add_values()->set_string_value("test");
CelValue value = CelProtoWrapper::CreateMessage(&list_value, arena());
ASSERT_TRUE(value.IsList());
const CelList* cel_list = value.ListOrDie();
ASSERT_EQ(cel_list->size(), 3);
CelValue value1 = (*cel_list)[0];
ASSERT_TRUE(value1.IsBool());
EXPECT_EQ(value1.BoolOrDie(), true);
auto value2 = (*cel_list)[1];
ASSERT_TRUE(value2.IsDouble());
EXPECT_DOUBLE_EQ(value2.DoubleOrDie(), 1.0);
auto value3 = (*cel_list)[2];
ASSERT_TRUE(value3.IsString());
EXPECT_EQ(value3.StringOrDie().value(), "test");
}
TEST_F(CelProtoWrapperTest, UnwrapDynamicValueListValue) {
Value value_msg;
value_msg.mutable_list_value()->add_values()->set_number_value(1.);
value_msg.mutable_list_value()->add_values()->set_number_value(2.);
CelValue value =
CelProtoWrapper::CreateMessage(ReflectedCopy(value_msg).get(), arena());
EXPECT_TRUE(value.IsList());
EXPECT_THAT((*value.ListOrDie())[0].DoubleOrDie(), testing::DoubleEq(1));
EXPECT_THAT((*value.ListOrDie())[1].DoubleOrDie(), testing::DoubleEq(2));
}
TEST_F(CelProtoWrapperTest, UnwrapAnyValue) {
TestMessage test_message;
test_message.set_string_value("test");
Any any;
any.PackFrom(test_message);
ExpectUnwrappedMessage(any, &test_message);
}
TEST_F(CelProtoWrapperTest, UnwrapInvalidAny) {
Any any;
CelValue value = CelProtoWrapper::CreateMessage(&any, arena());
ASSERT_TRUE(value.IsError());
any.set_type_url("/");
ASSERT_TRUE(CelProtoWrapper::CreateMessage(&any, arena()).IsError());
any.set_type_url("/invalid.proto.name");
ASSERT_TRUE(CelProtoWrapper::CreateMessage(&any, arena()).IsError());
}
// google.protobuf.BoolValue unwraps to a CEL bool.
TEST_F(CelProtoWrapperTest, UnwrapBoolWrapper) {
  bool expected = true;
  BoolValue wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.Int32Value unwraps to a CEL int64 (note the expected
// value is deliberately typed int64_t).
TEST_F(CelProtoWrapperTest, UnwrapInt32Wrapper) {
  int64_t expected = 12;
  Int32Value wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.UInt32Value unwraps to a CEL uint64.
TEST_F(CelProtoWrapperTest, UnwrapUInt32Wrapper) {
  uint64_t expected = 12;
  UInt32Value wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.Int64Value unwraps to a CEL int64.
TEST_F(CelProtoWrapperTest, UnwrapInt64Wrapper) {
  int64_t expected = 12;
  Int64Value wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.UInt64Value unwraps to a CEL uint64.
TEST_F(CelProtoWrapperTest, UnwrapUInt64Wrapper) {
  uint64_t expected = 12;
  UInt64Value wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.FloatValue unwraps to a CEL double.
TEST_F(CelProtoWrapperTest, UnwrapFloatWrapper) {
  double expected = 42.5;
  FloatValue wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.DoubleValue unwraps to a CEL double.
TEST_F(CelProtoWrapperTest, UnwrapDoubleWrapper) {
  double expected = 42.5;
  DoubleValue wrapper;
  wrapper.set_value(expected);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.StringValue unwraps to a CEL string.
TEST_F(CelProtoWrapperTest, UnwrapStringWrapper) {
  std::string text = "42";
  auto expected = CelValue::StringHolder(&text);
  StringValue wrapper;
  wrapper.set_value(text);
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// google.protobuf.BytesValue unwraps to a CEL bytes value.
TEST_F(CelProtoWrapperTest, UnwrapBytesWrapper) {
  std::string text = "42";
  auto expected = CelValue::BytesHolder(&text);
  BytesValue wrapper;
  wrapper.set_value("42");
  ExpectUnwrappedPrimitive(wrapper, expected);
}
// A null CelValue wraps into Value (null_value) and into an Any packing
// that Value.
TEST_F(CelProtoWrapperTest, WrapNull) {
  auto cel_null = CelValue::CreateNull();
  Value json;
  json.set_null_value(protobuf::NULL_VALUE);
  ExpectWrappedMessage(cel_null, json);
  Any any;
  any.PackFrom(json);
  ExpectWrappedMessage(cel_null, any);
}
// A CEL bool wraps into Value, BoolValue, and an Any packing BoolValue.
TEST_F(CelProtoWrapperTest, WrapBool) {
  auto cel_bool = CelValue::CreateBool(true);
  Value json;
  json.set_bool_value(true);
  ExpectWrappedMessage(cel_bool, json);
  BoolValue wrapper;
  wrapper.set_value(true);
  ExpectWrappedMessage(cel_bool, wrapper);
  Any any;
  any.PackFrom(wrapper);
  ExpectWrappedMessage(cel_bool, any);
}
// A CEL bytes value wraps into BytesValue and an Any packing BytesValue.
TEST_F(CelProtoWrapperTest, WrapBytes) {
  std::string payload = "hello world";
  auto cel_bytes = CelValue::CreateBytes(CelValue::BytesHolder(&payload));
  BytesValue wrapper;
  wrapper.set_value(payload);
  ExpectWrappedMessage(cel_bytes, wrapper);
  Any any;
  any.PackFrom(wrapper);
  ExpectWrappedMessage(cel_bytes, any);
}
// When targeting Value, bytes are rendered as base64 text
// ("aGVsbG8gd29ybGQ=" is base64 of "hello world").
TEST_F(CelProtoWrapperTest, WrapBytesToValue) {
  std::string payload = "hello world";
  auto cel_bytes = CelValue::CreateBytes(CelValue::BytesHolder(&payload));
  Value json;
  json.set_string_value("aGVsbG8gd29ybGQ=");
  ExpectWrappedMessage(cel_bytes, json);
}
// A CEL duration wraps into Duration and an Any packing Duration.
TEST_F(CelProtoWrapperTest, WrapDuration) {
  auto cel_duration = CelValue::CreateDuration(absl::Seconds(300));
  Duration proto;
  proto.set_seconds(300);
  ExpectWrappedMessage(cel_duration, proto);
  Any any;
  any.PackFrom(proto);
  ExpectWrappedMessage(cel_duration, any);
}
// When targeting Value, durations are rendered in "<seconds>s" form.
TEST_F(CelProtoWrapperTest, WrapDurationToValue) {
  auto cel_duration = CelValue::CreateDuration(absl::Seconds(300));
  Value json;
  json.set_string_value("300s");
  ExpectWrappedMessage(cel_duration, json);
}
// A CEL double wraps into Value, DoubleValue, and an Any packing
// DoubleValue.
TEST_F(CelProtoWrapperTest, WrapDouble) {
  const double num = 1.5;
  auto cel_double = CelValue::CreateDouble(num);
  Value json;
  json.set_number_value(num);
  ExpectWrappedMessage(cel_double, json);
  DoubleValue wrapper;
  wrapper.set_value(num);
  ExpectWrappedMessage(cel_double, wrapper);
  Any any;
  any.PackFrom(wrapper);
  ExpectWrappedMessage(cel_double, any);
}
// Doubles wrap into FloatValue, including magnitudes far below float's
// smallest normal value.
TEST_F(CelProtoWrapperTest, WrapDoubleToFloatValue) {
  double num = 1.5;
  auto cel_double = CelValue::CreateDouble(num);
  FloatValue wrapper;
  wrapper.set_value(num);
  ExpectWrappedMessage(cel_double, wrapper);
  double small_num = -9.9e-100;
  cel_double = CelValue::CreateDouble(small_num);
  wrapper.set_value(small_num);
  ExpectWrappedMessage(cel_double, wrapper);
}
// Doubles outside float's finite range wrap into FloatValue as +/-infinity.
TEST_F(CelProtoWrapperTest, WrapDoubleOverflow) {
  auto cel_double =
      CelValue::CreateDouble(std::numeric_limits<double>::lowest());
  FloatValue wrapper;
  wrapper.set_value(-std::numeric_limits<float>::infinity());
  ExpectWrappedMessage(cel_double, wrapper);
  cel_double = CelValue::CreateDouble(std::numeric_limits<double>::max());
  wrapper.set_value(std::numeric_limits<float>::infinity());
  ExpectWrappedMessage(cel_double, wrapper);
}
// An int64 that fits in a double wraps into Value (as a number),
// Int64Value, and an Any packing Int64Value.
TEST_F(CelProtoWrapperTest, WrapInt64) {
  int32_t num = std::numeric_limits<int32_t>::lowest();
  auto cel_int = CelValue::CreateInt64(num);
  Value json;
  json.set_number_value(static_cast<double>(num));
  ExpectWrappedMessage(cel_int, json);
  Int64Value wrapper;
  wrapper.set_value(num);
  ExpectWrappedMessage(cel_int, wrapper);
  Any any;
  any.PackFrom(wrapper);
  ExpectWrappedMessage(cel_int, any);
}
// An int64 within the 32-bit range wraps into Int32Value.
TEST_F(CelProtoWrapperTest, WrapInt64ToInt32Value) {
  int32_t num = std::numeric_limits<int32_t>::lowest();
  auto cel_int = CelValue::CreateInt64(num);
  Int32Value wrapper;
  wrapper.set_value(num);
  ExpectWrappedMessage(cel_int, wrapper);
}
// An int64 outside the 32-bit range must not wrap into Int32Value.
TEST_F(CelProtoWrapperTest, WrapFailureInt64ToInt32Value) {
  int64_t num = std::numeric_limits<int64_t>::lowest();
  auto cel_int = CelValue::CreateInt64(num);
  Int32Value wrapper;
  ExpectNotWrapped(cel_int, wrapper);
}
// Extreme int64s are rendered as decimal strings when targeting Value.
TEST_F(CelProtoWrapperTest, WrapInt64ToValue) {
  int64_t max = std::numeric_limits<int64_t>::max();
  auto cel_int = CelValue::CreateInt64(max);
  Value json;
  json.set_string_value(absl::StrCat(max));
  ExpectWrappedMessage(cel_int, json);
  int64_t min = std::numeric_limits<int64_t>::min();
  cel_int = CelValue::CreateInt64(min);
  json.set_string_value(absl::StrCat(min));
  ExpectWrappedMessage(cel_int, json);
}
// A uint64 that fits in a double wraps into Value (as a number),
// UInt64Value, and an Any packing UInt64Value.
TEST_F(CelProtoWrapperTest, WrapUint64) {
  uint32_t num = std::numeric_limits<uint32_t>::max();
  auto cel_uint = CelValue::CreateUint64(num);
  Value json;
  json.set_number_value(static_cast<double>(num));
  ExpectWrappedMessage(cel_uint, json);
  UInt64Value wrapper;
  wrapper.set_value(num);
  ExpectWrappedMessage(cel_uint, wrapper);
  Any any;
  any.PackFrom(wrapper);
  ExpectWrappedMessage(cel_uint, any);
}
// A uint64 within the 32-bit range wraps into UInt32Value.
TEST_F(CelProtoWrapperTest, WrapUint64ToUint32Value) {
  uint32_t num = std::numeric_limits<uint32_t>::max();
  auto cel_uint = CelValue::CreateUint64(num);
  UInt32Value wrapper;
  wrapper.set_value(num);
  ExpectWrappedMessage(cel_uint, wrapper);
}
// The maximum uint64 is rendered as a decimal string when targeting Value.
TEST_F(CelProtoWrapperTest, WrapUint64ToValue) {
  uint64_t num = std::numeric_limits<uint64_t>::max();
  auto cel_uint = CelValue::CreateUint64(num);
  Value json;
  json.set_string_value(absl::StrCat(num));
  ExpectWrappedMessage(cel_uint, json);
}
// A uint64 outside the 32-bit range must not wrap into UInt32Value.
TEST_F(CelProtoWrapperTest, WrapFailureUint64ToUint32Value) {
  uint64_t num = std::numeric_limits<uint64_t>::max();
  auto cel_uint = CelValue::CreateUint64(num);
  UInt32Value wrapper;
  ExpectNotWrapped(cel_uint, wrapper);
}
// A CEL string wraps into Value, StringValue, and an Any packing
// StringValue.
TEST_F(CelProtoWrapperTest, WrapString) {
  std::string text = "test";
  auto cel_string = CelValue::CreateString(CelValue::StringHolder(&text));
  Value json;
  json.set_string_value(text);
  ExpectWrappedMessage(cel_string, json);
  StringValue wrapper;
  wrapper.set_value(text);
  ExpectWrappedMessage(cel_string, wrapper);
  Any any;
  any.PackFrom(wrapper);
  ExpectWrappedMessage(cel_string, any);
}
// A CEL timestamp wraps into Timestamp and an Any packing Timestamp.
TEST_F(CelProtoWrapperTest, WrapTimestamp) {
  absl::Time ts = absl::FromUnixSeconds(1615852799);
  auto cel_ts = CelValue::CreateTimestamp(ts);
  Timestamp proto;
  proto.set_seconds(1615852799);
  ExpectWrappedMessage(cel_ts, proto);
  Any any;
  any.PackFrom(proto);
  ExpectWrappedMessage(cel_ts, any);
}
// When targeting Value, timestamps are rendered as RFC 3339 text.
TEST_F(CelProtoWrapperTest, WrapTimestampToValue) {
  auto cel_ts = CelValue::CreateTimestamp(absl::FromUnixSeconds(1615852799));
  Value json;
  json.set_string_value("2021-03-15T23:59:59Z");
  ExpectWrappedMessage(cel_ts, json);
}
// A CEL list wraps into Value, ListValue, and an Any packing ListValue.
TEST_F(CelProtoWrapperTest, WrapList) {
  std::vector<CelValue> elements = {
      CelValue::CreateDouble(1.5),
      CelValue::CreateInt64(-2L),
  };
  ContainerBackedListImpl list(std::move(elements));
  auto cel_list = CelValue::CreateList(&list);
  Value json;
  json.mutable_list_value()->add_values()->set_number_value(1.5);
  json.mutable_list_value()->add_values()->set_number_value(-2.);
  ExpectWrappedMessage(cel_list, json);
  ExpectWrappedMessage(cel_list, json.list_value());
  Any any;
  any.PackFrom(json.list_value());
  ExpectWrappedMessage(cel_list, any);
}
// A list containing an arbitrary message element cannot be converted to
// JSON, so wrapping into Value must fail.
TEST_F(CelProtoWrapperTest, WrapFailureListValueBadJSON) {
  TestMessage message;
  std::vector<CelValue> elements = {
      CelValue::CreateDouble(1.5),
      CelProtoWrapper::CreateMessage(&message, arena()),
  };
  ContainerBackedListImpl list(std::move(elements));
  auto cel_list = CelValue::CreateList(&list);
  Value json;
  ExpectNotWrapped(cel_list, json);
}
// A string-keyed CEL map wraps into Value, Struct, and an Any packing
// Struct.
TEST_F(CelProtoWrapperTest, WrapStruct) {
  const std::string kField1 = "field1";
  std::vector<std::pair<CelValue, CelValue>> entries = {
      {CelValue::CreateString(CelValue::StringHolder(&kField1)),
       CelValue::CreateBool(true)}};
  auto cel_map = CreateContainerBackedMap(
                     absl::Span<std::pair<CelValue, CelValue>>(
                         entries.data(), entries.size()))
                     .value();
  auto cel_value = CelValue::CreateMap(cel_map.get());
  Value json;
  (*json.mutable_struct_value()->mutable_fields())[kField1].set_bool_value(
      true);
  ExpectWrappedMessage(cel_value, json);
  ExpectWrappedMessage(cel_value, json.struct_value());
  Any any;
  any.PackFrom(json.struct_value());
  ExpectWrappedMessage(cel_value, any);
}
// Struct keys must be strings; an int64-keyed map cannot wrap into Value.
TEST_F(CelProtoWrapperTest, WrapFailureStructBadKeyType) {
  std::vector<std::pair<CelValue, CelValue>> entries = {
      {CelValue::CreateInt64(1L), CelValue::CreateBool(true)}};
  auto cel_map = CreateContainerBackedMap(
                     absl::Span<std::pair<CelValue, CelValue>>(
                         entries.data(), entries.size()))
                     .value();
  auto cel_value = CelValue::CreateMap(cel_map.get());
  Value json;
  ExpectNotWrapped(cel_value, json);
}
// A map value holding an arbitrary message has no JSON form, so wrapping
// into Value must fail.
TEST_F(CelProtoWrapperTest, WrapFailureStructBadValueType) {
  const std::string kField1 = "field1";
  TestMessage bad_value;
  std::vector<std::pair<CelValue, CelValue>> entries = {
      {CelValue::CreateString(CelValue::StringHolder(&kField1)),
       CelProtoWrapper::CreateMessage(&bad_value, arena())}};
  auto cel_map = CreateContainerBackedMap(
                     absl::Span<std::pair<CelValue, CelValue>>(
                         entries.data(), entries.size()))
                     .value();
  auto cel_value = CelValue::CreateMap(cel_map.get());
  Value json;
  ExpectNotWrapped(cel_value, json);
}
// A null CelValue must not wrap into any of these mismatched message types.
TEST_F(CelProtoWrapperTest, WrapFailureWrongType) {
  auto cel_null = CelValue::CreateNull();
  const std::vector<const google::protobuf::Message*> mismatched_types = {
      &BoolValue::default_instance(),   &BytesValue::default_instance(),
      &DoubleValue::default_instance(), &Duration::default_instance(),
      &FloatValue::default_instance(),  &Int32Value::default_instance(),
      &Int64Value::default_instance(),  &ListValue::default_instance(),
      &StringValue::default_instance(), &Struct::default_instance(),
      &Timestamp::default_instance(),   &UInt32Value::default_instance(),
      &UInt64Value::default_instance(),
  };
  for (const auto* target : mismatched_types) {
    ExpectNotWrapped(cel_null, *target);
  }
}
// CEL error values have no protobuf representation, even via Any.
TEST_F(CelProtoWrapperTest, WrapFailureErrorToAny) {
  auto error_value = CreateNoSuchFieldError(arena(), "error_field");
  ExpectNotWrapped(error_value, Any::default_instance());
}
// CelMapBuilder whose ListKeys() always fails; used to exercise the error
// branch of CelValue::DebugString() for maps.
class InvalidListKeysCelMapBuilder : public CelMapBuilder {
 public:
  absl::StatusOr<const CelList*> ListKeys() const override {
    return absl::InternalError("Error while invoking ListKeys()");
  }
};
// Spot-checks CelValue::DebugString() for wrapped messages, lists, maps,
// and the error path when a map's ListKeys() fails.
TEST_F(CelProtoWrapperTest, DebugString) {
  google::protobuf::Empty e;
  EXPECT_THAT(CelProtoWrapper::CreateMessage(&e, arena()).DebugString(),
              testing::StartsWith("Message: "));
  ListValue list_value;
  list_value.add_values()->set_bool_value(true);
  list_value.add_values()->set_number_value(1.0);
  list_value.add_values()->set_string_value("test");
  CelValue value = CelProtoWrapper::CreateMessage(&list_value, arena());
  EXPECT_EQ(value.DebugString(),
            "CelList: [bool: 1, double: 1.000000, string: test]");
  Struct value_struct;
  auto& value1 = (*value_struct.mutable_fields())["a"];
  value1.set_bool_value(true);
  auto& value2 = (*value_struct.mutable_fields())["b"];
  value2.set_number_value(1.0);
  auto& value3 = (*value_struct.mutable_fields())["c"];
  value3.set_string_value("test");
  value = CelProtoWrapper::CreateMessage(&value_struct, arena());
  // The map rendering is matched piecewise (prefix + per-entry substrings)
  // rather than as one exact string.
  EXPECT_THAT(
      value.DebugString(),
      testing::AllOf(testing::StartsWith("CelMap: {"),
                     testing::HasSubstr("<string: a>: <bool: 1>"),
                     testing::HasSubstr("<string: b>: <double: 1.0"),
                     testing::HasSubstr("<string: c>: <string: test>")));
  InvalidListKeysCelMapBuilder invalid_cel_map;
  auto cel_map_value = CelValue::CreateMap(&invalid_cel_map);
  EXPECT_EQ(cel_map_value.DebugString(), "CelMap: invalid list keys");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/cel_proto_wrapper.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/cel_proto_wrapper_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
61403832-c7b7-416d-abcd-da425f14f1b7 | cpp | google/cel-cpp | to_address | internal/to_address.h | internal/to_address_test.cc | #ifndef THIRD_PARTY_CEL_CPP_INTERNAL_TO_ADDRESS_H_
#define THIRD_PARTY_CEL_CPP_INTERNAL_TO_ADDRESS_H_
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/meta/type_traits.h"
namespace cel::internal {
#if defined(__cpp_lib_to_address) && __cpp_lib_to_address >= 201711L
using std::to_address;
#else
// Raw-pointer base case: the address of the pointee is the pointer itself.
// Function pointers are rejected, mirroring std::to_address's constraint.
template <typename T>
constexpr T* to_address(T* p) noexcept {
  static_assert(!std::is_function<T>::value, "T must not be a function");
  return p;
}
// Primary template: used when std::pointer_traits<T>::to_address is not
// available (see the specialization below); falls back to recursively
// unwrapping the result of operator->().
template <typename T, typename = void>
struct PointerTraitsToAddress {
  static constexpr auto Dispatch(
      const T& p ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
    return internal::to_address(p.operator->());
  }
};
// Specialization selected (via void_t SFINAE) whenever
// std::pointer_traits<T>::to_address(p) is well-formed; that customization
// point takes precedence over operator->().
template <typename T>
struct PointerTraitsToAddress<
    T, absl::void_t<decltype(std::pointer_traits<T>::to_address(
           std::declval<const T&>()))> > {
  static constexpr auto Dispatch(
      const T& p ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
    return std::pointer_traits<T>::to_address(p);
  }
};
// Fancy-pointer overload: dispatches either to
// std::pointer_traits<T>::to_address or to the operator->() fallback,
// depending on which PointerTraitsToAddress instantiation matches.
template <typename T>
constexpr auto to_address(const T& ptr ABSL_ATTRIBUTE_LIFETIME_BOUND) noexcept {
  return PointerTraitsToAddress<T>::Dispatch(ptr);
}
#endif
}
#endif | #include "internal/to_address.h"
#include <memory>
#include "internal/testing.h"
namespace cel {
namespace {
// For a plain pointer, to_address is the identity function.
TEST(ToAddress, RawPointer) {
  char c;
  char* raw = &c;
  EXPECT_EQ(internal::to_address(raw), raw);
}
// Fancy pointer exercising the operator->() fallback: it provides
// element_type and operator->() but no std::pointer_traits specialization.
struct ImplicitFancyPointer {
  using element_type = char;
  char* operator->() const { return ptr; }
  char* ptr;
};
// Fancy pointer with no operator->(); to_address can only reach its target
// through a std::pointer_traits specialization.
struct ExplicitFancyPointer {
  char* ptr;
};
}
}
namespace std {
// Opts ExplicitFancyPointer into the pointer_traits::to_address path;
// remaining trait members are inherited from pointer_traits<char*>.
template <>
struct pointer_traits<cel::ExplicitFancyPointer> : pointer_traits<char*> {
  static constexpr char* to_address(
      const cel::ExplicitFancyPointer& efp) noexcept {
    return efp.ptr;
  }
};
}
namespace cel {
namespace {
// Without a std::pointer_traits specialization, to_address must fall back
// to unwrapping via operator->().
TEST(ToAddress, FancyPointerNoPointerTraits) {
  char c;
  ImplicitFancyPointer fancy{&c};
  EXPECT_EQ(internal::to_address(fancy), &c);
}
// With a std::pointer_traits specialization, to_address resolves through
// its to_address(); the type has no operator->() at all.
TEST(ToAddress, FancyPointerWithPointerTraits) {
  char c;
  ExplicitFancyPointer fancy{&c};
  EXPECT_EQ(internal::to_address(fancy), &c);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/to_address.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/to_address_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c36b89df-44db-42b2-bb20-ef6a5f8cc238 | cpp | tensorflow/tensorflow | quantized_concat_op | tensorflow/core/kernels/quantized_concat_op.cc | tensorflow/core/kernels/quantized_concat_op_test.cc | #define EIGEN_USE_THREADS
#include <limits>
#include <utility>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/concat_lib_cpu.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace {
// Copy functor for ConcatCPUImpl: copies `n` elements of the
// `input_index`-th input from `src` to `dst`. When that input's
// quantization range differs from the shared output range, each element is
// requantized (dequantize to float, then quantize into
// [output_min, output_max]).
template <typename T>
struct RequantizeCopier {
  RequantizeCopier(
      const std::vector<std::pair<float, float>>* input_min_and_max,
      float output_min, float output_max)
      : output_min(output_min),
        output_max(output_max),
        input_min_and_max(input_min_and_max) {}
  inline void Copy(T* dst, const T* src, int input_index, size_t n) {
    const float input_min = (*input_min_and_max)[input_index].first;
    const float input_max = (*input_min_and_max)[input_index].second;
    if (input_min == output_min && input_max == output_max) {
      // Identical ranges: the quantized representation is unchanged, so a
      // raw memcpy suffices.
      DCHECK(DataTypeCanUseMemcpy(DataTypeToEnum<T>::v()));
      memcpy(dst, src, n * sizeof(T));
    } else {
      // Ranges differ: dequantize with the input's range and quantize back
      // with the output's range, all as one fused Eigen expression.
      Eigen::array<Eigen::DenseIndex, 1> dims;
      dims[0] = n;
      typename TTypes<T, 1>::UnalignedConstTensor input_array(src, dims);
      typename TTypes<T, 1>::UnalignedTensor output_array(dst, dims);
      QuantizedToFloatStruct<T> q2f(input_min, input_max);
      auto input_float = DEQUANTIZE_WITH_EIGEN(input_array, q2f);
      FloatToQuantizedStruct<T> f2q(output_min, output_max);
      auto input_requantized = QUANTIZE_WITH_EIGEN(input_float, f2q, T);
      output_array = input_requantized;
    }
  }
  // Shared output quantization range.
  float output_min;
  float output_max;
  // Per-input (min, max) pairs indexed by input_index; not owned.
  const std::vector<std::pair<float, float>>* input_min_and_max;
};
}
// Kernel implementing QuantizedConcat: concatenates N quantized input
// tensors along concat_dim, requantizing all of them into one shared
// output range derived from the per-input min/max scalar inputs.
template <typename T>
class QuantizedConcatOp : public OpKernel {
 public:
  typedef std::vector<std::unique_ptr<typename TTypes<T, 2>::ConstMatrix>>
      ConstMatrixVector;
  explicit QuantizedConcatOp(OpKernelConstruction* c) : OpKernel(c) {}
  // Validates that every min/max input holds exactly one element, records
  // each input's float range, and derives the combined output range; for
  // signed T the output range is made symmetric around zero.
  Status CalculateInputAndOutputRange(
      const OpInputList& input_mins, const OpInputList& input_maxes,
      const size_t N,
      std::vector<std::pair<float, float>>* input_mins_and_maxes,
      float* output_min, float* output_max) {
    input_mins_and_maxes->reserve(N);
    float overall_min = std::numeric_limits<float>::max();
    float overall_max = std::numeric_limits<float>::lowest();
    for (int i = 0; i < N; ++i) {
      if (input_mins[i].NumElements() != 1) {
        return errors::InvalidArgument(
            "input_mins each tensor's num elements must be 1, given num "
            "elements ",
            input_mins[i].NumElements(), " in index ", i);
      }
      if (input_maxes[i].NumElements() != 1) {
        return errors::InvalidArgument(
            "input_maxes each tensor's num elements must be 1, given num "
            "elements ",
            input_maxes[i].NumElements(), " in index ", i);
      }
      const float input_min = input_mins[i].flat<float>()(0);
      const float input_max = input_maxes[i].flat<float>()(0);
      input_mins_and_maxes->emplace_back(input_min, input_max);
      overall_min = std::min(overall_min, input_min);
      overall_max = std::max(overall_max, input_max);
    }
    // Clamp the lower bound to at most zero.
    overall_min = std::min(0.0f, overall_min);
    if (std::is_signed<T>::value) {
      // Symmetric range so positive and negative values share one scale.
      const float largest_value =
          std::max(std::abs(overall_min), std::abs(overall_max));
      *output_min = -largest_value;
      *output_max = largest_value;
    } else {
      *output_min = overall_min;
      *output_max = overall_max;
    }
    return absl::OkStatus();
  }
  // Product of the dimensions before concat_dim: the row count of the
  // flattened 2-D view used by the concat implementation.
  int64_t CalculateInputsDim(const TensorShape& input_shape,
                             const int32_t concat_dim) {
    int64_t inputs_flat_dim0 = 1;
    for (int d = 0; d < concat_dim; ++d) {
      inputs_flat_dim0 *= input_shape.dim_size(d);
    }
    return inputs_flat_dim0;
  }
  // Verifies that every input matches input_shape on all dimensions except
  // concat_dim, collects a flattened 2-D view of each non-empty input, and
  // accumulates the total size of the concatenated dimension.
  Status CalculateConcatDims(const size_t N, const TensorShape& input_shape,
                             int input_dims, const OpInputList& values,
                             const int32_t concat_dim,
                             const int64_t inputs_flat_dim0,
                             ConstMatrixVector* inputs_flat,
                             int* output_concat_dim) {
    inputs_flat->reserve(N);
    *output_concat_dim = 0;
    const bool input_is_scalar = TensorShapeUtils::IsScalar(input_shape);
    for (int i = 0; i < N; ++i) {
      const auto in = values[i];
      const bool in_is_scalar = TensorShapeUtils::IsScalar(in.shape());
      if (!(in.dims() == input_dims || (input_is_scalar && in_is_scalar))) {
        return errors::InvalidArgument(
            "ConcatOp : Ranks of all input tensors should match: shape[0] = ",
            input_shape.DebugString(), " vs. shape[", i,
            "] = ", in.shape().DebugString());
      }
      for (int j = 0; j < input_dims; ++j) {
        if (j == concat_dim) {
          continue;
        }
        if (in.dim_size(j) != input_shape.dim_size(j)) {
          return errors::InvalidArgument(
              "ConcatOp : Dimensions of inputs should match: shape[0] = ",
              input_shape.DebugString(), " vs. shape[", i,
              "] = ", in.shape().DebugString());
        }
      }
      if (in.NumElements() > 0) {
        int64_t inputs_flat_dim1 = in.NumElements() / inputs_flat_dim0;
        inputs_flat->emplace_back(new typename TTypes<T, 2>::ConstMatrix(
            in.shaped<T, 2>({inputs_flat_dim0, inputs_flat_dim1})));
      }
      // A scalar input contributes one element along the concatenated dim.
      *output_concat_dim += in.dims() > 0 ? in.dim_size(concat_dim) : 1;
    }
    return absl::OkStatus();
  }
  // Validates all inputs, computes the shared output range, performs the
  // requantizing concatenation, and emits the output plus its min/max.
  void Compute(OpKernelContext* context) override {
    const Tensor* concat_dim_tensor = nullptr;
    OP_REQUIRES_OK(context, context->input("concat_dim", &concat_dim_tensor));
    OP_REQUIRES(
        context, TensorShapeUtils::IsScalar(concat_dim_tensor->shape()),
        errors::InvalidArgument(
            "Concat dim tensor should be a scalar integer, but got shape ",
            concat_dim_tensor->shape().DebugString()));
    const int32_t concat_dim = concat_dim_tensor->scalar<int32>()();
    OpInputList values;
    OP_REQUIRES_OK(context, context->input_list("values", &values));
    const size_t N = values.size();
    OpInputList input_mins;
    OP_REQUIRES_OK(context, context->input_list("input_mins", &input_mins));
    OP_REQUIRES(context, (input_mins.size() == N),
                errors::InvalidArgument(
                    "QuantizedConcatOp : Expected mins input list length ",
                    input_mins.size(), " to equal values length ", N));
    OpInputList input_maxes;
    OP_REQUIRES_OK(context, context->input_list("input_maxes", &input_maxes));
    OP_REQUIRES(context, (input_maxes.size() == N),
                errors::InvalidArgument(
                    "QuantizedConcatOp : Expected maxes input list length ",
                    input_maxes.size(), " to equal values length ", N));
    const int input_dims = values[0].dims();
    const TensorShape& input_shape = values[0].shape();
    OP_REQUIRES(
        context, (0 <= concat_dim && concat_dim < input_dims),
        errors::InvalidArgument(
            "ConcatOp : Expected concatenating dimensions in the range [", 0,
            ", ", input_dims, "), but got ", concat_dim));
    float output_min = std::numeric_limits<float>::max();
    float output_max = std::numeric_limits<float>::lowest();
    std::vector<std::pair<float, float>> input_mins_and_maxes;
    OP_REQUIRES_OK(context,
                   CalculateInputAndOutputRange(input_mins, input_maxes, N,
                                                &input_mins_and_maxes,
                                                &output_min, &output_max));
    const int64_t inputs_flat_dim0 =
        CalculateInputsDim(input_shape, concat_dim);
    ConstMatrixVector inputs_flat;
    int output_concat_dim;
    OP_REQUIRES_OK(
        context, CalculateConcatDims(N, input_shape, input_dims, values,
                                     concat_dim, inputs_flat_dim0, &inputs_flat,
                                     &output_concat_dim));
    TensorShape output_shape(input_shape);
    if (output_shape.dims() == 0) {
      output_shape.AddDim(output_concat_dim);
    } else {
      output_shape.set_dim(concat_dim, output_concat_dim);
    }
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output));
    if (output->NumElements() > 0) {
      int64_t output_dim1 = output->NumElements() / inputs_flat_dim0;
      auto output_flat = output->shaped<T, 2>({inputs_flat_dim0, output_dim1});
      // RequantizeCopier rescales each input into the shared output range
      // as it is copied into place.
      ConcatCPUImpl<T>(
          context->device(), inputs_flat, sizeof(T) ,
          RequantizeCopier<T>(&input_mins_and_maxes, output_min, output_max),
          &output_flat);
    }
    Tensor* output_min_tensor = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(1, {}, &output_min_tensor));
    output_min_tensor->flat<float>()(0) = output_min;
    Tensor* output_max_tensor = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(2, {}, &output_max_tensor));
    output_max_tensor->flat<float>()(0) = output_max;
  }
};
// Registers the CPU QuantizedConcat kernel for each supported quantized
// element type; concat_dim is pinned to host memory so its value can be
// read directly when computing the output shape.
#define REGISTER_QUANTIZED_CONCAT(type)                  \
  REGISTER_KERNEL_BUILDER(Name("QuantizedConcat")        \
                              .Device(DEVICE_CPU)        \
                              .TypeConstraint<type>("T") \
                              .HostMemory("concat_dim"), \
                          QuantizedConcatOp<type>)
REGISTER_QUANTIZED_CONCAT(quint8);
REGISTER_QUANTIZED_CONCAT(qint32);
#undef REGISTER_QUANTIZED_CONCAT
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
using test::graph::Constant;
// Fixture for the QuantizedConcat CPU kernel tests. The Test* helpers each
// build and run one concatenation scenario; the min/max parameters give
// each of the two inputs its own quantization range.
class QuantizedConcatTest : public OpsTestBase {
 protected:
  QuantizedConcatTest() {}
  // Concatenates two 2x2x3 quint8 tensors along dim 0.
  void TestSmall8Bit(float first_min, float first_max, float second_min,
                     float second_max);
  // Concatenates two 2x2x3 qint32 tensors along dim 0.
  void TestSmall32Bit(float first_min, float first_max, float second_min,
                      float second_max);
  // Concatenates two 2x2x3 quint8 tensors along dim 1.
  void TestSecondDim8Bit(float first_min, float first_max, float second_min,
                         float second_max);
  // Feeds a malformed min or max tensor and expects InvalidArgument.
  void TestInvalidMinMax(const Tensor& first_min, const Tensor& first_max);
};
// A min tensor with more than one element must be rejected.
TEST_F(QuantizedConcatTest, InvalidMin) {
  Tensor bad_min(DT_FLOAT, {3});
  test::FillValues<float>(&bad_min, {0.0, 0.0, 0.0});
  Tensor good_max(DT_FLOAT, {});
  test::FillValues<float>(&good_max, {0.0});
  TestInvalidMinMax(bad_min, good_max);
}
// A non-scalar max tensor must be rejected.
TEST_F(QuantizedConcatTest, InvalidMax) {
  Tensor good_min(DT_FLOAT, {});
  test::FillValues<float>(&good_min, {0.0});
  Tensor bad_max(DT_FLOAT, {3, 0, 2});
  TestInvalidMinMax(good_min, bad_max);
}
// Shared driver: builds a two-input QuantizedConcat op, feeds one valid
// min/max pair plus the caller-supplied (possibly malformed) pair, and
// expects kernel execution to fail with InvalidArgument.
void QuantizedConcatTest::TestInvalidMinMax(const Tensor& first_min,
                                            const Tensor& first_max) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat")
                   .Input(FakeInput(DT_INT32))
                   .Input(FakeInput(2, DT_QUINT8))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Attr("N", 2)
                   .Attr("T", DataTypeToEnum<quint8>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  Tensor first_quantized(DT_QUINT8, {1});
  test::FillValues<quint8>(&first_quantized, {1});
  Tensor second_quantized(DT_QUINT8, {1});
  test::FillValues<quint8>(&second_quantized, {1});
  // Feed order: concat_dim, values[2], input_mins[2], input_maxes[2].
  AddInputFromArray<int32>(TensorShape({}), {0});
  AddInputFromArray<quint8>(first_quantized.shape(),
                            first_quantized.flat<quint8>());
  AddInputFromArray<quint8>(second_quantized.shape(),
                            second_quantized.flat<quint8>());
  AddInputFromArray<float>(first_min.shape(), first_min.flat<float>());
  AddInputFromArray<float>(TensorShape({}), {1.0});
  AddInputFromArray<float>(first_max.shape(), first_max.flat<float>());
  AddInputFromArray<float>(TensorShape({}), {2.0});
  EXPECT_TRUE(errors::IsInvalidArgument(RunOpKernel()));
}
// Inputs quantized with different ranges.
TEST_F(QuantizedConcatTest, Small8Bit) {
  TestSmall8Bit(0.0f, 255.0f, 0.0f, 25.0f);
}
// Both inputs share one range.
TEST_F(QuantizedConcatTest, Small8BitSameRange) {
  TestSmall8Bit(0.0f, 255.0f, 0.0f, 255.0f);
}
// Concatenates two 2x2x3 quint8 tensors along dim 0 and checks the
// dequantized result against the float-space concatenation.
void QuantizedConcatTest::TestSmall8Bit(float first_min, float first_max,
                                        float second_min, float second_max) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat")
                   .Input(FakeInput(DT_INT32))
                   .Input(FakeInput(2, DT_QUINT8))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Attr("N", 2)
                   .Attr("T", DataTypeToEnum<quint8>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const int first_batch = 2;
  const int first_height = 2;
  const int first_width = 3;
  Tensor first_float(DT_FLOAT, {first_batch, first_height, first_width});
  test::FillValues<float>(&first_float,
                          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  Tensor first_quantized =
      FloatTensorToQuantized<quint8>(first_float, first_min, first_max);
  const int second_batch = 2;
  const int second_height = 2;
  const int second_width = 3;
  Tensor second_float(DT_FLOAT, {second_batch, second_height, second_width});
  test::FillValues<float>(&second_float,
                          {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  Tensor second_quantized =
      FloatTensorToQuantized<quint8>(second_float, second_min, second_max);
  const int expected_batch = first_batch + second_batch;
  Tensor expected_float(DT_FLOAT, {expected_batch, first_height, first_width});
  test::FillValues<float>(&expected_float,
                          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                           13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  // Feed order: concat_dim, values[2], input_mins[2], input_maxes[2].
  AddInputFromArray<int32>(TensorShape({}), {0});
  AddInputFromArray<quint8>(first_quantized.shape(),
                            first_quantized.flat<quint8>());
  AddInputFromArray<quint8>(second_quantized.shape(),
                            second_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({}), {first_min});
  AddInputFromArray<float>(TensorShape({}), {second_min});
  AddInputFromArray<float>(TensorShape({}), {first_max});
  AddInputFromArray<float>(TensorShape({}), {second_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  // Allow a small quantization error in the round-tripped values.
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}
// Inputs quantized with different ranges.
TEST_F(QuantizedConcatTest, Small32Bit) {
  TestSmall32Bit(0.0f, 1200.0f, 0.0f, 2400.0f);
}
// Both inputs share one symmetric range.
TEST_F(QuantizedConcatTest, Small32BitSameRange) {
  TestSmall32Bit(-2400.0f, 2400.0f, -2400.0f, 2400.0f);
}
// Only the first input's range matches the combined output range.
TEST_F(QuantizedConcatTest, Small32BitOneDimSameRangeAsOutput) {
  TestSmall32Bit(-2400.0f, 2400.0f, -1200.0f, 2400.0f);
}
// Concatenates two 2x2x3 qint32 tensors along dim 0 and checks the
// dequantized result against the float-space concatenation.
void QuantizedConcatTest::TestSmall32Bit(float first_min, float first_max,
                                         float second_min, float second_max) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat")
                   .Input(FakeInput(DT_INT32))
                   .Input(FakeInput(2, DT_QINT32))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Attr("N", 2)
                   .Attr("T", DataTypeToEnum<qint32>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const int first_batch = 2;
  const int first_height = 2;
  const int first_width = 3;
  Tensor first_float(DT_FLOAT, {first_batch, first_height, first_width});
  test::FillValues<float>(&first_float, {100, 200, 300, 400, 500, 600, 700, 800,
                                         900, 1000, 1100, 1200});
  Tensor first_quantized =
      FloatTensorToQuantized<qint32>(first_float, first_min, first_max);
  const int second_batch = 2;
  const int second_height = 2;
  const int second_width = 3;
  Tensor second_float(DT_FLOAT, {second_batch, second_height, second_width});
  test::FillValues<float>(&second_float, {1300, 1400, 1500, 1600, 1700, 1800,
                                          1900, 2000, 2100, 2200, 2300, 2400});
  Tensor second_quantized =
      FloatTensorToQuantized<qint32>(second_float, second_min, second_max);
  const int expected_batch = first_batch + second_batch;
  Tensor expected_float(DT_FLOAT, {expected_batch, first_height, first_width});
  test::FillValues<float>(
      &expected_float,
      {100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200,
       1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2100, 2200, 2300, 2400});
  // Feed order: concat_dim, values[2], input_mins[2], input_maxes[2].
  AddInputFromArray<int32>(TensorShape({}), {0});
  AddInputFromArray<qint32>(first_quantized.shape(),
                            first_quantized.flat<qint32>());
  AddInputFromArray<qint32>(second_quantized.shape(),
                            second_quantized.flat<qint32>());
  AddInputFromArray<float>(TensorShape({}), {first_min});
  AddInputFromArray<float>(TensorShape({}), {second_min});
  AddInputFromArray<float>(TensorShape({}), {first_max});
  AddInputFromArray<float>(TensorShape({}), {second_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<qint32>(output_quantized, output_min, output_max);
  // Allow a small quantization error in the round-tripped values.
  test::ExpectTensorNear<float>(expected_float, output_float, 0.2);
}
// Inputs quantized with different ranges.
TEST_F(QuantizedConcatTest, SecondDim8Bit) {
  TestSecondDim8Bit(-10.0f, 150.0f, 0.0f, 200.0f);
}
// Both inputs share one range.
TEST_F(QuantizedConcatTest, SecondDim8BitSameRange) {
  TestSecondDim8Bit(-10.0f, 150.0f, -10.0f, 150.0f);
}
// Concatenates two 2x2x3 quint8 tensors along dim 1 (height) and checks
// the dequantized result against the float-space concatenation.
void QuantizedConcatTest::TestSecondDim8Bit(float first_min, float first_max,
                                            float second_min,
                                            float second_max) {
  TF_ASSERT_OK(NodeDefBuilder("quantized_concat_op", "QuantizedConcat")
                   .Input(FakeInput(DT_INT32))
                   .Input(FakeInput(2, DT_QUINT8))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Input(FakeInput(2, DT_FLOAT))
                   .Attr("N", 2)
                   .Attr("T", DataTypeToEnum<quint8>::v())
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  const int first_batch = 2;
  const int first_height = 2;
  const int first_width = 3;
  Tensor first_float(DT_FLOAT, {first_batch, first_height, first_width});
  test::FillValues<float>(&first_float,
                          {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  Tensor first_quantized =
      FloatTensorToQuantized<quint8>(first_float, first_min, first_max);
  const int second_batch = 2;
  const int second_height = 2;
  const int second_width = 3;
  Tensor second_float(DT_FLOAT, {second_batch, second_height, second_width});
  test::FillValues<float>(&second_float,
                          {13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  Tensor second_quantized =
      FloatTensorToQuantized<quint8>(second_float, second_min, second_max);
  const int expected_height = first_height + second_height;
  Tensor expected_float(DT_FLOAT, {first_batch, expected_height, first_width});
  // Rows interleave per batch because the concat axis is dim 1.
  test::FillValues<float>(&expected_float,
                          {1, 2, 3, 4, 5, 6, 13, 14, 15, 16, 17, 18,
                           7, 8, 9, 10, 11, 12, 19, 20, 21, 22, 23, 24});
  // Feed order: concat_dim, values[2], input_mins[2], input_maxes[2].
  AddInputFromArray<int32>(TensorShape({}), {1});
  AddInputFromArray<quint8>(first_quantized.shape(),
                            first_quantized.flat<quint8>());
  AddInputFromArray<quint8>(second_quantized.shape(),
                            second_quantized.flat<quint8>());
  AddInputFromArray<float>(TensorShape({}), {first_min});
  AddInputFromArray<float>(TensorShape({}), {second_min});
  AddInputFromArray<float>(TensorShape({}), {first_max});
  AddInputFromArray<float>(TensorShape({}), {second_max});
  TF_ASSERT_OK(RunOpKernel());
  const Tensor& output_quantized = *GetOutput(0);
  const float output_min = GetOutput(1)->flat<float>()(0);
  const float output_max = GetOutput(2)->flat<float>()(0);
  Tensor output_float =
      QuantizedTensorToFloat<quint8>(output_quantized, output_min, output_max);
  // Allow for quantization error in the round-tripped values.
  test::ExpectTensorNear<float>(expected_float, output_float, 1.0);
}
// Builds a two-input QuantizedConcat graph over random [100, dim2] tensors
// of type T and benchmarks it on CPU. `same_limits` selects whether the two
// inputs share one quantization range or use differing ranges.
template <typename T>
static void ConcatHelper(::testing::benchmark::State& state,
                         int concat_dimension, bool same_limits, int dim2) {
  Graph* graph = new Graph(OpRegistry::Global());
  const DataType dtype = DataTypeToEnum<T>::v();
  constexpr int kDim1 = 100;
  const TensorShape input_shape({kDim1, dim2});
  Tensor dim_tensor = test::AsScalar<int32>(concat_dimension);
  Tensor input0(dtype, input_shape);
  input0.flat<T>().setRandom();
  Tensor input1(dtype, input_shape);
  input1.flat<T>().setRandom();
  // With same_limits both ranges are [-1, 1]; otherwise the second input
  // uses [-255, 255].
  Tensor min0 = test::AsScalar<float>(-1.0);
  Tensor max0 = test::AsScalar<float>(1.0);
  Tensor min1 = test::AsScalar<float>(same_limits ? -1.0 : -255.0);
  Tensor max1 = test::AsScalar<float>(same_limits ? 1.0 : 255.0);
  Node* concat_node;
  TF_CHECK_OK(NodeBuilder(graph->NewName("n"), "QuantizedConcat")
                  .Input(Constant(graph, dim_tensor))
                  .Input({Constant(graph, input0), Constant(graph, input1)})
                  .Input({Constant(graph, min0), Constant(graph, min1)})
                  .Input({Constant(graph, max0), Constant(graph, max1)})
                  .Attr("N", 2)
                  .Attr("T", dtype)
                  .Finalize(graph, &concat_node));
  test::Benchmark("cpu", graph, /*old_benchmark_api=*/false).Run(state);
  // Two inputs of kDim1 * dim2 elements are consumed per iteration.
  state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * 2 *
                          kDim1 * dim2 * sizeof(T));
}
// qint32 concat along dim 0 with matching quantization ranges.
static void BM_QConcatDim0SameLimitQInt32(::testing::benchmark::State& state) {
  ConcatHelper<qint32>(state, /*concat_dimension=*/0, /*same_limits=*/true,
                       /*dim2=*/state.range(0));
}
// qint32 concat along dim 1 with matching quantization ranges.
static void BM_QConcatDim1SameLimitQInt32(::testing::benchmark::State& state) {
  ConcatHelper<qint32>(state, /*concat_dimension=*/1, /*same_limits=*/true,
                       /*dim2=*/state.range(0));
}
// qint32 concat along dim 0 with differing quantization ranges.
static void BM_QConcatDim0DifferLimitQInt32(
    ::testing::benchmark::State& state) {
  ConcatHelper<qint32>(state, /*concat_dimension=*/0, /*same_limits=*/false,
                       /*dim2=*/state.range(0));
}
// qint32 concat along dim 1 with differing quantization ranges.
static void BM_QConcatDim1DifferLimitQInt32(
    ::testing::benchmark::State& state) {
  ConcatHelper<qint32>(state, /*concat_dimension=*/1, /*same_limits=*/false,
                       /*dim2=*/state.range(0));
}
// Register the qint32 benchmarks over three inner-dimension sizes;
// UseRealTime() reports wall-clock rather than CPU time.
BENCHMARK(BM_QConcatDim0SameLimitQInt32)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
BENCHMARK(BM_QConcatDim1SameLimitQInt32)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
BENCHMARK(BM_QConcatDim0DifferLimitQInt32)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
BENCHMARK(BM_QConcatDim1DifferLimitQInt32)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
// quint8 concat along dim 0 with matching quantization ranges.
static void BM_QConcatDim0SameLimitQUint8(::testing::benchmark::State& state) {
  const int dim2 = state.range(0);
  // Fixed: this previously instantiated ConcatHelper<qint32>, so the
  // "QUint8" benchmark measured the qint32 path its name does not claim.
  ConcatHelper<quint8>(state, /*concat_dimension=*/0, /*same_limits=*/true,
                       dim2);
}
// quint8 concat along dim 1 with matching quantization ranges.
static void BM_QConcatDim1SameLimitQUint8(::testing::benchmark::State& state) {
  const int dim2 = state.range(0);
  // Fixed: this previously instantiated ConcatHelper<qint32>, so the
  // "QUint8" benchmark measured the qint32 path its name does not claim.
  ConcatHelper<quint8>(state, /*concat_dimension=*/1, /*same_limits=*/true,
                       dim2);
}
// quint8 concat along dim 0 with differing quantization ranges.
static void BM_QConcatDim0DifferLimitQUint8(
    ::testing::benchmark::State& state) {
  const int dim2 = state.range(0);
  // Fixed: this previously instantiated ConcatHelper<qint32>, so the
  // "QUint8" benchmark measured the qint32 path its name does not claim.
  ConcatHelper<quint8>(state, /*concat_dimension=*/0, /*same_limits=*/false,
                       dim2);
}
// quint8 concat along dim 1 with differing quantization ranges.
static void BM_QConcatDim1DifferLimitQUint8(
    ::testing::benchmark::State& state) {
  const int dim2 = state.range(0);
  // Fixed: this previously instantiated ConcatHelper<qint32>, so the
  // "QUint8" benchmark measured the qint32 path its name does not claim.
  ConcatHelper<quint8>(state, /*concat_dimension=*/1, /*same_limits=*/false,
                       dim2);
}
// Register the quint8 benchmarks over the same inner-dimension sizes as the
// qint32 set; UseRealTime() reports wall-clock rather than CPU time.
BENCHMARK(BM_QConcatDim0SameLimitQUint8)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
BENCHMARK(BM_QConcatDim1SameLimitQUint8)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
BENCHMARK(BM_QConcatDim0DifferLimitQUint8)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
BENCHMARK(BM_QConcatDim1DifferLimitQUint8)
    ->UseRealTime()
    ->Arg(1000)
    ->Arg(20000)
    ->Arg(100000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_concat_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_concat_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |